/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver 1.588"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver 1.588"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
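/*
 * Note: the SD_INTERCONNECT_* values used above are #defined further down
 * in this file (near the sd_disk_table definitions). The forward reference
 * is safe because macro expansion is deferred until the point of use, and
 * all uses of SD_DEFAULT_INTERCONNECT_TYPE occur after those definitions.
 */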
/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void *sd_state				= NULL;
int sd_io_time				= SD_IO_TIME;
int sd_failfast_enable			= 1;
int sd_ua_retry_count			= SD_UA_RETRY_COUNT;
int sd_report_pfa			= 1;
int sd_max_throttle			= SD_MAX_THROTTLE;
int sd_min_throttle			= SD_MIN_THROTTLE;
int sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable		= TRUE;

int sd_retry_on_reservation_conflict	= 1;
int sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi device only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))


/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)
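/*
 * Note on the positional initializers below: judging from how the named
 * values are used here (e.g. tst_properties and pirus_properties), the
 * sd_tunables fields are, in order: throttle, ctype, not-ready retry
 * count, busy retry count, reset retry count, reserve/release time,
 * min throttle, disksort-disabled flag, and lun-reset-enabled flag.
 * See the sd_tunables definition in sddef.h for the authoritative layout.
 */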
static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};
static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))
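/*
 * E.g. SD_TOUPPER('c') yields 'C'; characters outside 'a'..'z' pass through
 * unchanged. Note that, unlike toupper(3C), the argument is evaluated more
 * than once, so it must not have side effects.
 */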
/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC       CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000i",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300",     SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
	{ "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);



#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
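/*
 * Note that Restore_state() effectively swaps un_state and un_last_state:
 * New_state() first saves the current un_state into un_last_state, so the
 * state being restored from is itself preserved for a subsequent restore.
 */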
"*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL }, 715 { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL }, 716 { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL }, 717 { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL }, 718 { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL }, 719 { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT, 720 &symbios_properties }, 721 { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT, 722 &lsi_properties_scsi }, 723 #if defined(__i386) || defined(__amd64) 724 { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD 725 | SD_CONF_BSET_READSUB_BCD 726 | SD_CONF_BSET_READ_TOC_ADDR_BCD 727 | SD_CONF_BSET_NO_READ_HEADER 728 | SD_CONF_BSET_READ_CD_XD4), NULL }, 729 730 { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD 731 | SD_CONF_BSET_READSUB_BCD 732 | SD_CONF_BSET_READ_TOC_ADDR_BCD 733 | SD_CONF_BSET_NO_READ_HEADER 734 | SD_CONF_BSET_READ_CD_XD4), NULL }, 735 #endif /* __i386 || __amd64 */ 736 #endif /* sparc NON-fibre or NON-sparc platforms */ 737 738 #if (defined(SD_PROP_TST)) 739 { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE 740 | SD_CONF_BSET_CTYPE 741 | SD_CONF_BSET_NRR_COUNT 742 | SD_CONF_BSET_FAB_DEVID 743 | SD_CONF_BSET_NOCACHE 744 | SD_CONF_BSET_BSY_RETRY_COUNT 745 | SD_CONF_BSET_PLAYMSF_BCD 746 | SD_CONF_BSET_READSUB_BCD 747 | SD_CONF_BSET_READ_TOC_TRK_BCD 748 | SD_CONF_BSET_READ_TOC_ADDR_BCD 749 | SD_CONF_BSET_NO_READ_HEADER 750 | SD_CONF_BSET_READ_CD_XD4 751 | SD_CONF_BSET_RST_RETRIES 752 | SD_CONF_BSET_RSV_REL_TIME 753 | SD_CONF_BSET_TUR_CHECK), &tst_properties}, 754 #endif 755 }; 756 757 static const int sd_disk_table_size = 758 sizeof (sd_disk_table)/ sizeof (sd_disk_config_t); 759 760 761 762 #define SD_INTERCONNECT_PARALLEL 0 763 #define SD_INTERCONNECT_FABRIC 1 764 #define SD_INTERCONNECT_FIBRE 2 765 #define SD_INTERCONNECT_SSA 3 766 #define SD_INTERCONNECT_SATA 4 767 #define SD_IS_PARALLEL_SCSI(un) \ 768 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL) 769 #define SD_IS_SERIAL(un) \ 770 ((un)->un_interconnect_type == SD_INTERCONNECT_SATA) 771 772 /* 773 * Definitions used by device id registration routines 774 */ 775 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */ 776 #define VPD_PAGE_LENGTH 3 /* offset for pge length data */ 777 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */ 778 779 static kmutex_t sd_sense_mutex = {0}; 780 781 /* 782 * Macros for updates of the driver state 783 */ 784 #define New_state(un, s) \ 785 (un)->un_last_state = (un)->un_state, (un)->un_state = (s) 786 #define Restore_state(un) \ 787 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); } 788 789 static struct sd_cdbinfo sd_cdbtab[] = { 790 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, }, 791 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, }, 792 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, }, 793 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, }, 794 }; 795 796 /* 797 * Specifies the number of seconds that must have elapsed since the last 798 * cmd. has completed for a device to be declared idle to the PM framework. 799 */ 800 static int sd_pm_idletime = 1; 801 802 /* 803 * Internal function prototypes 804 */ 805 806 #if (defined(__fibre)) 807 /* 808 * These #defines are to avoid namespace collisions that occur because this 809 * code is currently used to compile two separate driver modules: sd and ssd. 810 * All function names need to be treated this way (even if declared static) 811 * in order to allow the debugger to resolve the names properly. 
/*
 * Specifies the number of seconds that must have elapsed since the last
 * cmd. has completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_target_change_task		ssd_target_change_task
#define	sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
				ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_nvpair_str_decode		ssd_nvpair_str_decode
#define	sd_strtok_r			ssd_strtok_r
#define	sd_set_properties		ssd_set_properties
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
				ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
				ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);
/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int  sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int  sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int	sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void	sd_enable_descr_sense(struct sd_lun *un);
static void	sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void	sd_set_mmc_caps(struct sd_lun *un);

static void	sd_read_unit_properties(struct sd_lun *un);
static int	sd_process_sdconf_file(struct sd_lun *un);
static void	sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char	*sd_strtok_r(char *string, const char *sepset, char **lasts);
static void	sd_set_properties(struct sd_lun *un, char *name, char *value);
static void	sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void	sd_process_sdconf_table(struct sd_lun *un);
static int	sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int	sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int	sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
	int list_len, char *dataname_ptr);
static void	sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void	sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static int	sd_get_devid(struct sd_lun *un);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int	sd_write_deviceid(struct sd_lun *un);
static int	sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int	sd_check_vpd_page_support(struct sd_lun *un);

static void	sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void	sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int	sd_ddi_suspend(dev_info_t *devi);
static int	sd_ddi_pm_suspend(struct sd_lun *un);
static int	sd_ddi_resume(dev_info_t *devi);
static int	sd_ddi_pm_resume(struct sd_lun *un);
static int	sdpower(dev_info_t *devi, int component, int level);

static int	sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int	sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int	sd_unit_attach(dev_info_t *devi);
static int	sd_unit_detach(dev_info_t *devi);

static void	sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void	sd_create_errstats(struct sd_lun *un, int instance);
static void	sd_set_errstats(struct sd_lun *un);
static void	sd_set_pstats(struct sd_lun *un);

static int	sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int	sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int	sd_send_polled_RQS(struct sd_lun *un);
static int	sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void	sd_init_event_callbacks(struct sd_lun *un);
static void	sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *,
    void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1
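/*
 * For example, a caller can enable the write cache while leaving the read
 * cache setting untouched (a sketch of typical usage of the flags above):
 *
 *	(void) sd_cache_control(un, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
 */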
static int	sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag);
static int	sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled);
static void	sd_get_nv_sup(struct sd_lun *un);
static dev_t	sd_make_device(dev_info_t *devi);

static void	sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
	uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int	sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int	sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int	sd_ready_and_valid(struct sd_lun *un);

static void	sdmin(struct buf *bp);
static int	sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int	sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int	sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int	sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int	sdstrategy(struct buf *bp);
static int	sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
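/*
 * The "index" argument to the iostart/iodone routines above identifies the
 * caller's position in the driver's chain tables (sd_iostart_chain[] and
 * sd_iodone_chain[], defined later in this file): each iostart routine does
 * its layer's work and then invokes the next entry in the iostart chain,
 * while the iodone routines unwind through the chain in the reverse order.
 */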
/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void	sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int	sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void	sd_destroypkt_for_buf(struct buf *);
static int	sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
	struct buf *bp, int flags,
	int (*callback)(caddr_t), caddr_t callback_arg,
	diskaddr_t lba, uint32_t blockcount);
static int	sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
	struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int	sd_uscsi_strategy(struct buf *bp);
static int	sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void	sd_destroypkt_for_uscsi(struct buf *);

static void	sd_xbuf_init(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, uchar_t chain_type, void *pktinfop);

static int	sd_pm_entry(struct sd_lun *un);
static void	sd_pm_exit(struct sd_lun *un);

static void	sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
	enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
	daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
	uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
	struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
	int retry_check_flag,
	void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
		int c),
	void *user_arg, int failure_code, clock_t retry_delay,
	void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
	clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
	struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
	int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
	struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);
static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
	uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
	uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
	int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
	size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
	uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
	uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
	struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
	uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
	uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
	size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)

static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
	uint16_t buflen, uchar_t page_code, uchar_t page_control,
	uint16_t param_ptr, int path_flag);

static int  sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
	uchar_t *data, int len, int fmt);
static void sd_panic_for_res_conflict(struct sd_lun *un);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);

/*
 * Multi-host Ioctl Prototypes
 */
static int sd_check_mhd(dev_t dev, int interval);
static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
static char *sd_sname(uchar_t status);
*sd_sname(uchar_t status); 1471 static void sd_mhd_resvd_recover(void *arg); 1472 static void sd_resv_reclaim_thread(); 1473 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1474 static int sd_reserve_release(dev_t dev, int cmd); 1475 static void sd_rmv_resv_reclaim_req(dev_t dev); 1476 static void sd_mhd_reset_notify_cb(caddr_t arg); 1477 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1478 mhioc_inkeys_t *usrp, int flag); 1479 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1480 mhioc_inresvs_t *usrp, int flag); 1481 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1482 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1483 static int sd_mhdioc_release(dev_t dev); 1484 static int sd_mhdioc_register_devid(dev_t dev); 1485 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1486 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1487 1488 /* 1489 * SCSI removable prototypes 1490 */ 1491 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1492 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1493 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1494 static int sr_pause_resume(dev_t dev, int mode); 1495 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1496 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1497 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1498 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1499 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1500 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1501 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1502 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1503 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1504 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1505 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1506 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1507 static int sr_eject(dev_t dev); 1508 static void sr_ejected(register struct sd_lun *un); 1509 static int sr_check_wp(dev_t dev); 1510 static int sd_check_media(dev_t dev, enum dkio_state state); 1511 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1512 static void sd_delayed_cv_broadcast(void *arg); 1513 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1514 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1515 1516 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1517 1518 /* 1519 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 
1520 */ 1521 static void sd_check_for_writable_cd(struct sd_lun *un, int path_flag); 1522 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1523 static void sd_wm_cache_destructor(void *wm, void *un); 1524 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1525 daddr_t endb, ushort_t typ); 1526 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1527 daddr_t endb); 1528 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1529 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1530 static void sd_read_modify_write_task(void * arg); 1531 static int 1532 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1533 struct buf **bpp); 1534 1535 1536 /* 1537 * Function prototypes for failfast support. 1538 */ 1539 static void sd_failfast_flushq(struct sd_lun *un); 1540 static int sd_failfast_flushq_callback(struct buf *bp); 1541 1542 /* 1543 * Function prototypes to check for lsi devices 1544 */ 1545 static void sd_is_lsi(struct sd_lun *un); 1546 1547 /* 1548 * Function prototypes for partial DMA support 1549 */ 1550 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1551 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1552 1553 1554 /* Function prototypes for cmlb */ 1555 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1556 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1557 1558 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1559 1560 /* 1561 * Constants for failfast support: 1562 * 1563 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1564 * failfast processing being performed. 1565 * 1566 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1567 * failfast processing on all bufs with B_FAILFAST set. 1568 */ 1569 1570 #define SD_FAILFAST_INACTIVE 0 1571 #define SD_FAILFAST_ACTIVE 1 1572 1573 /* 1574 * Bitmask to control behavior of buf(9S) flushes when a transition to 1575 * the failfast state occurs. Optional bits include: 1576 * 1577 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1578 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1579 * be flushed. 1580 * 1581 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1582 * driver, in addition to the regular wait queue. This includes the xbuf 1583 * queues. When clear, only the driver's wait queue will be flushed. 1584 */ 1585 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1586 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1587 1588 /* 1589 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1590 * to flush all queues within the driver. 
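 *
 * For example (an illustrative setting, not a recommendation): setting
 * sd_failfast_flushctl to (SD_FAILFAST_FLUSH_ALL_BUFS |
 * SD_FAILFAST_FLUSH_ALL_QUEUES) would flush every queued buf, with or
 * without B_FAILFAST set, from every driver queue when the failfast
 * state is entered.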
1591 */ 1592 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1593 1594 1595 /* 1596 * SD Testing Fault Injection 1597 */ 1598 #ifdef SD_FAULT_INJECTION 1599 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1600 static void sd_faultinjection(struct scsi_pkt *pktp); 1601 static void sd_injection_log(char *buf, struct sd_lun *un); 1602 #endif 1603 1604 /* 1605 * Device driver ops vector 1606 */ 1607 static struct cb_ops sd_cb_ops = { 1608 sdopen, /* open */ 1609 sdclose, /* close */ 1610 sdstrategy, /* strategy */ 1611 nodev, /* print */ 1612 sddump, /* dump */ 1613 sdread, /* read */ 1614 sdwrite, /* write */ 1615 sdioctl, /* ioctl */ 1616 nodev, /* devmap */ 1617 nodev, /* mmap */ 1618 nodev, /* segmap */ 1619 nochpoll, /* poll */ 1620 sd_prop_op, /* cb_prop_op */ 1621 0, /* streamtab */ 1622 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1623 CB_REV, /* cb_rev */ 1624 sdaread, /* async I/O read entry point */ 1625 sdawrite /* async I/O write entry point */ 1626 }; 1627 1628 static struct dev_ops sd_ops = { 1629 DEVO_REV, /* devo_rev, */ 1630 0, /* refcnt */ 1631 sdinfo, /* info */ 1632 nulldev, /* identify */ 1633 sdprobe, /* probe */ 1634 sdattach, /* attach */ 1635 sddetach, /* detach */ 1636 nodev, /* reset */ 1637 &sd_cb_ops, /* driver operations */ 1638 NULL, /* bus operations */ 1639 sdpower /* power */ 1640 }; 1641 1642 1643 /* 1644 * This is the loadable module wrapper. 1645 */ 1646 #include <sys/modctl.h> 1647 1648 static struct modldrv modldrv = { 1649 &mod_driverops, /* Type of module. This one is a driver */ 1650 SD_MODULE_NAME, /* Module name. */ 1651 &sd_ops /* driver ops */ 1652 }; 1653 1654 1655 static struct modlinkage modlinkage = { 1656 MODREV_1, 1657 &modldrv, 1658 NULL 1659 }; 1660 1661 static cmlb_tg_ops_t sd_tgops = { 1662 TG_DK_OPS_VERSION_1, 1663 sd_tg_rdwr, 1664 sd_tg_getinfo 1665 }; 1666 1667 static struct scsi_asq_key_strings sd_additional_codes[] = { 1668 0x81, 0, "Logical Unit is Reserved", 1669 0x85, 0, "Audio Address Not Valid", 1670 0xb6, 0, "Media Load Mechanism Failed", 1671 0xB9, 0, "Audio Play Operation Aborted", 1672 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1673 0x53, 2, "Medium removal prevented", 1674 0x6f, 0, "Authentication failed during key exchange", 1675 0x6f, 1, "Key not present", 1676 0x6f, 2, "Key not established", 1677 0x6f, 3, "Read without proper authentication", 1678 0x6f, 4, "Mismatched region to this logical unit", 1679 0x6f, 5, "Region reset count error", 1680 0xffff, 0x0, NULL 1681 }; 1682 1683 1684 /* 1685 * Struct for passing printing information for sense data messages 1686 */ 1687 struct sd_sense_info { 1688 int ssi_severity; 1689 int ssi_pfa_flag; 1690 }; 1691 1692 /* 1693 * Table of function pointers for iostart-side routines. Separate "chains" 1694 * of layered function calls are formed by placing the function pointers 1695 * sequentially in the desired order. Functions are called according to an 1696 * incrementing table index ordering. The last function in each chain must 1697 * be sd_core_iostart(). The corresponding iodone-side routines are expected 1698 * in the sd_iodone_chain[] array. 1699 * 1700 * Note: It may seem more natural to organize both the iostart and iodone 1701 * functions together, into an array of structures (or some similar 1702 * organization) with a common index, rather than two separate arrays which 1703 * must be maintained in synchronization. 
The purpose of this division is
1704  * to achieve improved performance: individual arrays allow for more
1705  * effective cache line utilization on certain platforms.
1706  */
1707 
1708 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);
1709 
1710 
1711 static sd_chain_t sd_iostart_chain[] = {
1712 
1713 	/* Chain for buf IO for disk drive targets (PM enabled) */
1714 	sd_mapblockaddr_iostart,	/* Index: 0 */
1715 	sd_pm_iostart,			/* Index: 1 */
1716 	sd_core_iostart,		/* Index: 2 */
1717 
1718 	/* Chain for buf IO for disk drive targets (PM disabled) */
1719 	sd_mapblockaddr_iostart,	/* Index: 3 */
1720 	sd_core_iostart,		/* Index: 4 */
1721 
1722 	/* Chain for buf IO for removable-media targets (PM enabled) */
1723 	sd_mapblockaddr_iostart,	/* Index: 5 */
1724 	sd_mapblocksize_iostart,	/* Index: 6 */
1725 	sd_pm_iostart,			/* Index: 7 */
1726 	sd_core_iostart,		/* Index: 8 */
1727 
1728 	/* Chain for buf IO for removable-media targets (PM disabled) */
1729 	sd_mapblockaddr_iostart,	/* Index: 9 */
1730 	sd_mapblocksize_iostart,	/* Index: 10 */
1731 	sd_core_iostart,		/* Index: 11 */
1732 
1733 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1734 	sd_mapblockaddr_iostart,	/* Index: 12 */
1735 	sd_checksum_iostart,		/* Index: 13 */
1736 	sd_pm_iostart,			/* Index: 14 */
1737 	sd_core_iostart,		/* Index: 15 */
1738 
1739 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1740 	sd_mapblockaddr_iostart,	/* Index: 16 */
1741 	sd_checksum_iostart,		/* Index: 17 */
1742 	sd_core_iostart,		/* Index: 18 */
1743 
1744 	/* Chain for USCSI commands (all targets) */
1745 	sd_pm_iostart,			/* Index: 19 */
1746 	sd_core_iostart,		/* Index: 20 */
1747 
1748 	/* Chain for checksumming USCSI commands (all targets) */
1749 	sd_checksum_uscsi_iostart,	/* Index: 21 */
1750 	sd_pm_iostart,			/* Index: 22 */
1751 	sd_core_iostart,		/* Index: 23 */
1752 
1753 	/* Chain for "direct" USCSI commands (all targets) */
1754 	sd_core_iostart,		/* Index: 24 */
1755 
1756 	/* Chain for "direct priority" USCSI commands (all targets) */
1757 	sd_core_iostart,		/* Index: 25 */
1758 };
1759 
1760 /*
1761  * Macros to locate the first function of each iostart chain in the
1762  * sd_iostart_chain[] array. These are located by the index in the array.
1763  */
1764 #define	SD_CHAIN_DISK_IOSTART			0
1765 #define	SD_CHAIN_DISK_IOSTART_NO_PM		3
1766 #define	SD_CHAIN_RMMEDIA_IOSTART		5
1767 #define	SD_CHAIN_RMMEDIA_IOSTART_NO_PM		9
1768 #define	SD_CHAIN_CHKSUM_IOSTART			12
1769 #define	SD_CHAIN_CHKSUM_IOSTART_NO_PM		16
1770 #define	SD_CHAIN_USCSI_CMD_IOSTART		19
1771 #define	SD_CHAIN_USCSI_CHKSUM_IOSTART		21
1772 #define	SD_CHAIN_DIRECT_CMD_IOSTART		24
1773 #define	SD_CHAIN_PRIORITY_CMD_IOSTART		25
1774 
1775 
1776 /*
1777  * Table of function pointers for the iodone-side routines for the driver-
1778  * internal layering mechanism. The calling sequence for iodone routines
1779  * uses a decrementing table index, so the last routine called in a chain
1780  * must be at the lowest array index location for that chain. The last
1781  * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
1782  * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
1783  * of the functions in an iodone side chain must correspond to the ordering
1784  * of the iostart routines for that chain. Note that there is no iodone
1785  * side routine that corresponds to sd_core_iostart(), so there is no
1786  * entry in the table for this.
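 *
 * As a worked example (illustrative only): a buf issued on the disk
 * chain with PM enabled enters at SD_CHAIN_DISK_IOSTART (index 0), and
 * the index increments through the iostart side:
 *
 *	sd_mapblockaddr_iostart (0) -> sd_pm_iostart (1) -> sd_core_iostart (2)
 *
 * At IO completion time the core begins at SD_CHAIN_DISK_IODONE (index 2)
 * in sd_iodone_chain[] below, and the index decrements:
 *
 *	sd_pm_iodone (2) -> sd_mapblockaddr_iodone (1) -> sd_buf_iodone (0)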
1787  */
1788 
1789 static sd_chain_t sd_iodone_chain[] = {
1790 
1791 	/* Chain for buf IO for disk drive targets (PM enabled) */
1792 	sd_buf_iodone,			/* Index: 0 */
1793 	sd_mapblockaddr_iodone,		/* Index: 1 */
1794 	sd_pm_iodone,			/* Index: 2 */
1795 
1796 	/* Chain for buf IO for disk drive targets (PM disabled) */
1797 	sd_buf_iodone,			/* Index: 3 */
1798 	sd_mapblockaddr_iodone,		/* Index: 4 */
1799 
1800 	/* Chain for buf IO for removable-media targets (PM enabled) */
1801 	sd_buf_iodone,			/* Index: 5 */
1802 	sd_mapblockaddr_iodone,		/* Index: 6 */
1803 	sd_mapblocksize_iodone,		/* Index: 7 */
1804 	sd_pm_iodone,			/* Index: 8 */
1805 
1806 	/* Chain for buf IO for removable-media targets (PM disabled) */
1807 	sd_buf_iodone,			/* Index: 9 */
1808 	sd_mapblockaddr_iodone,		/* Index: 10 */
1809 	sd_mapblocksize_iodone,		/* Index: 11 */
1810 
1811 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1812 	sd_buf_iodone,			/* Index: 12 */
1813 	sd_mapblockaddr_iodone,		/* Index: 13 */
1814 	sd_checksum_iodone,		/* Index: 14 */
1815 	sd_pm_iodone,			/* Index: 15 */
1816 
1817 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1818 	sd_buf_iodone,			/* Index: 16 */
1819 	sd_mapblockaddr_iodone,		/* Index: 17 */
1820 	sd_checksum_iodone,		/* Index: 18 */
1821 
1822 	/* Chain for USCSI commands (non-checksum targets) */
1823 	sd_uscsi_iodone,		/* Index: 19 */
1824 	sd_pm_iodone,			/* Index: 20 */
1825 
1826 	/* Chain for USCSI commands (checksum targets) */
1827 	sd_uscsi_iodone,		/* Index: 21 */
1828 	sd_checksum_uscsi_iodone,	/* Index: 22 */
1829 	sd_pm_iodone,			/* Index: 23 */
1830 
1831 	/* Chain for "direct" USCSI commands (all targets) */
1832 	sd_uscsi_iodone,		/* Index: 24 */
1833 
1834 	/* Chain for "direct priority" USCSI commands (all targets) */
1835 	sd_uscsi_iodone,		/* Index: 25 */
1836 };
1837 
1838 
1839 /*
1840  * Macros to locate the "first" function in the sd_iodone_chain[] array for
1841  * each iodone-side chain. These are located by the array index, but as the
1842  * iodone side functions are called in a decrementing-index order, the
1843  * highest index number in each chain must be specified (as these correspond
1844  * to the first function in the iodone chain that will be called by the core
1845  * at IO completion time).
1846  */
1847 
1848 #define	SD_CHAIN_DISK_IODONE			2
1849 #define	SD_CHAIN_DISK_IODONE_NO_PM		4
1850 #define	SD_CHAIN_RMMEDIA_IODONE			8
1851 #define	SD_CHAIN_RMMEDIA_IODONE_NO_PM		11
1852 #define	SD_CHAIN_CHKSUM_IODONE			15
1853 #define	SD_CHAIN_CHKSUM_IODONE_NO_PM		18
1854 #define	SD_CHAIN_USCSI_CMD_IODONE		20
1855 #define	SD_CHAIN_USCSI_CHKSUM_IODONE		23
1856 #define	SD_CHAIN_DIRECT_CMD_IODONE		24
1857 #define	SD_CHAIN_PRIORITY_CMD_IODONE		25
1858 
1859 
1860 
1861 
1862 /*
1863  * Array to map a layering chain index to the appropriate initpkt routine.
1864  * The redundant entries are present so that the index used for accessing
1865  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1866  * with this table as well.
1867  */
1868 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1869 
1870 static sd_initpkt_t	sd_initpkt_map[] = {
1871 
1872 	/* Chain for buf IO for disk drive targets (PM enabled) */
1873 	sd_initpkt_for_buf,		/* Index: 0 */
1874 	sd_initpkt_for_buf,		/* Index: 1 */
1875 	sd_initpkt_for_buf,		/* Index: 2 */
1876 
1877 	/* Chain for buf IO for disk drive targets (PM disabled) */
1878 	sd_initpkt_for_buf,		/* Index: 3 */
1879 	sd_initpkt_for_buf,		/* Index: 4 */
1880 
1881 	/* Chain for buf IO for removable-media targets (PM enabled) */
1882 	sd_initpkt_for_buf,		/* Index: 5 */
1883 	sd_initpkt_for_buf,		/* Index: 6 */
1884 	sd_initpkt_for_buf,		/* Index: 7 */
1885 	sd_initpkt_for_buf,		/* Index: 8 */
1886 
1887 	/* Chain for buf IO for removable-media targets (PM disabled) */
1888 	sd_initpkt_for_buf,		/* Index: 9 */
1889 	sd_initpkt_for_buf,		/* Index: 10 */
1890 	sd_initpkt_for_buf,		/* Index: 11 */
1891 
1892 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1893 	sd_initpkt_for_buf,		/* Index: 12 */
1894 	sd_initpkt_for_buf,		/* Index: 13 */
1895 	sd_initpkt_for_buf,		/* Index: 14 */
1896 	sd_initpkt_for_buf,		/* Index: 15 */
1897 
1898 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1899 	sd_initpkt_for_buf,		/* Index: 16 */
1900 	sd_initpkt_for_buf,		/* Index: 17 */
1901 	sd_initpkt_for_buf,		/* Index: 18 */
1902 
1903 	/* Chain for USCSI commands (non-checksum targets) */
1904 	sd_initpkt_for_uscsi,		/* Index: 19 */
1905 	sd_initpkt_for_uscsi,		/* Index: 20 */
1906 
1907 	/* Chain for USCSI commands (checksum targets) */
1908 	sd_initpkt_for_uscsi,		/* Index: 21 */
1909 	sd_initpkt_for_uscsi,		/* Index: 22 */
1910 	sd_initpkt_for_uscsi,		/* Index: 23 */
1911 
1912 	/* Chain for "direct" USCSI commands (all targets) */
1913 	sd_initpkt_for_uscsi,		/* Index: 24 */
1914 
1915 	/* Chain for "direct priority" USCSI commands (all targets) */
1916 	sd_initpkt_for_uscsi,		/* Index: 25 */
1917 
1918 };
1919 
1920 
1921 /*
1922  * Array to map a layering chain index to the appropriate destroypkt routine.
1923  * The redundant entries are present so that the index used for accessing
1924  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1925  * with this table as well.
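 *
 * A minimal sketch of the intended dispatch (illustrative only; the core
 * transport code elsewhere in this file performs the actual call):
 *
 *	rval = (*(sd_initpkt_map[xp->xb_chain_iostart]))(bp, &pktp);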
1926  */
1927 typedef void (*sd_destroypkt_t)(struct buf *);
1928 
1929 static sd_destroypkt_t	sd_destroypkt_map[] = {
1930 
1931 	/* Chain for buf IO for disk drive targets (PM enabled) */
1932 	sd_destroypkt_for_buf,		/* Index: 0 */
1933 	sd_destroypkt_for_buf,		/* Index: 1 */
1934 	sd_destroypkt_for_buf,		/* Index: 2 */
1935 
1936 	/* Chain for buf IO for disk drive targets (PM disabled) */
1937 	sd_destroypkt_for_buf,		/* Index: 3 */
1938 	sd_destroypkt_for_buf,		/* Index: 4 */
1939 
1940 	/* Chain for buf IO for removable-media targets (PM enabled) */
1941 	sd_destroypkt_for_buf,		/* Index: 5 */
1942 	sd_destroypkt_for_buf,		/* Index: 6 */
1943 	sd_destroypkt_for_buf,		/* Index: 7 */
1944 	sd_destroypkt_for_buf,		/* Index: 8 */
1945 
1946 	/* Chain for buf IO for removable-media targets (PM disabled) */
1947 	sd_destroypkt_for_buf,		/* Index: 9 */
1948 	sd_destroypkt_for_buf,		/* Index: 10 */
1949 	sd_destroypkt_for_buf,		/* Index: 11 */
1950 
1951 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1952 	sd_destroypkt_for_buf,		/* Index: 12 */
1953 	sd_destroypkt_for_buf,		/* Index: 13 */
1954 	sd_destroypkt_for_buf,		/* Index: 14 */
1955 	sd_destroypkt_for_buf,		/* Index: 15 */
1956 
1957 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1958 	sd_destroypkt_for_buf,		/* Index: 16 */
1959 	sd_destroypkt_for_buf,		/* Index: 17 */
1960 	sd_destroypkt_for_buf,		/* Index: 18 */
1961 
1962 	/* Chain for USCSI commands (non-checksum targets) */
1963 	sd_destroypkt_for_uscsi,	/* Index: 19 */
1964 	sd_destroypkt_for_uscsi,	/* Index: 20 */
1965 
1966 	/* Chain for USCSI commands (checksum targets) */
1967 	sd_destroypkt_for_uscsi,	/* Index: 21 */
1968 	sd_destroypkt_for_uscsi,	/* Index: 22 */
1969 	sd_destroypkt_for_uscsi,	/* Index: 23 */
1970 
1971 	/* Chain for "direct" USCSI commands (all targets) */
1972 	sd_destroypkt_for_uscsi,	/* Index: 24 */
1973 
1974 	/* Chain for "direct priority" USCSI commands (all targets) */
1975 	sd_destroypkt_for_uscsi,	/* Index: 25 */
1976 
1977 };
1978 
1979 
1980 
1981 /*
1982  * Array to map a layering chain index to the appropriate chain "type".
1983  * The chain type indicates a specific property/usage of the chain.
1984  * The redundant entries are present so that the index used for accessing
1985  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1986  * with this table as well.
1987  */
1988 
1989 #define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
1990 #define	SD_CHAIN_BUFIO			1	/* regular buf IO */
1991 #define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
1992 #define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
1993 #define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
1994 						/* (for error recovery) */
1995 
1996 static int sd_chain_type_map[] = {
1997 
1998 	/* Chain for buf IO for disk drive targets (PM enabled) */
1999 	SD_CHAIN_BUFIO,			/* Index: 0 */
2000 	SD_CHAIN_BUFIO,			/* Index: 1 */
2001 	SD_CHAIN_BUFIO,			/* Index: 2 */
2002 
2003 	/* Chain for buf IO for disk drive targets (PM disabled) */
2004 	SD_CHAIN_BUFIO,			/* Index: 3 */
2005 	SD_CHAIN_BUFIO,			/* Index: 4 */
2006 
2007 	/* Chain for buf IO for removable-media targets (PM enabled) */
2008 	SD_CHAIN_BUFIO,			/* Index: 5 */
2009 	SD_CHAIN_BUFIO,			/* Index: 6 */
2010 	SD_CHAIN_BUFIO,			/* Index: 7 */
2011 	SD_CHAIN_BUFIO,			/* Index: 8 */
2012 
2013 	/* Chain for buf IO for removable-media targets (PM disabled) */
2014 	SD_CHAIN_BUFIO,			/* Index: 9 */
2015 	SD_CHAIN_BUFIO,			/* Index: 10 */
2016 	SD_CHAIN_BUFIO,			/* Index: 11 */
2017 
2018 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
2019 	SD_CHAIN_BUFIO,			/* Index: 12 */
2020 	SD_CHAIN_BUFIO,			/* Index: 13 */
2021 	SD_CHAIN_BUFIO,			/* Index: 14 */
2022 	SD_CHAIN_BUFIO,			/* Index: 15 */
2023 
2024 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
2025 	SD_CHAIN_BUFIO,			/* Index: 16 */
2026 	SD_CHAIN_BUFIO,			/* Index: 17 */
2027 	SD_CHAIN_BUFIO,			/* Index: 18 */
2028 
2029 	/* Chain for USCSI commands (non-checksum targets) */
2030 	SD_CHAIN_USCSI,			/* Index: 19 */
2031 	SD_CHAIN_USCSI,			/* Index: 20 */
2032 
2033 	/* Chain for USCSI commands (checksum targets) */
2034 	SD_CHAIN_USCSI,			/* Index: 21 */
2035 	SD_CHAIN_USCSI,			/* Index: 22 */
2036 	SD_CHAIN_USCSI,			/* Index: 23 */
2037 
2038 	/* Chain for "direct" USCSI commands (all targets) */
2039 	SD_CHAIN_DIRECT,		/* Index: 24 */
2040 
2041 	/* Chain for "direct priority" USCSI commands (all targets) */
2042 	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */
2043 };
2044 
2045 
2046 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2047 #define	SD_IS_BUFIO(xp)			\
2048 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2049 
2050 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2051 #define	SD_IS_DIRECT_PRIORITY(xp)	\
2052 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2053 
2054 
2055 
2056 /*
2057  * Struct, array, and macros to map a specific chain to the appropriate
2058  * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2059  *
2060  * The sd_chain_index_map[] array is used at attach time to set the various
2061  * un_xxx_chain type members of the sd_lun softstate to the specific layering
2062  * chain to be used with the instance. This allows different instances to use
2063  * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2064  * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2065  * values at sd_xbuf init time, this allows (1) layering chains to be changed
2066  * dynamically and without the use of locking; and (2) a layer to update the
2067  * xb_chain_io[start|done] member in a given xbuf with its current index value,
2068  * to allow for deferred processing of an IO within the same chain from a
2069  * different execution context.
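 *
 * A minimal sketch of the intended usage (illustrative only):
 *
 *	at attach time:
 *		un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
 *
 *	at sd_xbuf init time:
 *		index = un->un_buf_chain_type;
 *		xp->xb_chain_iostart =
 *		    sd_chain_index_map[index].sci_iostart_index;
 *		xp->xb_chain_iodone =
 *		    sd_chain_index_map[index].sci_iodone_index;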
2070  */
2071 
2072 struct sd_chain_index {
2073 	int	sci_iostart_index;
2074 	int	sci_iodone_index;
2075 };
2076 
2077 static struct sd_chain_index	sd_chain_index_map[] = {
2078 	{ SD_CHAIN_DISK_IOSTART,		SD_CHAIN_DISK_IODONE },
2079 	{ SD_CHAIN_DISK_IOSTART_NO_PM,		SD_CHAIN_DISK_IODONE_NO_PM },
2080 	{ SD_CHAIN_RMMEDIA_IOSTART,		SD_CHAIN_RMMEDIA_IODONE },
2081 	{ SD_CHAIN_RMMEDIA_IOSTART_NO_PM,	SD_CHAIN_RMMEDIA_IODONE_NO_PM },
2082 	{ SD_CHAIN_CHKSUM_IOSTART,		SD_CHAIN_CHKSUM_IODONE },
2083 	{ SD_CHAIN_CHKSUM_IOSTART_NO_PM,	SD_CHAIN_CHKSUM_IODONE_NO_PM },
2084 	{ SD_CHAIN_USCSI_CMD_IOSTART,		SD_CHAIN_USCSI_CMD_IODONE },
2085 	{ SD_CHAIN_USCSI_CHKSUM_IOSTART,	SD_CHAIN_USCSI_CHKSUM_IODONE },
2086 	{ SD_CHAIN_DIRECT_CMD_IOSTART,		SD_CHAIN_DIRECT_CMD_IODONE },
2087 	{ SD_CHAIN_PRIORITY_CMD_IOSTART,	SD_CHAIN_PRIORITY_CMD_IODONE },
2088 };
2089 
2090 
2091 /*
2092  * The following are indexes into the sd_chain_index_map[] array.
2093  */
2094 
2095 /* un->un_buf_chain_type must be set to one of these */
2096 #define	SD_CHAIN_INFO_DISK		0
2097 #define	SD_CHAIN_INFO_DISK_NO_PM	1
2098 #define	SD_CHAIN_INFO_RMMEDIA		2
2099 #define	SD_CHAIN_INFO_RMMEDIA_NO_PM	3
2100 #define	SD_CHAIN_INFO_CHKSUM		4
2101 #define	SD_CHAIN_INFO_CHKSUM_NO_PM	5
2102 
2103 /* un->un_uscsi_chain_type must be set to one of these */
2104 #define	SD_CHAIN_INFO_USCSI_CMD		6
2105 /* USCSI with PM disabled is the same as DIRECT */
2106 #define	SD_CHAIN_INFO_USCSI_CMD_NO_PM	8
2107 #define	SD_CHAIN_INFO_USCSI_CHKSUM	7
2108 
2109 /* un->un_direct_chain_type must be set to one of these */
2110 #define	SD_CHAIN_INFO_DIRECT_CMD	8
2111 
2112 /* un->un_priority_chain_type must be set to one of these */
2113 #define	SD_CHAIN_INFO_PRIORITY_CMD	9
2114 
2115 /* size for devid inquiries */
2116 #define	MAX_INQUIRY_SIZE	0xF0
2117 
2118 /*
2119  * Macros used by functions to pass a given buf(9S) struct along to the
2120  * next function in the layering chain for further processing.
2121  *
2122  * In the following macros, passing more than three arguments to the called
2123  * routines causes the optimizer for the SPARC compiler to stop doing tail
2124  * call elimination, which results in significant performance degradation.
2125  */
2126 #define	SD_BEGIN_IOSTART(index, un, bp)	\
2127 	((*(sd_iostart_chain[index]))(index, un, bp))
2128 
2129 #define	SD_BEGIN_IODONE(index, un, bp)	\
2130 	((*(sd_iodone_chain[index]))(index, un, bp))
2131 
2132 #define	SD_NEXT_IOSTART(index, un, bp)		\
2133 	((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))
2134 
2135 #define	SD_NEXT_IODONE(index, un, bp)		\
2136 	((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))
2137 
2138 /*
2139  * Function: _init
2140  *
2141  * Description: This is the driver _init(9E) entry point.
2142  *
2143  * Return Code: Returns the value from mod_install(9F) or
2144  *		ddi_soft_state_init(9F) as appropriate.
2145  *
2146  * Context: Called when driver module is loaded.
2147 */ 2148 2149 int 2150 _init(void) 2151 { 2152 int err; 2153 2154 /* establish driver name from module name */ 2155 sd_label = (char *)mod_modname(&modlinkage); 2156 2157 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2158 SD_MAXUNIT); 2159 2160 if (err != 0) { 2161 return (err); 2162 } 2163 2164 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2165 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2166 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2167 2168 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2169 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2170 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2171 2172 /* 2173 * it's ok to init here even for fibre device 2174 */ 2175 sd_scsi_probe_cache_init(); 2176 2177 sd_scsi_target_lun_init(); 2178 2179 /* 2180 * Creating taskq before mod_install ensures that all callers (threads) 2181 * that enter the module after a successful mod_install encounter 2182 * a valid taskq. 2183 */ 2184 sd_taskq_create(); 2185 2186 err = mod_install(&modlinkage); 2187 if (err != 0) { 2188 /* delete taskq if install fails */ 2189 sd_taskq_delete(); 2190 2191 mutex_destroy(&sd_detach_mutex); 2192 mutex_destroy(&sd_log_mutex); 2193 mutex_destroy(&sd_label_mutex); 2194 2195 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2196 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2197 cv_destroy(&sd_tr.srq_inprocess_cv); 2198 2199 sd_scsi_probe_cache_fini(); 2200 2201 sd_scsi_target_lun_fini(); 2202 2203 ddi_soft_state_fini(&sd_state); 2204 return (err); 2205 } 2206 2207 return (err); 2208 } 2209 2210 2211 /* 2212 * Function: _fini 2213 * 2214 * Description: This is the driver _fini(9E) entry point. 2215 * 2216 * Return Code: Returns the value from mod_remove(9F) 2217 * 2218 * Context: Called when driver module is unloaded. 2219 */ 2220 2221 int 2222 _fini(void) 2223 { 2224 int err; 2225 2226 if ((err = mod_remove(&modlinkage)) != 0) { 2227 return (err); 2228 } 2229 2230 sd_taskq_delete(); 2231 2232 mutex_destroy(&sd_detach_mutex); 2233 mutex_destroy(&sd_log_mutex); 2234 mutex_destroy(&sd_label_mutex); 2235 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2236 2237 sd_scsi_probe_cache_fini(); 2238 2239 sd_scsi_target_lun_fini(); 2240 2241 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2242 cv_destroy(&sd_tr.srq_inprocess_cv); 2243 2244 ddi_soft_state_fini(&sd_state); 2245 2246 return (err); 2247 } 2248 2249 2250 /* 2251 * Function: _info 2252 * 2253 * Description: This is the driver _info(9E) entry point. 2254 * 2255 * Arguments: modinfop - pointer to the driver modinfo structure 2256 * 2257 * Return Code: Returns the value from mod_info(9F). 2258 * 2259 * Context: Kernel thread context 2260 */ 2261 2262 int 2263 _info(struct modinfo *modinfop) 2264 { 2265 return (mod_info(&modlinkage, modinfop)); 2266 } 2267 2268 2269 /* 2270 * The following routines implement the driver message logging facility. 2271 * They provide component- and level- based debug output filtering. 2272 * Output may also be restricted to messages for a single instance by 2273 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2274 * to NULL, then messages for all instances are printed. 2275 * 2276 * These routines have been cloned from each other due to the language 2277 * constraints of macros and variable argument list processing. 2278 */ 2279 2280 2281 /* 2282 * Function: sd_log_err 2283 * 2284 * Description: This routine is called by the SD_ERROR macro for debug 2285 * logging of error conditions. 
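 *		For example, a typical call site in this file looks like:
 *
 *		    SD_ERROR(SD_LOG_COMMON, un,
 *			"sd_enable_descr_sense: mode sense ctrl page failed\n");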
2286  *
2287  * Arguments: comp - driver component being logged
2288  *		un - pointer to driver soft state (unit) structure
2289  *		fmt - error string and format to be logged
2290  */
2291 
2292 static void
2293 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2294 {
2295 	va_list		ap;
2296 	dev_info_t	*dev;
2297 
2298 	ASSERT(un != NULL);
2299 	dev = SD_DEVINFO(un);
2300 	ASSERT(dev != NULL);
2301 
2302 	/*
2303 	 * Filter messages based on the global component and level masks.
2304 	 * Also print if un matches the value of sd_debug_un, or if
2305 	 * sd_debug_un is set to NULL.
2306 	 */
2307 	if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2308 	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2309 		mutex_enter(&sd_log_mutex);
2310 		va_start(ap, fmt);
2311 		(void) vsprintf(sd_log_buf, fmt, ap);
2312 		va_end(ap);
2313 		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2314 		mutex_exit(&sd_log_mutex);
2315 	}
2316 #ifdef SD_FAULT_INJECTION
2317 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2318 	if (un->sd_injection_mask & comp) {
2319 		mutex_enter(&sd_log_mutex);
2320 		va_start(ap, fmt);
2321 		(void) vsprintf(sd_log_buf, fmt, ap);
2322 		va_end(ap);
2323 		sd_injection_log(sd_log_buf, un);
2324 		mutex_exit(&sd_log_mutex);
2325 	}
2326 #endif
2327 }
2328 
2329 
2330 /*
2331  * Function: sd_log_info
2332  *
2333  * Description: This routine is called by the SD_INFO macro for debug
2334  *		logging of general purpose informational conditions.
2335  *
2336  * Arguments: component - driver component being logged
2337  *		un - pointer to driver soft state (unit) structure
2338  *		fmt - info string and format to be logged
2339  */
2340 
2341 static void
2342 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2343 {
2344 	va_list		ap;
2345 	dev_info_t	*dev;
2346 
2347 	ASSERT(un != NULL);
2348 	dev = SD_DEVINFO(un);
2349 	ASSERT(dev != NULL);
2350 
2351 	/*
2352 	 * Filter messages based on the global component and level masks.
2353 	 * Also print if un matches the value of sd_debug_un, or if
2354 	 * sd_debug_un is set to NULL.
2355 	 */
2356 	if ((sd_component_mask & component) &&
2357 	    (sd_level_mask & SD_LOGMASK_INFO) &&
2358 	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2359 		mutex_enter(&sd_log_mutex);
2360 		va_start(ap, fmt);
2361 		(void) vsprintf(sd_log_buf, fmt, ap);
2362 		va_end(ap);
2363 		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2364 		mutex_exit(&sd_log_mutex);
2365 	}
2366 #ifdef SD_FAULT_INJECTION
2367 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2368 	if (un->sd_injection_mask & component) {
2369 		mutex_enter(&sd_log_mutex);
2370 		va_start(ap, fmt);
2371 		(void) vsprintf(sd_log_buf, fmt, ap);
2372 		va_end(ap);
2373 		sd_injection_log(sd_log_buf, un);
2374 		mutex_exit(&sd_log_mutex);
2375 	}
2376 #endif
2377 }
2378 
2379 
2380 /*
2381  * Function: sd_log_trace
2382  *
2383  * Description: This routine is called by the SD_TRACE macro for debug
2384  *		logging of trace conditions (i.e. function entry/exit).
2385  *
2386  * Arguments: component - driver component being logged
2387  *		un - pointer to driver soft state (unit) structure
2388  *		fmt - trace string and format to be logged
2389  */
2390 
2391 static void
2392 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2393 {
2394 	va_list		ap;
2395 	dev_info_t	*dev;
2396 
2397 	ASSERT(un != NULL);
2398 	dev = SD_DEVINFO(un);
2399 	ASSERT(dev != NULL);
2400 
2401 	/*
2402 	 * Filter messages based on the global component and level masks.
2403 	 * Also print if un matches the value of sd_debug_un, or if
2404 	 * sd_debug_un is set to NULL.
2405 */ 2406 if ((sd_component_mask & component) && 2407 (sd_level_mask & SD_LOGMASK_TRACE) && 2408 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2409 mutex_enter(&sd_log_mutex); 2410 va_start(ap, fmt); 2411 (void) vsprintf(sd_log_buf, fmt, ap); 2412 va_end(ap); 2413 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2414 mutex_exit(&sd_log_mutex); 2415 } 2416 #ifdef SD_FAULT_INJECTION 2417 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2418 if (un->sd_injection_mask & component) { 2419 mutex_enter(&sd_log_mutex); 2420 va_start(ap, fmt); 2421 (void) vsprintf(sd_log_buf, fmt, ap); 2422 va_end(ap); 2423 sd_injection_log(sd_log_buf, un); 2424 mutex_exit(&sd_log_mutex); 2425 } 2426 #endif 2427 } 2428 2429 2430 /* 2431 * Function: sdprobe 2432 * 2433 * Description: This is the driver probe(9e) entry point function. 2434 * 2435 * Arguments: devi - opaque device info handle 2436 * 2437 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2438 * DDI_PROBE_FAILURE: If the probe failed. 2439 * DDI_PROBE_PARTIAL: If the instance is not present now, 2440 * but may be present in the future. 2441 */ 2442 2443 static int 2444 sdprobe(dev_info_t *devi) 2445 { 2446 struct scsi_device *devp; 2447 int rval; 2448 int instance; 2449 2450 /* 2451 * if it wasn't for pln, sdprobe could actually be nulldev 2452 * in the "__fibre" case. 2453 */ 2454 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2455 return (DDI_PROBE_DONTCARE); 2456 } 2457 2458 devp = ddi_get_driver_private(devi); 2459 2460 if (devp == NULL) { 2461 /* Ooops... nexus driver is mis-configured... */ 2462 return (DDI_PROBE_FAILURE); 2463 } 2464 2465 instance = ddi_get_instance(devi); 2466 2467 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2468 return (DDI_PROBE_PARTIAL); 2469 } 2470 2471 /* 2472 * Call the SCSA utility probe routine to see if we actually 2473 * have a target at this SCSI nexus. 2474 */ 2475 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2476 case SCSIPROBE_EXISTS: 2477 switch (devp->sd_inq->inq_dtype) { 2478 case DTYPE_DIRECT: 2479 rval = DDI_PROBE_SUCCESS; 2480 break; 2481 case DTYPE_RODIRECT: 2482 /* CDs etc. Can be removable media */ 2483 rval = DDI_PROBE_SUCCESS; 2484 break; 2485 case DTYPE_OPTICAL: 2486 /* 2487 * Rewritable optical driver HP115AA 2488 * Can also be removable media 2489 */ 2490 2491 /* 2492 * Do not attempt to bind to DTYPE_OPTICAL if 2493 * pre solaris 9 sparc sd behavior is required 2494 * 2495 * If first time through and sd_dtype_optical_bind 2496 * has not been set in /etc/system check properties 2497 */ 2498 2499 if (sd_dtype_optical_bind < 0) { 2500 sd_dtype_optical_bind = ddi_prop_get_int 2501 (DDI_DEV_T_ANY, devi, 0, 2502 "optical-device-bind", 1); 2503 } 2504 2505 if (sd_dtype_optical_bind == 0) { 2506 rval = DDI_PROBE_FAILURE; 2507 } else { 2508 rval = DDI_PROBE_SUCCESS; 2509 } 2510 break; 2511 2512 case DTYPE_NOTPRESENT: 2513 default: 2514 rval = DDI_PROBE_FAILURE; 2515 break; 2516 } 2517 break; 2518 default: 2519 rval = DDI_PROBE_PARTIAL; 2520 break; 2521 } 2522 2523 /* 2524 * This routine checks for resource allocation prior to freeing, 2525 * so it will take care of the "smart probing" case where a 2526 * scsi_probe() may or may not have been issued and will *not* 2527 * free previously-freed resources. 2528 */ 2529 scsi_unprobe(devp); 2530 return (rval); 2531 } 2532 2533 2534 /* 2535 * Function: sdinfo 2536 * 2537 * Description: This is the driver getinfo(9e) entry point function. 
2538 * Given the device number, return the devinfo pointer from 2539 * the scsi_device structure or the instance number 2540 * associated with the dev_t. 2541 * 2542 * Arguments: dip - pointer to device info structure 2543 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2544 * DDI_INFO_DEVT2INSTANCE) 2545 * arg - driver dev_t 2546 * resultp - user buffer for request response 2547 * 2548 * Return Code: DDI_SUCCESS 2549 * DDI_FAILURE 2550 */ 2551 /* ARGSUSED */ 2552 static int 2553 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2554 { 2555 struct sd_lun *un; 2556 dev_t dev; 2557 int instance; 2558 int error; 2559 2560 switch (infocmd) { 2561 case DDI_INFO_DEVT2DEVINFO: 2562 dev = (dev_t)arg; 2563 instance = SDUNIT(dev); 2564 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2565 return (DDI_FAILURE); 2566 } 2567 *result = (void *) SD_DEVINFO(un); 2568 error = DDI_SUCCESS; 2569 break; 2570 case DDI_INFO_DEVT2INSTANCE: 2571 dev = (dev_t)arg; 2572 instance = SDUNIT(dev); 2573 *result = (void *)(uintptr_t)instance; 2574 error = DDI_SUCCESS; 2575 break; 2576 default: 2577 error = DDI_FAILURE; 2578 } 2579 return (error); 2580 } 2581 2582 /* 2583 * Function: sd_prop_op 2584 * 2585 * Description: This is the driver prop_op(9e) entry point function. 2586 * Return the number of blocks for the partition in question 2587 * or forward the request to the property facilities. 2588 * 2589 * Arguments: dev - device number 2590 * dip - pointer to device info structure 2591 * prop_op - property operator 2592 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2593 * name - pointer to property name 2594 * valuep - pointer or address of the user buffer 2595 * lengthp - property length 2596 * 2597 * Return Code: DDI_PROP_SUCCESS 2598 * DDI_PROP_NOT_FOUND 2599 * DDI_PROP_UNDEFINED 2600 * DDI_PROP_NO_MEMORY 2601 * DDI_PROP_BUF_TOO_SMALL 2602 */ 2603 2604 static int 2605 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2606 char *name, caddr_t valuep, int *lengthp) 2607 { 2608 struct sd_lun *un; 2609 2610 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2611 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2612 name, valuep, lengthp)); 2613 2614 return (cmlb_prop_op(un->un_cmlbhandle, 2615 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2616 SDPART(dev), (void *)SD_PATH_DIRECT)); 2617 } 2618 2619 /* 2620 * The following functions are for smart probing: 2621 * sd_scsi_probe_cache_init() 2622 * sd_scsi_probe_cache_fini() 2623 * sd_scsi_clear_probe_cache() 2624 * sd_scsi_probe_with_cache() 2625 */ 2626 2627 /* 2628 * Function: sd_scsi_probe_cache_init 2629 * 2630 * Description: Initializes the probe response cache mutex and head pointer. 2631 * 2632 * Context: Kernel thread context 2633 */ 2634 2635 static void 2636 sd_scsi_probe_cache_init(void) 2637 { 2638 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2639 sd_scsi_probe_cache_head = NULL; 2640 } 2641 2642 2643 /* 2644 * Function: sd_scsi_probe_cache_fini 2645 * 2646 * Description: Frees all resources associated with the probe response cache. 
2647  *
2648  * Context: Kernel thread context
2649  */
2650 
2651 static void
2652 sd_scsi_probe_cache_fini(void)
2653 {
2654 	struct sd_scsi_probe_cache *cp;
2655 	struct sd_scsi_probe_cache *ncp;
2656 
2657 	/* Clean up our smart probing linked list */
2658 	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) {
2659 		ncp = cp->next;
2660 		kmem_free(cp, sizeof (struct sd_scsi_probe_cache));
2661 	}
2662 	sd_scsi_probe_cache_head = NULL;
2663 	mutex_destroy(&sd_scsi_probe_cache_mutex);
2664 }
2665 
2666 
2667 /*
2668  * Function: sd_scsi_clear_probe_cache
2669  *
2670  * Description: This routine clears the probe response cache. This is
2671  *		done when open() returns ENXIO so that when deferred
2672  *		attach is attempted (possibly after a device has been
2673  *		turned on) we will retry the probe. Since we don't know
2674  *		which target we failed to open, we just clear the
2675  *		entire cache.
2676  *
2677  * Context: Kernel thread context
2678  */
2679 
2680 static void
2681 sd_scsi_clear_probe_cache(void)
2682 {
2683 	struct sd_scsi_probe_cache	*cp;
2684 	int				i;
2685 
2686 	mutex_enter(&sd_scsi_probe_cache_mutex);
2687 	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2688 		/*
2689 		 * Reset all entries to SCSIPROBE_EXISTS. This will
2690 		 * force probing to be performed the next time
2691 		 * sd_scsi_probe_with_cache is called.
2692 		 */
2693 		for (i = 0; i < NTARGETS_WIDE; i++) {
2694 			cp->cache[i] = SCSIPROBE_EXISTS;
2695 		}
2696 	}
2697 	mutex_exit(&sd_scsi_probe_cache_mutex);
2698 }
2699 
2700 
2701 /*
2702  * Function: sd_scsi_probe_with_cache
2703  *
2704  * Description: This routine implements support for a scsi device probe
2705  *		with cache. The driver maintains a cache of the target
2706  *		responses to scsi probes. If we get no response from a
2707  *		target during a probe inquiry, we remember that, and we
2708  *		avoid additional calls to scsi_probe on non-zero LUNs
2709  *		on the same target until the cache is cleared. By doing
2710  *		so we avoid the 1/4 sec selection timeout for nonzero
2711  *		LUNs. lun0 of a target is always probed.
2712  *
2713  * Arguments: devp - Pointer to a scsi_device(9S) structure
2714  *		waitfn - indicates what the allocator routines should
2715  *			do when resources are not available. This value
2716  *			is passed on to scsi_probe() when that routine
2717  *			is called.
2718  *
2719  * Return Code: SCSIPROBE_NORESP if a NORESP is present in the probe
2720  *		response cache; otherwise the value returned by scsi_probe(9F).
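 *
 *		For example, if a probe of lun 0 at a given target gets no
 *		response, subsequent probes of nonzero luns at that target
 *		return SCSIPROBE_NORESP directly from the cache (avoiding
 *		the selection timeout) until the cache is cleared or lun 0
 *		is probed again.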
2721  *
2722  * Context: Kernel thread context
2723  */
2724 
2725 static int
2726 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
2727 {
2728 	struct sd_scsi_probe_cache	*cp;
2729 	dev_info_t	*pdip = ddi_get_parent(devp->sd_dev);
2730 	int		lun, tgt;
2731 
2732 	lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2733 	    SCSI_ADDR_PROP_LUN, 0);
2734 	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2735 	    SCSI_ADDR_PROP_TARGET, -1);
2736 
2737 	/* Make sure caching is enabled and the target is in range */
2738 	if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
2739 		/* do it the old way (no cache) */
2740 		return (scsi_probe(devp, waitfn));
2741 	}
2742 
2743 	mutex_enter(&sd_scsi_probe_cache_mutex);
2744 
2745 	/* Find the cache for this scsi bus instance */
2746 	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2747 		if (cp->pdip == pdip) {
2748 			break;
2749 		}
2750 	}
2751 
2752 	/* If we can't find a cache for this pdip, create one */
2753 	if (cp == NULL) {
2754 		int i;
2755 
2756 		cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
2757 		    KM_SLEEP);
2758 		cp->pdip = pdip;
2759 		cp->next = sd_scsi_probe_cache_head;
2760 		sd_scsi_probe_cache_head = cp;
2761 		for (i = 0; i < NTARGETS_WIDE; i++) {
2762 			cp->cache[i] = SCSIPROBE_EXISTS;
2763 		}
2764 	}
2765 
2766 	mutex_exit(&sd_scsi_probe_cache_mutex);
2767 
2768 	/* Recompute the cache for this target if LUN zero */
2769 	if (lun == 0) {
2770 		cp->cache[tgt] = SCSIPROBE_EXISTS;
2771 	}
2772 
2773 	/* Don't probe if cache remembers a NORESP from a previous LUN. */
2774 	if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
2775 		return (SCSIPROBE_NORESP);
2776 	}
2777 
2778 	/* Do the actual probe; save & return the result */
2779 	return (cp->cache[tgt] = scsi_probe(devp, waitfn));
2780 }
2781 
2782 
2783 /*
2784  * Function: sd_scsi_target_lun_init
2785  *
2786  * Description: Initializes the attached lun chain mutex and head pointer.
2787  *
2788  * Context: Kernel thread context
2789  */
2790 
2791 static void
2792 sd_scsi_target_lun_init(void)
2793 {
2794 	mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
2795 	sd_scsi_target_lun_head = NULL;
2796 }
2797 
2798 
2799 /*
2800  * Function: sd_scsi_target_lun_fini
2801  *
2802  * Description: Frees all resources associated with the attached lun
2803  *		chain.
2804  *
2805  * Context: Kernel thread context
2806  */
2807 
2808 static void
2809 sd_scsi_target_lun_fini(void)
2810 {
2811 	struct sd_scsi_hba_tgt_lun	*cp;
2812 	struct sd_scsi_hba_tgt_lun	*ncp;
2813 
2814 	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
2815 		ncp = cp->next;
2816 		kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
2817 	}
2818 	sd_scsi_target_lun_head = NULL;
2819 	mutex_destroy(&sd_scsi_target_lun_mutex);
2820 }
2821 
2822 
2823 /*
2824  * Function: sd_scsi_get_target_lun_count
2825  *
2826  * Description: This routine checks the attached lun chain to see how
2827  *		many luns are attached on the required SCSI controller
2828  *		and target. Currently, some capabilities, such as tagged
2829  *		queueing, are supported by the HBA on a per-target basis,
2830  *		so all luns on a target share the same capabilities and
2831  *		sd should set such capabilities only once per target. This
2832  *		function is called when sd needs to decide how many luns
2833  *		are already attached on a target.
2834  *
2835  * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2836  *		controller device.
2837  *		target - The target ID on the controller's SCSI bus.
2838  *
2839  * Return Code: The number of luns attached on the required target and
2840  *		controller.
2841  *		-1 if target ID is not in parallel SCSI scope or the given
2842  *		dip is not in the chain.
2843  *
2844  * Context: Kernel thread context
2845  */
2846 
2847 static int
2848 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
2849 {
2850 	struct sd_scsi_hba_tgt_lun	*cp;
2851 
2852 	if ((target < 0) || (target >= NTARGETS_WIDE)) {
2853 		return (-1);
2854 	}
2855 
2856 	mutex_enter(&sd_scsi_target_lun_mutex);
2857 
2858 	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2859 		if (cp->pdip == dip) {
2860 			break;
2861 		}
2862 	}
2863 
2864 	mutex_exit(&sd_scsi_target_lun_mutex);
2865 
2866 	if (cp == NULL) {
2867 		return (-1);
2868 	}
2869 
2870 	return (cp->nlun[target]);
2871 }
2872 
2873 
2874 /*
2875  * Function: sd_scsi_update_lun_on_target
2876  *
2877  * Description: This routine is used to update the attached lun chain when a
2878  *		lun is attached or detached on a target.
2879  *
2880  * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2881  *		controller device.
2882  *		target - The target ID on the controller's SCSI bus.
2883  *		flag - Indicates whether the lun is attached or detached.
2884  *
2885  * Context: Kernel thread context
2886  */
2887 
2888 static void
2889 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
2890 {
2891 	struct sd_scsi_hba_tgt_lun	*cp;
2892 
2893 	mutex_enter(&sd_scsi_target_lun_mutex);
2894 
2895 	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
2896 		if (cp->pdip == dip) {
2897 			break;
2898 		}
2899 	}
2900 
2901 	if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
2902 		cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
2903 		    KM_SLEEP);
2904 		cp->pdip = dip;
2905 		cp->next = sd_scsi_target_lun_head;
2906 		sd_scsi_target_lun_head = cp;
2907 	}
2908 
2909 	mutex_exit(&sd_scsi_target_lun_mutex);
2910 
2911 	if (cp != NULL) {
2912 		if (flag == SD_SCSI_LUN_ATTACH) {
2913 			cp->nlun[target]++;
2914 		} else {
2915 			cp->nlun[target]--;
2916 		}
2917 	}
2918 }
2919 
2920 
2921 /*
2922  * Function: sd_spin_up_unit
2923  *
2924  * Description: Issues the following commands to spin up the device:
2925  *		START STOP UNIT, and INQUIRY.
2926  *
2927  * Arguments: un - driver soft state (unit) structure
2928  *
2929  * Return Code: 0 - success
2930  *		EIO - failure
2931  *		EACCES - reservation conflict
2932  *
2933  * Context: Kernel thread context
2934  */
2935 
2936 static int
2937 sd_spin_up_unit(struct sd_lun *un)
2938 {
2939 	size_t	resid		= 0;
2940 	int	has_conflict	= FALSE;
2941 	uchar_t *bufaddr;
2942 
2943 	ASSERT(un != NULL);
2944 
2945 	/*
2946 	 * Send a throwaway START UNIT command.
2947 	 *
2948 	 * If we fail on this, we don't care presently what precisely
2949 	 * is wrong. EMC's arrays will also fail this with a check
2950 	 * condition (0x2/0x4/0x3) if the device is "inactive," but
2951 	 * we don't want to fail the attach because it may become
2952 	 * "active" later.
2953 	 */
2954 	if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT)
2955 	    == EACCES)
2956 		has_conflict = TRUE;
2957 
2958 	/*
2959 	 * Send another INQUIRY command to the target. This is necessary for
2960 	 * non-removable media direct access devices because their INQUIRY data
2961 	 * may not be fully qualified until they are spun up (perhaps via the
2962 	 * START command above). (Note: This seems to be needed for some
2963 	 * legacy devices only.) The INQUIRY command should succeed even if a
2964 	 * Reservation Conflict is present.
2965 */ 2966 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 2967 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) { 2968 kmem_free(bufaddr, SUN_INQSIZE); 2969 return (EIO); 2970 } 2971 2972 /* 2973 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 2974 * Note that this routine does not return a failure here even if the 2975 * INQUIRY command did not return any data. This is a legacy behavior. 2976 */ 2977 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 2978 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 2979 } 2980 2981 kmem_free(bufaddr, SUN_INQSIZE); 2982 2983 /* If we hit a reservation conflict above, tell the caller. */ 2984 if (has_conflict == TRUE) { 2985 return (EACCES); 2986 } 2987 2988 return (0); 2989 } 2990 2991 #ifdef _LP64 2992 /* 2993 * Function: sd_enable_descr_sense 2994 * 2995 * Description: This routine attempts to select descriptor sense format 2996 * using the Control mode page. Devices that support 64 bit 2997 * LBAs (for >2TB luns) should also implement descriptor 2998 * sense data so we will call this function whenever we see 2999 * a lun larger than 2TB. If for some reason the device 3000 * supports 64 bit LBAs but doesn't support descriptor sense 3001 * presumably the mode select will fail. Everything will 3002 * continue to work normally except that we will not get 3003 * complete sense data for commands that fail with an LBA 3004 * larger than 32 bits. 3005 * 3006 * Arguments: un - driver soft state (unit) structure 3007 * 3008 * Context: Kernel thread context only 3009 */ 3010 3011 static void 3012 sd_enable_descr_sense(struct sd_lun *un) 3013 { 3014 uchar_t *header; 3015 struct mode_control_scsi3 *ctrl_bufp; 3016 size_t buflen; 3017 size_t bd_len; 3018 3019 /* 3020 * Read MODE SENSE page 0xA, Control Mode Page 3021 */ 3022 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3023 sizeof (struct mode_control_scsi3); 3024 header = kmem_zalloc(buflen, KM_SLEEP); 3025 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 3026 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) { 3027 SD_ERROR(SD_LOG_COMMON, un, 3028 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3029 goto eds_exit; 3030 } 3031 3032 /* 3033 * Determine size of Block Descriptors in order to locate 3034 * the mode page data. ATAPI devices return 0, SCSI devices 3035 * should return MODE_BLK_DESC_LENGTH. 3036 */ 3037 bd_len = ((struct mode_header *)header)->bdesc_length; 3038 3039 /* Clear the mode data length field for MODE SELECT */ 3040 ((struct mode_header *)header)->length = 0; 3041 3042 ctrl_bufp = (struct mode_control_scsi3 *) 3043 (header + MODE_HEADER_LENGTH + bd_len); 3044 3045 /* 3046 * If the page length is smaller than the expected value, 3047 * the target device doesn't support D_SENSE. Bail out here. 3048 */ 3049 if (ctrl_bufp->mode_page.length < 3050 sizeof (struct mode_control_scsi3) - 2) { 3051 SD_ERROR(SD_LOG_COMMON, un, 3052 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3053 goto eds_exit; 3054 } 3055 3056 /* 3057 * Clear PS bit for MODE SELECT 3058 */ 3059 ctrl_bufp->mode_page.ps = 0; 3060 3061 /* 3062 * Set D_SENSE to enable descriptor sense format. 
3063 	 */
3064 	ctrl_bufp->d_sense = 1;
3065 
3066 	/*
3067 	 * Use MODE SELECT to commit the change to the D_SENSE bit
3068 	 */
3069 	if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header,
3070 	    buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) {
3071 		SD_INFO(SD_LOG_COMMON, un,
3072 		    "sd_enable_descr_sense: mode select ctrl page failed\n");
3073 		goto eds_exit;
3074 	}
3075 
3076 eds_exit:
3077 	kmem_free(header, buflen);
3078 }
3079 
3080 /*
3081  * Function: sd_reenable_dsense_task
3082  *
3083  * Description: Re-enable descriptor sense after device or bus reset
3084  *
3085  * Context: Executes in a taskq() thread context
3086  */
3087 static void
3088 sd_reenable_dsense_task(void *arg)
3089 {
3090 	struct sd_lun *un = arg;
3091 
3092 	ASSERT(un != NULL);
3093 	sd_enable_descr_sense(un);
3094 }
3095 #endif /* _LP64 */
3096 
3097 /*
3098  * Function: sd_set_mmc_caps
3099  *
3100  * Description: This routine determines if the device is MMC compliant and if
3101  *		the device supports CDDA via a mode sense of the CDVD
3102  *		capabilities mode page. Also checks if the device is a
3103  *		dvdram writable device.
3104  *
3105  * Arguments: un - driver soft state (unit) structure
3106  *
3107  * Context: Kernel thread context only
3108  */
3109 
3110 static void
3111 sd_set_mmc_caps(struct sd_lun *un)
3112 {
3113 	struct mode_header_grp2		*sense_mhp;
3114 	uchar_t				*sense_page;
3115 	caddr_t				buf;
3116 	int				bd_len;
3117 	int				status;
3118 	struct uscsi_cmd		com;
3119 	int				rtn;
3120 	uchar_t				*out_data_rw, *out_data_hd;
3121 	uchar_t				*rqbuf_rw, *rqbuf_hd;
3122 
3123 	ASSERT(un != NULL);
3124 
3125 	/*
3126 	 * The flags set in this function are: MMC compliant, dvdram
3127 	 * writable device, and CDDA support. Each flag is initialized
3128 	 * to FALSE and set to TRUE if the capability is detected.
3129 	 */
3130 	un->un_f_mmc_cap = FALSE;
3131 	un->un_f_dvdram_writable_device = FALSE;
3132 	un->un_f_cfg_cdda = FALSE;
3133 
3134 	buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3135 	status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf,
3136 	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);
3137 
3138 	if (status != 0) {
3139 		/* command failed; just return */
3140 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3141 		return;
3142 	}
3143 	/*
3144 	 * If the mode sense request for the CDROM CAPABILITIES
3145 	 * page (0x2A) succeeds, the device is assumed to be MMC.
3146 	 */
3147 	un->un_f_mmc_cap = TRUE;
3148 
3149 	/* Get to the page data */
3150 	sense_mhp = (struct mode_header_grp2 *)buf;
3151 	bd_len = (sense_mhp->bdesc_length_hi << 8) |
3152 	    sense_mhp->bdesc_length_lo;
3153 	if (bd_len > MODE_BLK_DESC_LENGTH) {
3154 		/*
3155 		 * We did not get back the expected block descriptor
3156 		 * length, so we cannot determine if the device supports
3157 		 * CDDA. However, we still indicate the device is MMC
3158 		 * according to the successful response to the page
3159 		 * 0x2A mode sense request.
3160 		 */
3161 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3162 		    "sd_set_mmc_caps: Mode Sense returned "
3163 		    "invalid block descriptor length\n");
3164 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3165 		return;
3166 	}
3167 
3168 	/* See if read CDDA is supported */
3169 	sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 +
3170 	    bd_len);
3171 	un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE;
3172 
3173 	/* See if writing DVD RAM is supported. */
3174 	un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ?
TRUE : FALSE;
3175 	if (un->un_f_dvdram_writable_device == TRUE) {
3176 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3177 		return;
3178 	}
3179 
3180 	/*
3181 	 * If the device presents DVD or CD capabilities in the mode
3182 	 * page, we can return here since an RRD will not have
3183 	 * these capabilities.
3184 	 */
3185 	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3186 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3187 		return;
3188 	}
3189 	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3190 
3191 	/*
3192 	 * If un->un_f_dvdram_writable_device is still FALSE,
3193 	 * check for a Removable Rigid Disk (RRD). An RRD
3194 	 * device is identified by the features RANDOM_WRITABLE and
3195 	 * HARDWARE_DEFECT_MANAGEMENT.
3196 	 */
3197 	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3198 	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3199 
3200 	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw,
3201 	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3202 	    RANDOM_WRITABLE, SD_PATH_STANDARD);
3203 	if (rtn != 0) {
3204 		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3205 		kmem_free(rqbuf_rw, SENSE_LENGTH);
3206 		return;
3207 	}
3208 
3209 	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3210 	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3211 
3212 	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd,
3213 	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3214 	    HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD);
3215 	if (rtn == 0) {
3216 		/*
3217 		 * We have good information, check for random writable
3218 		 * and hardware defect features.
3219 		 */
3220 		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3221 		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
3222 			un->un_f_dvdram_writable_device = TRUE;
3223 		}
3224 	}
3225 
3226 	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3227 	kmem_free(rqbuf_rw, SENSE_LENGTH);
3228 	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3229 	kmem_free(rqbuf_hd, SENSE_LENGTH);
3230 }
3231 
3232 /*
3233  * Function: sd_check_for_writable_cd
3234  *
3235  * Description: This routine determines if the media in the device is
3236  *		writable or not. It uses the get configuration command (0x46)
3237  *		to determine if the media is writable.
3238  *
3239  * Arguments: un - driver soft state (unit) structure
3240  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct"
3241  *			chain and the normal command waitq, or
3242  *			SD_PATH_DIRECT_PRIORITY to use the USCSI
3243  *			"direct" chain and bypass the normal command
3244  *			waitq.
3245  *
3246  * Context: Never called at interrupt context.
3247  */
3248 
3249 static void
3250 sd_check_for_writable_cd(struct sd_lun *un, int path_flag)
3251 {
3252 	struct uscsi_cmd		com;
3253 	uchar_t				*out_data;
3254 	uchar_t				*rqbuf;
3255 	int				rtn;
3256 	uchar_t				*out_data_rw, *out_data_hd;
3257 	uchar_t				*rqbuf_rw, *rqbuf_hd;
3258 	struct mode_header_grp2		*sense_mhp;
3259 	uchar_t				*sense_page;
3260 	caddr_t				buf;
3261 	int				bd_len;
3262 	int				status;
3263 
3264 	ASSERT(un != NULL);
3265 	ASSERT(mutex_owned(SD_MUTEX(un)));
3266 
3267 	/*
3268 	 * Assume the media is not writable; set the flag to TRUE only
3269 	 * if the configuration information tells us otherwise.
3270 	 */
3271 	un->un_f_mmc_writable_media = FALSE;
3272 	mutex_exit(SD_MUTEX(un));
3273 
3274 	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3275 	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3276 
3277 	rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH,
3278 	    out_data, SD_PROFILE_HEADER_LEN, path_flag);
3279 
3280 	mutex_enter(SD_MUTEX(un));
3281 	if (rtn == 0) {
3282 		/*
3283 		 * We have good information, check for writable DVD.
3284 */ 3285 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3286 un->un_f_mmc_writable_media = TRUE; 3287 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3288 kmem_free(rqbuf, SENSE_LENGTH); 3289 return; 3290 } 3291 } 3292 3293 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3294 kmem_free(rqbuf, SENSE_LENGTH); 3295 3296 /* 3297 * Determine if this is an RRD type device. 3298 */ 3299 mutex_exit(SD_MUTEX(un)); 3300 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3301 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3302 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3303 mutex_enter(SD_MUTEX(un)); 3304 if (status != 0) { 3305 /* command failed; just return */ 3306 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3307 return; 3308 } 3309 3310 /* Get to the page data */ 3311 sense_mhp = (struct mode_header_grp2 *)buf; 3312 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3313 if (bd_len > MODE_BLK_DESC_LENGTH) { 3314 /* 3315 * We did not get back the expected block descriptor length so 3316 * we cannot check the mode page. 3317 */ 3318 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3319 "sd_check_for_writable_cd: Mode Sense returned " 3320 "invalid block descriptor length\n"); 3321 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3322 return; 3323 } 3324 3325 /* 3326 * If the device presents DVD or CD capabilities in the mode 3327 * page, we can return here since an RRD device will not have 3328 * these capabilities. 3329 */ 3330 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3331 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3332 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3333 return; 3334 } 3335 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3336 3337 /* 3338 * If un->un_f_mmc_writable_media is still FALSE, 3339 * check for RRD type media. An RRD device is identified 3340 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 3341 */ 3342 mutex_exit(SD_MUTEX(un)); 3343 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3344 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3345 3346 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3347 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3348 RANDOM_WRITABLE, path_flag); 3349 if (rtn != 0) { 3350 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3351 kmem_free(rqbuf_rw, SENSE_LENGTH); 3352 mutex_enter(SD_MUTEX(un)); 3353 return; 3354 } 3355 3356 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3357 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3358 3359 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3360 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3361 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3362 mutex_enter(SD_MUTEX(un)); 3363 if (rtn == 0) { 3364 /* 3365 * We have good information, check for random writable 3366 * and hardware defect features as current. 3367 */ 3368 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3369 (out_data_rw[10] & 0x1) && 3370 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3371 (out_data_hd[10] & 0x1)) { 3372 un->un_f_mmc_writable_media = TRUE; 3373 } 3374 } 3375 3376 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3377 kmem_free(rqbuf_rw, SENSE_LENGTH); 3378 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3379 kmem_free(rqbuf_hd, SENSE_LENGTH); 3380 } 3381 3382 /* 3383 * Function: sd_read_unit_properties 3384 * 3385 * Description: The following implements a property lookup mechanism.
3386 * Properties for particular disks (keyed on vendor, model 3387 * and rev numbers) are sought in the sd.conf file via 3388 * sd_process_sdconf_file(), and if not found there, are 3389 * looked for in a list hardcoded in this driver via 3390 * sd_process_sdconf_table(). Once located, the properties 3391 * are used to update the driver unit structure. 3392 * 3393 * Arguments: un - driver soft state (unit) structure 3394 */ 3395 3396 static void 3397 sd_read_unit_properties(struct sd_lun *un) 3398 { 3399 /* 3400 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3401 * the "sd-config-list" property (from the sd.conf file) or if 3402 * there was not a match for the inquiry vid/pid. If this event 3403 * occurs the static driver configuration table is searched for 3404 * a match. 3405 */ 3406 ASSERT(un != NULL); 3407 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3408 sd_process_sdconf_table(un); 3409 } 3410 3411 /* check for LSI device */ 3412 sd_is_lsi(un); 3413 3414 3415 } 3416 3417 3418 /* 3419 * Function: sd_process_sdconf_file 3420 * 3421 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the 3422 * driver's config file (i.e., sd.conf) and update the driver 3423 * soft state structure accordingly. 3424 * 3425 * Arguments: un - driver soft state (unit) structure 3426 * 3427 * Return Code: SD_SUCCESS - The properties were successfully set according 3428 * to the driver configuration file. 3429 * SD_FAILURE - The driver config list was not obtained or 3430 * there was no vid/pid match. This indicates that 3431 * the static config table should be used. 3432 * 3433 * The config file has a property, "sd-config-list". Currently we support 3434 * two formats. For both formats, the value of this property 3435 * is a list of duplets: 3436 * 3437 * sd-config-list= 3438 * <duplet>, 3439 * [,<duplet>]*; 3440 * 3441 * For the improved format, where 3442 * 3443 * <duplet>:= "<vid+pid>","<tunable-list>" 3444 * 3445 * and 3446 * 3447 * <tunable-list>:= <tunable> [, <tunable> ]*; 3448 * <tunable> = <name> : <value> 3449 * 3450 * The <vid+pid> is the string that is returned by the target device on a 3451 * SCSI inquiry command, the <tunable-list> contains one or more tunables 3452 * to apply to all target devices with the specified <vid+pid>. 3453 * 3454 * Each <tunable> is a "<name> : <value>" pair. 3455 * 3456 * For the old format, the structure of each duplet is as follows: 3457 * 3458 * <duplet>:= "<vid+pid>","<data-property-name_list>" 3459 * 3460 * The first entry of the duplet is the device ID string (the concatenated 3461 * vid & pid; not to be confused with a device_id). This is defined in 3462 * the same way as in the sd_disk_table. 3463 * 3464 * The second part of the duplet is a string that identifies a 3465 * data-property-name-list. The data-property-name-list is defined as 3466 * follows: 3467 * 3468 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3469 * 3470 * The syntax of <data-property-name> depends on the <version> field. 3471 * 3472 * If version = SD_CONF_VERSION_1 we have the following syntax: 3473 * 3474 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3475 * 3476 * where the prop0 value will be used to set prop0 if bit0 is set in the 3477 * flags, prop1 if bit1 is set, etc.
and N = SD_CONF_MAX_ITEMS -1 3478 * 3479 */ 3480 3481 static int 3482 sd_process_sdconf_file(struct sd_lun *un) 3483 { 3484 char **config_list = NULL; 3485 uint_t nelements; 3486 char *vidptr; 3487 int vidlen; 3488 char *dnlist_ptr; 3489 char *dataname_ptr; 3490 char *dataname_lasts; 3491 int *data_list = NULL; 3492 uint_t data_list_len; 3493 int rval = SD_FAILURE; 3494 int i; 3495 3496 ASSERT(un != NULL); 3497 3498 /* Obtain the configuration list associated with the .conf file */ 3499 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un), 3500 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list, 3501 &config_list, &nelements) != DDI_PROP_SUCCESS) { 3502 return (SD_FAILURE); 3503 } 3504 3505 /* 3506 * Compare vids in each duplet to the inquiry vid - if a match is 3507 * made, get the data value and update the soft state structure 3508 * accordingly. 3509 * 3510 * Each duplet should show as a pair of strings, return SD_FAILURE 3511 * otherwise. 3512 */ 3513 if (nelements & 1) { 3514 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3515 "sd-config-list should show as pairs of strings.\n"); 3516 if (config_list) 3517 ddi_prop_free(config_list); 3518 return (SD_FAILURE); 3519 } 3520 3521 for (i = 0; i < nelements; i += 2) { 3522 /* 3523 * Note: The assumption here is that each vid entry is on 3524 * a unique line from its associated duplet. 3525 */ 3526 vidptr = config_list[i]; 3527 vidlen = (int)strlen(vidptr); 3528 if ((vidlen == 0) || 3529 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3530 continue; 3531 } 3532 3533 /* 3534 * dnlist contains 1 or more blank separated 3535 * data-property-name entries 3536 */ 3537 dnlist_ptr = config_list[i + 1]; 3538 3539 if (strchr(dnlist_ptr, ':') != NULL) { 3540 /* 3541 * Decode the improved format sd-config-list. 3542 */ 3543 sd_nvpair_str_decode(un, dnlist_ptr); 3544 } else { 3545 /* 3546 * The old format sd-config-list, loop through all 3547 * data-property-name entries in the 3548 * data-property-name-list 3549 * setting the properties for each. 3550 */ 3551 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3552 &dataname_lasts); dataname_ptr != NULL; 3553 dataname_ptr = sd_strtok_r(NULL, " \t", 3554 &dataname_lasts)) { 3555 int version; 3556 3557 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3558 "sd_process_sdconf_file: disk:%s, " 3559 "data:%s\n", vidptr, dataname_ptr); 3560 3561 /* Get the data list */ 3562 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3563 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3564 &data_list_len) != DDI_PROP_SUCCESS) { 3565 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3566 "sd_process_sdconf_file: data " 3567 "property (%s) has no value\n", 3568 dataname_ptr); 3569 continue; 3570 } 3571 3572 version = data_list[0]; 3573 3574 if (version == SD_CONF_VERSION_1) { 3575 sd_tunables values; 3576 3577 /* Set the properties */ 3578 if (sd_chk_vers1_data(un, data_list[1], 3579 &data_list[2], data_list_len, 3580 dataname_ptr) == SD_SUCCESS) { 3581 sd_get_tunables_from_conf(un, 3582 data_list[1], &data_list[2], 3583 &values); 3584 sd_set_vers1_properties(un, 3585 data_list[1], &values); 3586 rval = SD_SUCCESS; 3587 } else { 3588 rval = SD_FAILURE; 3589 } 3590 } else { 3591 scsi_log(SD_DEVINFO(un), sd_label, 3592 CE_WARN, "data property %s version " 3593 "0x%x is invalid.", 3594 dataname_ptr, version); 3595 rval = SD_FAILURE; 3596 } 3597 if (data_list) 3598 ddi_prop_free(data_list); 3599 } 3600 } 3601 } 3602 3603 /* free up the memory allocated by ddi_prop_lookup_string_array(). 
*/ 3604 if (config_list) { 3605 ddi_prop_free(config_list); 3606 } 3607 3608 return (rval); 3609 } 3610 3611 /* 3612 * Function: sd_nvpair_str_decode() 3613 * 3614 * Description: Parse the improved format sd-config-list, extracting 3615 * each tunable entry as a name-value pair. 3616 * Then call sd_set_properties() to set each property. 3617 * 3618 * Arguments: un - driver soft state (unit) structure 3619 * nvpair_str - the tunable list 3620 */ 3621 static void 3622 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3623 { 3624 char *nv, *name, *value, *token; 3625 char *nv_lasts, *v_lasts, *x_lasts; 3626 3627 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3628 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3629 token = sd_strtok_r(nv, ":", &v_lasts); 3630 name = sd_strtok_r(token, " \t", &x_lasts); 3631 token = sd_strtok_r(NULL, ":", &v_lasts); 3632 value = sd_strtok_r(token, " \t", &x_lasts); 3633 if (name == NULL || value == NULL) { 3634 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3635 "sd_nvpair_str_decode: " 3636 "name or value is not valid!\n"); 3637 } else { 3638 sd_set_properties(un, name, value); 3639 } 3640 } 3641 } 3642 3643 /* 3644 * Function: sd_strtok_r() 3645 * 3646 * Description: This function uses strpbrk and strspn to break the 3647 * string into tokens on successive calls. It returns 3648 * NULL when no non-separator characters remain. The first 3649 * argument must be NULL for all calls after the first. 3650 */ 3651 static char * 3652 sd_strtok_r(char *string, const char *sepset, char **lasts) 3653 { 3654 char *q, *r; 3655 3656 /* First or subsequent call */ 3657 if (string == NULL) 3658 string = *lasts; 3659 3660 if (string == NULL) 3661 return (NULL); 3662 3663 /* Skip leading separators */ 3664 q = string + strspn(string, sepset); 3665 3666 if (*q == '\0') 3667 return (NULL); 3668 3669 if ((r = strpbrk(q, sepset)) == NULL) 3670 *lasts = NULL; 3671 else { 3672 *r = '\0'; 3673 *lasts = r + 1; 3674 } 3675 return (q); 3676 } 3677 3678 /* 3679 * Function: sd_set_properties() 3680 * 3681 * Description: Set device properties based on the improved 3682 * format sd-config-list.
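* As an illustration only (the vid/pid and tunable values here are * invented, not taken from any shipped sd.conf), an entry in this * format might read: * * sd-config-list = "VENDOR  MODEL   ", "throttle-max:64, disksort:false"; * * The tunable names below are matched with strcasecmp(), so they are * case insensitive.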
3683 * 3684 * Arguments: un - driver soft state (unit) structure 3685 * name - supported tunable name 3686 * value - tunable value 3687 */ 3688 static void 3689 sd_set_properties(struct sd_lun *un, char *name, char *value) 3690 { 3691 char *endptr = NULL; 3692 long val = 0; 3693 3694 if (strcasecmp(name, "cache-nonvolatile") == 0) { 3695 if (strcasecmp(value, "true") == 0) { 3696 un->un_f_suppress_cache_flush = TRUE; 3697 } else if (strcasecmp(value, "false") == 0) { 3698 un->un_f_suppress_cache_flush = FALSE; 3699 } else { 3700 goto value_invalid; 3701 } 3702 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3703 "suppress_cache_flush flag set to %d\n", 3704 un->un_f_suppress_cache_flush); 3705 return; 3706 } 3707 3708 if (strcasecmp(name, "controller-type") == 0) { 3709 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3710 un->un_ctype = val; 3711 } else { 3712 goto value_invalid; 3713 } 3714 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3715 "ctype set to %d\n", un->un_ctype); 3716 return; 3717 } 3718 3719 if (strcasecmp(name, "delay-busy") == 0) { 3720 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3721 un->un_busy_timeout = drv_usectohz(val / 1000); 3722 } else { 3723 goto value_invalid; 3724 } 3725 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3726 "busy_timeout set to %d\n", un->un_busy_timeout); 3727 return; 3728 } 3729 3730 if (strcasecmp(name, "disksort") == 0) { 3731 if (strcasecmp(value, "true") == 0) { 3732 un->un_f_disksort_disabled = FALSE; 3733 } else if (strcasecmp(value, "false") == 0) { 3734 un->un_f_disksort_disabled = TRUE; 3735 } else { 3736 goto value_invalid; 3737 } 3738 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3739 "disksort disabled flag set to %d\n", 3740 un->un_f_disksort_disabled); 3741 return; 3742 } 3743 3744 if (strcasecmp(name, "timeout-releasereservation") == 0) { 3745 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3746 un->un_reserve_release_time = val; 3747 } else { 3748 goto value_invalid; 3749 } 3750 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3751 "reservation release timeout set to %d\n", 3752 un->un_reserve_release_time); 3753 return; 3754 } 3755 3756 if (strcasecmp(name, "reset-lun") == 0) { 3757 if (strcasecmp(value, "true") == 0) { 3758 un->un_f_lun_reset_enabled = TRUE; 3759 } else if (strcasecmp(value, "false") == 0) { 3760 un->un_f_lun_reset_enabled = FALSE; 3761 } else { 3762 goto value_invalid; 3763 } 3764 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3765 "lun reset enabled flag set to %d\n", 3766 un->un_f_lun_reset_enabled); 3767 return; 3768 } 3769 3770 if (strcasecmp(name, "retries-busy") == 0) { 3771 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3772 un->un_busy_retry_count = val; 3773 } else { 3774 goto value_invalid; 3775 } 3776 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3777 "busy retry count set to %d\n", un->un_busy_retry_count); 3778 return; 3779 } 3780 3781 if (strcasecmp(name, "retries-timeout") == 0) { 3782 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3783 un->un_retry_count = val; 3784 } else { 3785 goto value_invalid; 3786 } 3787 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3788 "timeout retry count set to %d\n", un->un_retry_count); 3789 return; 3790 } 3791 3792 if (strcasecmp(name, "retries-notready") == 0) { 3793 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3794 un->un_notready_retry_count = val; 3795 } else { 3796 goto value_invalid; 3797 } 3798 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3799 "notready retry count 
set to %d\n", 3800 un->un_notready_retry_count); 3801 return; 3802 } 3803 3804 if (strcasecmp(name, "retries-reset") == 0) { 3805 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3806 un->un_reset_retry_count = val; 3807 } else { 3808 goto value_invalid; 3809 } 3810 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3811 "reset retry count set to %d\n", 3812 un->un_reset_retry_count); 3813 return; 3814 } 3815 3816 if (strcasecmp(name, "throttle-max") == 0) { 3817 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3818 un->un_saved_throttle = un->un_throttle = val; 3819 } else { 3820 goto value_invalid; 3821 } 3822 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3823 "throttle set to %d\n", un->un_throttle); 3824 } 3825 3826 if (strcasecmp(name, "throttle-min") == 0) { 3827 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3828 un->un_min_throttle = val; 3829 } else { 3830 goto value_invalid; 3831 } 3832 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3833 "min throttle set to %d\n", un->un_min_throttle); 3834 } 3835 3836 /* 3837 * Validate the throttle values. 3838 * If any of the numbers are invalid, set everything to defaults. 3839 */ 3840 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 3841 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 3842 (un->un_min_throttle > un->un_throttle)) { 3843 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 3844 un->un_min_throttle = sd_min_throttle; 3845 } 3846 return; 3847 3848 value_invalid: 3849 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3850 "value of prop %s is invalid\n", name); 3851 } 3852 3853 /* 3854 * Function: sd_get_tunables_from_conf() 3855 * 3856 * 3857 * Description: This function reads the data list from the sd.conf file 3858 * and pulls the values that are numeric into the 3859 * appropriate sd_tunables members. 3860 * Because the order of the data list members varies across platforms, 3861 * this function reads them from the data list in a platform-specific 3862 * order and places them into the sd_tunables member that is 3863 * consistent across all platforms.
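* As a worked example (values invented): for a data list of 1,0x1,32, * the version is 1 and the flag word is 0x1, so the caller passes this * function flags = 0x1 and &data_list[2]; assuming SD_CONF_BSET_THROTTLE * is bit 0, as the switch below suggests, the value 32 is stored in * values->sdt_throttle.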
3864 */ 3865 static void 3866 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3867 sd_tunables *values) 3868 { 3869 int i; 3870 int mask; 3871 3872 bzero(values, sizeof (sd_tunables)); 3873 3874 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3875 3876 mask = 1 << i; 3877 if (mask > flags) { 3878 break; 3879 } 3880 3881 switch (mask & flags) { 3882 case 0: /* This mask bit not set in flags */ 3883 continue; 3884 case SD_CONF_BSET_THROTTLE: 3885 values->sdt_throttle = data_list[i]; 3886 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3887 "sd_get_tunables_from_conf: throttle = %d\n", 3888 values->sdt_throttle); 3889 break; 3890 case SD_CONF_BSET_CTYPE: 3891 values->sdt_ctype = data_list[i]; 3892 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3893 "sd_get_tunables_from_conf: ctype = %d\n", 3894 values->sdt_ctype); 3895 break; 3896 case SD_CONF_BSET_NRR_COUNT: 3897 values->sdt_not_rdy_retries = data_list[i]; 3898 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3899 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3900 values->sdt_not_rdy_retries); 3901 break; 3902 case SD_CONF_BSET_BSY_RETRY_COUNT: 3903 values->sdt_busy_retries = data_list[i]; 3904 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3905 "sd_get_tunables_from_conf: busy_retries = %d\n", 3906 values->sdt_busy_retries); 3907 break; 3908 case SD_CONF_BSET_RST_RETRIES: 3909 values->sdt_reset_retries = data_list[i]; 3910 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3911 "sd_get_tunables_from_conf: reset_retries = %d\n", 3912 values->sdt_reset_retries); 3913 break; 3914 case SD_CONF_BSET_RSV_REL_TIME: 3915 values->sdt_reserv_rel_time = data_list[i]; 3916 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3917 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3918 values->sdt_reserv_rel_time); 3919 break; 3920 case SD_CONF_BSET_MIN_THROTTLE: 3921 values->sdt_min_throttle = data_list[i]; 3922 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3923 "sd_get_tunables_from_conf: min_throttle = %d\n", 3924 values->sdt_min_throttle); 3925 break; 3926 case SD_CONF_BSET_DISKSORT_DISABLED: 3927 values->sdt_disk_sort_dis = data_list[i]; 3928 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3929 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3930 values->sdt_disk_sort_dis); 3931 break; 3932 case SD_CONF_BSET_LUN_RESET_ENABLED: 3933 values->sdt_lun_reset_enable = data_list[i]; 3934 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3935 "sd_get_tunables_from_conf: lun_reset_enable = %d\n", 3936 values->sdt_lun_reset_enable); 3937 break; 3938 case SD_CONF_BSET_CACHE_IS_NV: 3939 values->sdt_suppress_cache_flush = data_list[i]; 3940 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3941 "sd_get_tunables_from_conf: " 3942 "suppress_cache_flush = %d\n", 3943 values->sdt_suppress_cache_flush); 3944 break; 3945 } 3946 } 3947 } 3948 3949 /* 3950 * Function: sd_process_sdconf_table 3951 * 3952 * Description: Search the static configuration table for a match on the 3953 * inquiry vid/pid and update the driver soft state structure 3954 * according to the table property values for the device.
3955 * 3956 * The form of a configuration table entry is: 3957 * <vid+pid>,<flags>,<property-data> 3958 * "SEAGATE ST42400N",1,0x40000, 3959 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 3960 * 3961 * Arguments: un - driver soft state (unit) structure 3962 */ 3963 3964 static void 3965 sd_process_sdconf_table(struct sd_lun *un) 3966 { 3967 char *id = NULL; 3968 int table_index; 3969 int idlen; 3970 3971 ASSERT(un != NULL); 3972 for (table_index = 0; table_index < sd_disk_table_size; 3973 table_index++) { 3974 id = sd_disk_table[table_index].device_id; 3975 idlen = strlen(id); 3976 if (idlen == 0) { 3977 continue; 3978 } 3979 3980 /* 3981 * The static configuration table currently does not 3982 * implement version 10 properties. Additionally, 3983 * multiple data-property-name entries are not 3984 * implemented in the static configuration table. 3985 */ 3986 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 3987 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3988 "sd_process_sdconf_table: disk %s\n", id); 3989 sd_set_vers1_properties(un, 3990 sd_disk_table[table_index].flags, 3991 sd_disk_table[table_index].properties); 3992 break; 3993 } 3994 } 3995 } 3996 3997 3998 /* 3999 * Function: sd_sdconf_id_match 4000 * 4001 * Description: This local function implements a case insensitive vid/pid 4002 * comparison as well as the boundary cases of wild card and 4003 * multiple blanks. 4004 * 4005 * Note: An implicit assumption made here is that the scsi 4006 * inquiry structure will always keep the vid, pid and 4007 * revision strings in consecutive sequence, so they can be 4008 * read as a single string. If this assumption is not the 4009 * case, a separate string, to be used for the check, needs 4010 * to be built with these strings concatenated. 4011 * 4012 * Arguments: un - driver soft state (unit) structure 4013 * id - table or config file vid/pid 4014 * idlen - length of the vid/pid (bytes) 4015 * 4016 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4017 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4018 */ 4019 4020 static int 4021 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 4022 { 4023 struct scsi_inquiry *sd_inq; 4024 int rval = SD_SUCCESS; 4025 4026 ASSERT(un != NULL); 4027 sd_inq = un->un_sd->sd_inq; 4028 ASSERT(id != NULL); 4029 4030 /* 4031 * We use the inq_vid as a pointer to a buffer containing the 4032 * vid and pid and use the entire vid/pid length of the table 4033 * entry for the comparison. This works because the inq_pid 4034 * data member follows inq_vid in the scsi_inquiry structure. 4035 */ 4036 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4037 /* 4038 * The user id string is compared to the inquiry vid/pid 4039 * using a case insensitive comparison and ignoring 4040 * multiple spaces. 4041 */ 4042 rval = sd_blank_cmp(un, id, idlen); 4043 if (rval != SD_SUCCESS) { 4044 /* 4045 * User id strings that start and end with a "*" 4046 * are a special case. These do not have a 4047 * specific vendor, and the product string can 4048 * appear anywhere in the 16 byte PID portion of 4049 * the inquiry data. This is a simple strstr() 4050 * type search for the user id in the inquiry data.
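* For example, a hypothetical table entry of "*ST39102*" would match * any device whose 16-byte inquiry PID contains "ST39102", regardless * of the vendor field. Note that this wildcard comparison is done with * bcmp() and is therefore case sensitive, unlike the comparisons above.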
4051 */ 4052 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4053 char *pidptr = &id[1]; 4054 int i; 4055 int j; 4056 int pidstrlen = idlen - 2; 4057 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4058 pidstrlen; 4059 4060 if (j < 0) { 4061 return (SD_FAILURE); 4062 } 4063 for (i = 0; i < j; i++) { 4064 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4065 pidptr, pidstrlen) == 0) { 4066 rval = SD_SUCCESS; 4067 break; 4068 } 4069 } 4070 } 4071 } 4072 } 4073 return (rval); 4074 } 4075 4076 4077 /* 4078 * Function: sd_blank_cmp 4079 * 4080 * Description: If the id string starts and ends with a space, treat 4081 * multiple consecutive spaces as equivalent to a single 4082 * space. For example, this causes a sd_disk_table entry 4083 * of " NEC CDROM " to match a device's id string of 4084 * "NEC CDROM". 4085 * 4086 * Note: The success exit condition for this routine is if 4087 * the pointer to the table entry is '\0' and the cnt of 4088 * the inquiry length is zero. This will happen if the inquiry 4089 * string returned by the device is padded with spaces to be 4090 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4091 * SCSI spec states that the inquiry string is to be padded with 4092 * spaces. 4093 * 4094 * Arguments: un - driver soft state (unit) structure 4095 * id - table or config file vid/pid 4096 * idlen - length of the vid/pid (bytes) 4097 * 4098 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4099 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4100 */ 4101 4102 static int 4103 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4104 { 4105 char *p1; 4106 char *p2; 4107 int cnt; 4108 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4109 sizeof (SD_INQUIRY(un)->inq_pid); 4110 4111 ASSERT(un != NULL); 4112 p2 = un->un_sd->sd_inq->inq_vid; 4113 ASSERT(id != NULL); 4114 p1 = id; 4115 4116 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4117 /* 4118 * Note: string p1 is terminated by a NUL but string p2 4119 * isn't. The end of p2 is determined by cnt. 4120 */ 4121 for (;;) { 4122 /* skip over any extra blanks in both strings */ 4123 while ((*p1 != '\0') && (*p1 == ' ')) { 4124 p1++; 4125 } 4126 while ((cnt != 0) && (*p2 == ' ')) { 4127 p2++; 4128 cnt--; 4129 } 4130 4131 /* compare the two strings */ 4132 if ((cnt == 0) || 4133 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4134 break; 4135 } 4136 while ((cnt > 0) && 4137 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4138 p1++; 4139 p2++; 4140 cnt--; 4141 } 4142 } 4143 } 4144 4145 /* return SD_SUCCESS if both strings match */ 4146 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 4147 } 4148 4149 4150 /* 4151 * Function: sd_chk_vers1_data 4152 * 4153 * Description: Verify the version 1 device properties provided by the 4154 * user via the configuration file. 4155 * 4156 * Arguments: un - driver soft state (unit) structure 4157 * flags - integer mask indicating properties to be set 4158 * prop_list - integer list of property values 4159 * list_len - number of elements in the list 4160 * 4161 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 4162 * SD_FAILURE - Indicates the user provided data is invalid 4163 */ 4164 4165 static int 4166 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 4167 int list_len, char *dataname_ptr) 4168 { 4169 int i; 4170 int mask = 1; 4171 int index = 0; 4172 4173 ASSERT(un != NULL); 4174 4175 /* Check for a NULL property name and list */ 4176 if (dataname_ptr == NULL) { 4177 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4178 "sd_chk_vers1_data: NULL data property name."); 4179 return (SD_FAILURE); 4180 } 4181 if (prop_list == NULL) { 4182 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4183 "sd_chk_vers1_data: %s NULL data property list.", 4184 dataname_ptr); 4185 return (SD_FAILURE); 4186 } 4187 4188 /* Display a warning if undefined bits are set in the flags */ 4189 if (flags & ~SD_CONF_BIT_MASK) { 4190 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4191 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 4192 "Properties not set.", 4193 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 4194 return (SD_FAILURE); 4195 } 4196 4197 /* 4198 * Verify the length of the list by counting the property bits set 4199 * in the flags and validating that the property list supplies a 4200 * value for each of them (plus the version and flag words). 4201 */ 4202 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4203 if (flags & mask) { 4204 index++; 4205 } 4206 mask <<= 1; 4207 } 4208 if (list_len < (index + 2)) { 4209 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4210 "sd_chk_vers1_data: " 4211 "Data property list %s size is incorrect. " 4212 "Properties not set.", dataname_ptr); 4213 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 4214 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 4215 return (SD_FAILURE); 4216 } 4217 return (SD_SUCCESS); 4218 } 4219 4220 4221 /* 4222 * Function: sd_set_vers1_properties 4223 * 4224 * Description: Set version 1 device properties based on a property list 4225 * retrieved from the driver configuration file or static 4226 * configuration table. Version 1 properties have the format: 4227 * 4228 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 4229 * 4230 * where the prop0 value will be used to set prop0 if bit0 4231 * is set in the flags 4232 * 4233 * Arguments: un - driver soft state (unit) structure 4234 * flags - integer mask indicating properties to be set 4235 * prop_list - integer list of property values 4236 */ 4237 4238 static void 4239 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4240 { 4241 ASSERT(un != NULL); 4242 4243 /* 4244 * Set the flag to indicate cache is to be disabled. An attempt 4245 * to disable the cache via sd_cache_control() will be made 4246 * later during attach once the basic initialization is complete.
4247 */ 4248 if (flags & SD_CONF_BSET_NOCACHE) { 4249 un->un_f_opt_disable_cache = TRUE; 4250 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4251 "sd_set_vers1_properties: caching disabled flag set\n"); 4252 } 4253 4254 /* CD-specific configuration parameters */ 4255 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4256 un->un_f_cfg_playmsf_bcd = TRUE; 4257 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4258 "sd_set_vers1_properties: playmsf_bcd set\n"); 4259 } 4260 if (flags & SD_CONF_BSET_READSUB_BCD) { 4261 un->un_f_cfg_readsub_bcd = TRUE; 4262 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4263 "sd_set_vers1_properties: readsub_bcd set\n"); 4264 } 4265 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4266 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4267 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4268 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4269 } 4270 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4271 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4272 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4273 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4274 } 4275 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4276 un->un_f_cfg_no_read_header = TRUE; 4277 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4278 "sd_set_vers1_properties: no_read_header set\n"); 4279 } 4280 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4281 un->un_f_cfg_read_cd_xd4 = TRUE; 4282 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4283 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4284 } 4285 4286 /* Support for devices which do not have valid/unique serial numbers */ 4287 if (flags & SD_CONF_BSET_FAB_DEVID) { 4288 un->un_f_opt_fab_devid = TRUE; 4289 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4290 "sd_set_vers1_properties: fab_devid bit set\n"); 4291 } 4292 4293 /* Support for user throttle configuration */ 4294 if (flags & SD_CONF_BSET_THROTTLE) { 4295 ASSERT(prop_list != NULL); 4296 un->un_saved_throttle = un->un_throttle = 4297 prop_list->sdt_throttle; 4298 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4299 "sd_set_vers1_properties: throttle set to %d\n", 4300 prop_list->sdt_throttle); 4301 } 4302 4303 /* Set the per disk retry count according to the conf file or table. 
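* (A zero sdt_not_rdy_retries is treated as not set and leaves the * existing retry count untouched, as the test below shows.)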
*/ 4304 if (flags & SD_CONF_BSET_NRR_COUNT) { 4305 ASSERT(prop_list != NULL); 4306 if (prop_list->sdt_not_rdy_retries) { 4307 un->un_notready_retry_count = 4308 prop_list->sdt_not_rdy_retries; 4309 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4310 "sd_set_vers1_properties: not ready retry count" 4311 " set to %d\n", un->un_notready_retry_count); 4312 } 4313 } 4314 4315 /* The controller type is reported for generic disk driver ioctls */ 4316 if (flags & SD_CONF_BSET_CTYPE) { 4317 ASSERT(prop_list != NULL); 4318 switch (prop_list->sdt_ctype) { 4319 case CTYPE_CDROM: 4320 un->un_ctype = prop_list->sdt_ctype; 4321 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4322 "sd_set_vers1_properties: ctype set to " 4323 "CTYPE_CDROM\n"); 4324 break; 4325 case CTYPE_CCS: 4326 un->un_ctype = prop_list->sdt_ctype; 4327 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4328 "sd_set_vers1_properties: ctype set to " 4329 "CTYPE_CCS\n"); 4330 break; 4331 case CTYPE_ROD: /* RW optical */ 4332 un->un_ctype = prop_list->sdt_ctype; 4333 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4334 "sd_set_vers1_properties: ctype set to " 4335 "CTYPE_ROD\n"); 4336 break; 4337 default: 4338 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4339 "sd_set_vers1_properties: Could not set " 4340 "invalid ctype value (%d)", 4341 prop_list->sdt_ctype); 4342 } 4343 } 4344 4345 /* Purple failover timeout */ 4346 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4347 ASSERT(prop_list != NULL); 4348 un->un_busy_retry_count = 4349 prop_list->sdt_busy_retries; 4350 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4351 "sd_set_vers1_properties: " 4352 "busy retry count set to %d\n", 4353 un->un_busy_retry_count); 4354 } 4355 4356 /* Purple reset retry count */ 4357 if (flags & SD_CONF_BSET_RST_RETRIES) { 4358 ASSERT(prop_list != NULL); 4359 un->un_reset_retry_count = 4360 prop_list->sdt_reset_retries; 4361 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4362 "sd_set_vers1_properties: " 4363 "reset retry count set to %d\n", 4364 un->un_reset_retry_count); 4365 } 4366 4367 /* Purple reservation release timeout */ 4368 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4369 ASSERT(prop_list != NULL); 4370 un->un_reserve_release_time = 4371 prop_list->sdt_reserv_rel_time; 4372 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4373 "sd_set_vers1_properties: " 4374 "reservation release timeout set to %d\n", 4375 un->un_reserve_release_time); 4376 } 4377 4378 /* 4379 * Driver flag telling the driver to verify that no commands are pending 4380 * for a device before issuing a Test Unit Ready. This is a workaround 4381 * for a firmware bug in some Seagate eliteI drives. 4382 */ 4383 if (flags & SD_CONF_BSET_TUR_CHECK) { 4384 un->un_f_cfg_tur_check = TRUE; 4385 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4386 "sd_set_vers1_properties: tur queue check set\n"); 4387 } 4388 4389 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4390 un->un_min_throttle = prop_list->sdt_min_throttle; 4391 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4392 "sd_set_vers1_properties: min throttle set to %d\n", 4393 un->un_min_throttle); 4394 } 4395 4396 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4397 un->un_f_disksort_disabled = 4398 (prop_list->sdt_disk_sort_dis != 0) ? 4399 TRUE : FALSE; 4400 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4401 "sd_set_vers1_properties: disksort disabled " 4402 "flag set to %d\n", 4403 prop_list->sdt_disk_sort_dis); 4404 } 4405 4406 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4407 un->un_f_lun_reset_enabled = 4408 (prop_list->sdt_lun_reset_enable != 0) ? 
4409 TRUE : FALSE; 4410 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4411 "sd_set_vers1_properties: lun reset enabled " 4412 "flag set to %d\n", 4413 prop_list->sdt_lun_reset_enable); 4414 } 4415 4416 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4417 un->un_f_suppress_cache_flush = 4418 (prop_list->sdt_suppress_cache_flush != 0) ? 4419 TRUE : FALSE; 4420 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4421 "sd_set_vers1_properties: suppress_cache_flush " 4422 "flag set to %d\n", 4423 prop_list->sdt_suppress_cache_flush); 4424 } 4425 4426 /* 4427 * Validate the throttle values. 4428 * If any of the numbers are invalid, set everything to defaults. 4429 */ 4430 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4431 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4432 (un->un_min_throttle > un->un_throttle)) { 4433 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4434 un->un_min_throttle = sd_min_throttle; 4435 } 4436 } 4437 4438 /* 4439 * Function: sd_is_lsi() 4440 * 4441 * Description: Check for lsi devices, step through the static device 4442 * table to match vid/pid. 4443 * 4444 * Args: un - ptr to sd_lun 4445 * 4446 * Notes: When creating new LSI property, need to add the new LSI property 4447 * to this function. 4448 */ 4449 static void 4450 sd_is_lsi(struct sd_lun *un) 4451 { 4452 char *id = NULL; 4453 int table_index; 4454 int idlen; 4455 void *prop; 4456 4457 ASSERT(un != NULL); 4458 for (table_index = 0; table_index < sd_disk_table_size; 4459 table_index++) { 4460 id = sd_disk_table[table_index].device_id; 4461 idlen = strlen(id); 4462 if (idlen == 0) { 4463 continue; 4464 } 4465 4466 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4467 prop = sd_disk_table[table_index].properties; 4468 if (prop == &lsi_properties || 4469 prop == &lsi_oem_properties || 4470 prop == &lsi_properties_scsi || 4471 prop == &symbios_properties) { 4472 un->un_f_cfg_is_lsi = TRUE; 4473 } 4474 break; 4475 } 4476 } 4477 } 4478 4479 /* 4480 * Function: sd_get_physical_geometry 4481 * 4482 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4483 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4484 * target, and use this information to initialize the physical 4485 * geometry cache specified by pgeom_p. 4486 * 4487 * MODE SENSE is an optional command, so failure in this case 4488 * does not necessarily denote an error. We want to use the 4489 * MODE SENSE commands to derive the physical geometry of the 4490 * device, but if either command fails, the logical geometry is 4491 * used as the fallback for disk label geometry in cmlb. 4492 * 4493 * This requires that un->un_blockcount and un->un_tgt_blocksize 4494 * have already been initialized for the current target and 4495 * that the current values be passed as args so that we don't 4496 * end up ever trying to use -1 as a valid value. This could 4497 * happen if either value is reset while we're not holding 4498 * the mutex. 4499 * 4500 * Arguments: un - driver soft state (unit) structure 4501 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4502 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4503 * to use the USCSI "direct" chain and bypass the normal 4504 * command waitq. 4505 * 4506 * Context: Kernel thread only (can sleep). 
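* As an illustrative example (numbers invented): if page 3 reports * nsect = 63 and page 4 reports nhead = 16 and ncyl = 1024, then * spc = 16 * 63 = 1008 and the MODE SENSE capacity computed below is * 1008 * 1024 = 1032192 blocks, which is then compared against the * capacity reported by READ CAPACITY.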
4507 */ 4508 4509 static int 4510 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4511 diskaddr_t capacity, int lbasize, int path_flag) 4512 { 4513 struct mode_format *page3p; 4514 struct mode_geometry *page4p; 4515 struct mode_header *headerp; 4516 int sector_size; 4517 int nsect; 4518 int nhead; 4519 int ncyl; 4520 int intrlv; 4521 int spc; 4522 diskaddr_t modesense_capacity; 4523 int rpm; 4524 int bd_len; 4525 int mode_header_length; 4526 uchar_t *p3bufp; 4527 uchar_t *p4bufp; 4528 int cdbsize; 4529 int ret = EIO; 4530 4531 ASSERT(un != NULL); 4532 4533 if (lbasize == 0) { 4534 if (ISCD(un)) { 4535 lbasize = 2048; 4536 } else { 4537 lbasize = un->un_sys_blocksize; 4538 } 4539 } 4540 pgeom_p->g_secsize = (unsigned short)lbasize; 4541 4542 /* 4543 * If the unit is a cd/dvd drive MODE SENSE page three 4544 * and MODE SENSE page four are reserved (see SBC spec 4545 * and MMC spec). To prevent soft errors just return 4546 * using the default LBA size. 4547 */ 4548 if (ISCD(un)) 4549 return (ret); 4550 4551 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4552 4553 /* 4554 * Retrieve MODE SENSE page 3 - Format Device Page 4555 */ 4556 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4557 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4558 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4559 != 0) { 4560 SD_ERROR(SD_LOG_COMMON, un, 4561 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4562 goto page3_exit; 4563 } 4564 4565 /* 4566 * Determine size of Block Descriptors in order to locate the mode 4567 * page data. ATAPI devices return 0, SCSI devices should return 4568 * MODE_BLK_DESC_LENGTH. 4569 */ 4570 headerp = (struct mode_header *)p3bufp; 4571 if (un->un_f_cfg_is_atapi == TRUE) { 4572 struct mode_header_grp2 *mhp = 4573 (struct mode_header_grp2 *)headerp; 4574 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4575 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4576 } else { 4577 mode_header_length = MODE_HEADER_LENGTH; 4578 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4579 } 4580 4581 if (bd_len > MODE_BLK_DESC_LENGTH) { 4582 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4583 "received unexpected bd_len of %d, page3\n", bd_len); 4584 goto page3_exit; 4585 } 4586 4587 page3p = (struct mode_format *) 4588 ((caddr_t)headerp + mode_header_length + bd_len); 4589 4590 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4591 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4592 "mode sense pg3 code mismatch %d\n", 4593 page3p->mode_page.code); 4594 goto page3_exit; 4595 } 4596 4597 /* 4598 * Use this physical geometry data only if BOTH MODE SENSE commands 4599 * complete successfully; otherwise, revert to the logical geometry. 4600 * So, we need to save everything in temporary variables. 
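* (The temporaries in question are sector_size, nsect and intrlv from * page 3, plus nhead, ncyl and rpm from page 4; pgeom_p is not filled * in from them until page 4 has also been parsed and validated.)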
4601 */ 4602 sector_size = BE_16(page3p->data_bytes_sect); 4603 4604 /* 4605 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4606 */ 4607 if (sector_size == 0) { 4608 sector_size = un->un_sys_blocksize; 4609 } else { 4610 sector_size &= ~(un->un_sys_blocksize - 1); 4611 } 4612 4613 nsect = BE_16(page3p->sect_track); 4614 intrlv = BE_16(page3p->interleave); 4615 4616 SD_INFO(SD_LOG_COMMON, un, 4617 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4618 SD_INFO(SD_LOG_COMMON, un, 4619 " mode page: %d; nsect: %d; sector size: %d;\n", 4620 page3p->mode_page.code, nsect, sector_size); 4621 SD_INFO(SD_LOG_COMMON, un, 4622 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4623 BE_16(page3p->track_skew), 4624 BE_16(page3p->cylinder_skew)); 4625 4626 4627 /* 4628 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4629 */ 4630 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4631 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4632 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4633 != 0) { 4634 SD_ERROR(SD_LOG_COMMON, un, 4635 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4636 goto page4_exit; 4637 } 4638 4639 /* 4640 * Determine size of Block Descriptors in order to locate the mode 4641 * page data. ATAPI devices return 0, SCSI devices should return 4642 * MODE_BLK_DESC_LENGTH. 4643 */ 4644 headerp = (struct mode_header *)p4bufp; 4645 if (un->un_f_cfg_is_atapi == TRUE) { 4646 struct mode_header_grp2 *mhp = 4647 (struct mode_header_grp2 *)headerp; 4648 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4649 } else { 4650 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4651 } 4652 4653 if (bd_len > MODE_BLK_DESC_LENGTH) { 4654 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4655 "received unexpected bd_len of %d, page4\n", bd_len); 4656 goto page4_exit; 4657 } 4658 4659 page4p = (struct mode_geometry *) 4660 ((caddr_t)headerp + mode_header_length + bd_len); 4661 4662 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4663 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4664 "mode sense pg4 code mismatch %d\n", 4665 page4p->mode_page.code); 4666 goto page4_exit; 4667 } 4668 4669 /* 4670 * Stash the data now, after we know that both commands completed. 4671 */ 4672 4673 4674 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4675 spc = nhead * nsect; 4676 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4677 rpm = BE_16(page4p->rpm); 4678 4679 modesense_capacity = spc * ncyl; 4680 4681 SD_INFO(SD_LOG_COMMON, un, 4682 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4683 SD_INFO(SD_LOG_COMMON, un, 4684 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4685 SD_INFO(SD_LOG_COMMON, un, 4686 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4687 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4688 (void *)pgeom_p, capacity); 4689 4690 /* 4691 * Compensate if the drive's geometry is not rectangular, i.e., 4692 * the product of C * H * S returned by MODE SENSE >= that returned 4693 * by read capacity. This is an idiosyncrasy of the original x86 4694 * disk subsystem. 
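* Continuing the example above (invented numbers): with spc = 1008, a * MODE SENSE capacity of 1032192 blocks and a READ CAPACITY value of * 1028160 blocks, the 4032 surplus blocks round up to 4032 / 1008 = 4 * alternate cylinders, so g_acyl becomes 4 and g_ncyl becomes * 1024 - 4 = 1020.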
4695 */ 4696 if (modesense_capacity >= capacity) { 4697 SD_INFO(SD_LOG_COMMON, un, 4698 "sd_get_physical_geometry: adjusting acyl; " 4699 "old: %d; new: %d\n", pgeom_p->g_acyl, 4700 (modesense_capacity - capacity + spc - 1) / spc); 4701 if (sector_size != 0) { 4702 /* 1243403: NEC D38x7 drives don't support sec size */ 4703 pgeom_p->g_secsize = (unsigned short)sector_size; 4704 } 4705 pgeom_p->g_nsect = (unsigned short)nsect; 4706 pgeom_p->g_nhead = (unsigned short)nhead; 4707 pgeom_p->g_capacity = capacity; 4708 pgeom_p->g_acyl = 4709 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4710 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4711 } 4712 4713 pgeom_p->g_rpm = (unsigned short)rpm; 4714 pgeom_p->g_intrlv = (unsigned short)intrlv; 4715 ret = 0; 4716 4717 SD_INFO(SD_LOG_COMMON, un, 4718 "sd_get_physical_geometry: mode sense geometry:\n"); 4719 SD_INFO(SD_LOG_COMMON, un, 4720 " nsect: %d; sector size: %d; interlv: %d\n", 4721 nsect, sector_size, intrlv); 4722 SD_INFO(SD_LOG_COMMON, un, 4723 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4724 nhead, ncyl, rpm, modesense_capacity); 4725 SD_INFO(SD_LOG_COMMON, un, 4726 "sd_get_physical_geometry: (cached)\n"); 4727 SD_INFO(SD_LOG_COMMON, un, 4728 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4729 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4730 pgeom_p->g_nhead, pgeom_p->g_nsect); 4731 SD_INFO(SD_LOG_COMMON, un, 4732 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4733 pgeom_p->g_secsize, pgeom_p->g_capacity, 4734 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4735 4736 page4_exit: 4737 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4738 page3_exit: 4739 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4740 4741 return (ret); 4742 } 4743 4744 /* 4745 * Function: sd_get_virtual_geometry 4746 * 4747 * Description: Ask the controller to tell us about the target device. 4748 * 4749 * Arguments: un - pointer to softstate 4750 * capacity - disk capacity in #blocks 4751 * lbasize - disk block size in bytes 4752 * 4753 * Context: Kernel thread only 4754 */ 4755 4756 static int 4757 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4758 diskaddr_t capacity, int lbasize) 4759 { 4760 uint_t geombuf; 4761 int spc; 4762 4763 ASSERT(un != NULL); 4764 4765 /* Set sector size, and total number of sectors */ 4766 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4767 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4768 4769 /* Let the HBA tell us its geometry */ 4770 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4771 4772 /* A value of -1 indicates an undefined "geometry" property */ 4773 if (geombuf == (-1)) { 4774 return (EINVAL); 4775 } 4776 4777 /* Initialize the logical geometry cache. */ 4778 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4779 lgeom_p->g_nsect = geombuf & 0xffff; 4780 lgeom_p->g_secsize = un->un_sys_blocksize; 4781 4782 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4783 4784 /* 4785 * Note: The driver originally converted the capacity value from 4786 * target blocks to system blocks. However, the capacity value passed 4787 * to this routine is already in terms of system blocks (this scaling 4788 * is done when the READ CAPACITY command is issued and processed). 4789 * This 'error' may have gone undetected because the usage of g_ncyl 4790 * (which is based upon g_capacity) is very limited within the driver 4791 */ 4792 lgeom_p->g_capacity = capacity; 4793 4794 /* 4795 * Set ncyl to zero if the hba returned a zero nhead or nsect value. 
The 4796 * hba may return zero values if the device has been removed. 4797 */ 4798 if (spc == 0) { 4799 lgeom_p->g_ncyl = 0; 4800 } else { 4801 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4802 } 4803 lgeom_p->g_acyl = 0; 4804 4805 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4806 return (0); 4807 4808 } 4809 /* 4810 * Function: sd_update_block_info 4811 * 4812 * Description: Record a new target block size and/or capacity in the 4813 * driver soft state and mark the saved values as valid. 4814 * 4815 * Arguments: un: unit struct. 4816 * lbasize: new target sector size 4817 * capacity: new target capacity, i.e. block count 4818 * 4819 * Context: Kernel thread context 4820 */ 4821 4822 static void 4823 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4824 { 4825 if (lbasize != 0) { 4826 un->un_tgt_blocksize = lbasize; 4827 un->un_f_tgt_blocksize_is_valid = TRUE; 4828 } 4829 4830 if (capacity != 0) { 4831 un->un_blockcount = capacity; 4832 un->un_f_blockcount_is_valid = TRUE; 4833 } 4834 } 4835 4836 4837 /* 4838 * Function: sd_register_devid 4839 * 4840 * Description: This routine will obtain the device id information from the 4841 * target, obtain the serial number, and register the device 4842 * id with the ddi framework. 4843 * 4844 * Arguments: devi - the system's dev_info_t for the device. 4845 * un - driver soft state (unit) structure 4846 * reservation_flag - indicates if a reservation conflict 4847 * occurred during attach 4848 * 4849 * Context: Kernel Thread 4850 */ 4851 static void 4852 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag) 4853 { 4854 int rval = 0; 4855 uchar_t *inq80 = NULL; 4856 size_t inq80_len = MAX_INQUIRY_SIZE; 4857 size_t inq80_resid = 0; 4858 uchar_t *inq83 = NULL; 4859 size_t inq83_len = MAX_INQUIRY_SIZE; 4860 size_t inq83_resid = 0; 4861 int dlen, len; 4862 char *sn; 4863 4864 ASSERT(un != NULL); 4865 ASSERT(mutex_owned(SD_MUTEX(un))); 4866 ASSERT((SD_DEVINFO(un)) == devi); 4867 4868 /* 4869 * If transport has already registered a devid for this target 4870 * then that takes precedence over the driver's determination 4871 * of the devid. 4872 */ 4873 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 4874 ASSERT(un->un_devid); 4875 return; /* use devid registered by the transport */ 4876 } 4877 4878 /* 4879 * This is the case of antiquated Sun disk drives that have the 4880 * FAB_DEVID property set in the disk_table. These drives 4881 * manage their devids by storing them in the last 2 available 4882 * sectors on the drive and have them fabricated by the ddi layer 4883 * by calling ddi_devid_init and passing the DEVID_FAB flag. 4884 */ 4885 if (un->un_f_opt_fab_devid == TRUE) { 4886 /* 4887 * Relying on EINVAL isn't reliable, since a reserved disk 4888 * may result in invalid geometry, so check to make sure a 4889 * reservation conflict did not occur during attach. 4890 */ 4891 if ((sd_get_devid(un) == EINVAL) && 4892 (reservation_flag != SD_TARGET_IS_RESERVED)) { 4893 /* 4894 * The devid is invalid AND there is no reservation 4895 * conflict. Fabricate a new devid.
4896 */ 4897 (void) sd_create_devid(un); 4898 } 4899 4900 /* Register the devid if it exists */ 4901 if (un->un_devid != NULL) { 4902 (void) ddi_devid_register(SD_DEVINFO(un), 4903 un->un_devid); 4904 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4905 "sd_register_devid: Devid Fabricated\n"); 4906 } 4907 return; 4908 } 4909 4910 /* 4911 * We check the availability of the World Wide Name (0x83) and Unit 4912 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 4913 * un_vpd_page_mask from them, we decide which way to get the WWN. If 4914 * 0x83 is available, that is the best choice. Our next choice is 4915 * 0x80. If neither is available, we munge the devid from the device 4916 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 4917 * to fabricate a devid for non-Sun qualified disks. 4918 */ 4919 if (sd_check_vpd_page_support(un) == 0) { 4920 /* collect page 80 data if available */ 4921 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 4922 4923 mutex_exit(SD_MUTEX(un)); 4924 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 4925 rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len, 4926 0x01, 0x80, &inq80_resid); 4927 4928 if (rval != 0) { 4929 kmem_free(inq80, inq80_len); 4930 inq80 = NULL; 4931 inq80_len = 0; 4932 } else if (ddi_prop_exists( 4933 DDI_DEV_T_NONE, SD_DEVINFO(un), 4934 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 4935 INQUIRY_SERIAL_NO) == 0) { 4936 /* 4937 * If we don't already have a serial number 4938 * property, do a quick verify of the data 4939 * returned and define the property. 4940 */ 4941 dlen = inq80_len - inq80_resid; 4942 len = (size_t)inq80[3]; 4943 if ((dlen >= 4) && ((len + 4) <= dlen)) { 4944 /* 4945 * Ensure sn termination, skip leading 4946 * blanks, and create property 4947 * 'inquiry-serial-no'. 4948 */ 4949 sn = (char *)&inq80[4]; 4950 sn[len] = 0; 4951 while (*sn && (*sn == ' ')) 4952 sn++; 4953 if (*sn) { 4954 (void) ddi_prop_update_string( 4955 DDI_DEV_T_NONE, 4956 SD_DEVINFO(un), 4957 INQUIRY_SERIAL_NO, sn); 4958 } 4959 } 4960 } 4961 mutex_enter(SD_MUTEX(un)); 4962 } 4963 4964 /* collect page 83 data if available */ 4965 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 4966 mutex_exit(SD_MUTEX(un)); 4967 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 4968 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len, 4969 0x01, 0x83, &inq83_resid); 4970 4971 if (rval != 0) { 4972 kmem_free(inq83, inq83_len); 4973 inq83 = NULL; 4974 inq83_len = 0; 4975 } 4976 mutex_enter(SD_MUTEX(un)); 4977 } 4978 } 4979 4980 /* encode best devid possible based on data available */ 4981 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 4982 (char *)ddi_driver_name(SD_DEVINFO(un)), 4983 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 4984 inq80, inq80_len - inq80_resid, inq83, inq83_len - 4985 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 4986 4987 /* devid successfully encoded, register devid */ 4988 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 4989 4990 } else { 4991 /* 4992 * Unable to encode a devid based on data available. 4993 * This is not a Sun qualified disk. Older Sun disk 4994 * drives that have the SD_FAB_DEVID property 4995 * set in the disk_table and non-Sun qualified 4996 * disks are treated in the same manner. These 4997 * drives manage their devids by storing them in 4998 * the last 2 available sectors on the drive and 4999 * have them fabricated by the ddi layer by 5000 * calling ddi_devid_init and passing the 5001 * DEVID_FAB flag. 5002 * Create a fabricated devid only if one does 5003 * not already exist.
5004 */ 5005 if (sd_get_devid(un) == EINVAL) { 5006 (void) sd_create_devid(un); 5007 } 5008 un->un_f_opt_fab_devid = TRUE; 5009 5010 /* Register the devid if it exists */ 5011 if (un->un_devid != NULL) { 5012 (void) ddi_devid_register(SD_DEVINFO(un), 5013 un->un_devid); 5014 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5015 "sd_register_devid: devid fabricated using " 5016 "ddi framework\n"); 5017 } 5018 } 5019 5020 /* clean up resources */ 5021 if (inq80 != NULL) { 5022 kmem_free(inq80, inq80_len); 5023 } 5024 if (inq83 != NULL) { 5025 kmem_free(inq83, inq83_len); 5026 } 5027 } 5028 5029 5030 5031 /* 5032 * Function: sd_get_devid 5033 * 5034 * Description: This routine will return 0 if a valid device id has been 5035 * obtained from the target and stored in the soft state. If a 5036 * valid device id has not been previously read and stored, a 5037 * read attempt will be made. 5038 * 5039 * Arguments: un - driver soft state (unit) structure 5040 * 5041 * Return Code: 0 if we successfully get the device id 5042 * 5043 * Context: Kernel Thread 5044 */ 5045 5046 static int 5047 sd_get_devid(struct sd_lun *un) 5048 { 5049 struct dk_devid *dkdevid; 5050 ddi_devid_t tmpid; 5051 uint_t *ip; 5052 size_t sz; 5053 diskaddr_t blk; 5054 int status; 5055 int chksum; 5056 int i; 5057 size_t buffer_size; 5058 5059 ASSERT(un != NULL); 5060 ASSERT(mutex_owned(SD_MUTEX(un))); 5061 5062 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 5063 un); 5064 5065 if (un->un_devid != NULL) { 5066 return (0); 5067 } 5068 5069 mutex_exit(SD_MUTEX(un)); 5070 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5071 (void *)SD_PATH_DIRECT) != 0) { 5072 mutex_enter(SD_MUTEX(un)); 5073 return (EINVAL); 5074 } 5075 5076 /* 5077 * Read and verify the device id, stored in the reserved cylinders 5078 * at the end of the disk. The backup label is on the odd sectors 5079 * of the last track of the last cylinder. The device id is on a 5080 * track of the next-to-last cylinder.
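* The dk_devid block read below carries a two-byte revision * (DK_DEVID_REV_MSB/DK_DEVID_REV_LSB), the encoded devid itself, and a * checksum word that must equal the XOR of all the other words in the * block; the validation steps below check each of these in turn.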
5081 */ 5082 mutex_enter(SD_MUTEX(un)); 5083 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 5084 mutex_exit(SD_MUTEX(un)); 5085 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 5086 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 5087 SD_PATH_DIRECT); 5088 if (status != 0) { 5089 goto error; 5090 } 5091 5092 /* Validate the revision */ 5093 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 5094 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 5095 status = EINVAL; 5096 goto error; 5097 } 5098 5099 /* Calculate the checksum */ 5100 chksum = 0; 5101 ip = (uint_t *)dkdevid; 5102 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5103 i++) { 5104 chksum ^= ip[i]; 5105 } 5106 5107 /* Compare the checksums */ 5108 if (DKD_GETCHKSUM(dkdevid) != chksum) { 5109 status = EINVAL; 5110 goto error; 5111 } 5112 5113 /* Validate the device id */ 5114 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 5115 status = EINVAL; 5116 goto error; 5117 } 5118 5119 /* 5120 * Store the device id in the driver soft state 5121 */ 5122 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 5123 tmpid = kmem_alloc(sz, KM_SLEEP); 5124 5125 mutex_enter(SD_MUTEX(un)); 5126 5127 un->un_devid = tmpid; 5128 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 5129 5130 kmem_free(dkdevid, buffer_size); 5131 5132 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 5133 5134 return (status); 5135 error: 5136 mutex_enter(SD_MUTEX(un)); 5137 kmem_free(dkdevid, buffer_size); 5138 return (status); 5139 } 5140 5141 5142 /* 5143 * Function: sd_create_devid 5144 * 5145 * Description: This routine will fabricate the device id and write it 5146 * to the disk. 5147 * 5148 * Arguments: un - driver soft state (unit) structure 5149 * 5150 * Return Code: value of the fabricated device id 5151 * 5152 * Context: Kernel Thread 5153 */ 5154 5155 static ddi_devid_t 5156 sd_create_devid(struct sd_lun *un) 5157 { 5158 ASSERT(un != NULL); 5159 5160 /* Fabricate the devid */ 5161 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 5162 == DDI_FAILURE) { 5163 return (NULL); 5164 } 5165 5166 /* Write the devid to disk */ 5167 if (sd_write_deviceid(un) != 0) { 5168 ddi_devid_free(un->un_devid); 5169 un->un_devid = NULL; 5170 } 5171 5172 return (un->un_devid); 5173 } 5174 5175 5176 /* 5177 * Function: sd_write_deviceid 5178 * 5179 * Description: This routine will write the device id to the disk 5180 * reserved sector. 
5181 * 5182 * Arguments: un - driver soft state (unit) structure 5183 * 5184 * Return Code: EINVAL 5185 * value returned by sd_send_scsi_cmd 5186 * 5187 * Context: Kernel Thread 5188 */ 5189 5190 static int 5191 sd_write_deviceid(struct sd_lun *un) 5192 { 5193 struct dk_devid *dkdevid; 5194 diskaddr_t blk; 5195 uint_t *ip, chksum; 5196 int status; 5197 int i; 5198 5199 ASSERT(mutex_owned(SD_MUTEX(un))); 5200 5201 mutex_exit(SD_MUTEX(un)); 5202 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5203 (void *)SD_PATH_DIRECT) != 0) { 5204 mutex_enter(SD_MUTEX(un)); 5205 return (-1); 5206 } 5207 5208 5209 /* Allocate the buffer */ 5210 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 5211 5212 /* Fill in the revision */ 5213 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 5214 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 5215 5216 /* Copy in the device id */ 5217 mutex_enter(SD_MUTEX(un)); 5218 bcopy(un->un_devid, &dkdevid->dkd_devid, 5219 ddi_devid_sizeof(un->un_devid)); 5220 mutex_exit(SD_MUTEX(un)); 5221 5222 /* Calculate the checksum */ 5223 chksum = 0; 5224 ip = (uint_t *)dkdevid; 5225 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5226 i++) { 5227 chksum ^= ip[i]; 5228 } 5229 5230 /* Fill-in checksum */ 5231 DKD_FORMCHKSUM(chksum, dkdevid); 5232 5233 /* Write the reserved sector */ 5234 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk, 5235 SD_PATH_DIRECT); 5236 5237 kmem_free(dkdevid, un->un_sys_blocksize); 5238 5239 mutex_enter(SD_MUTEX(un)); 5240 return (status); 5241 } 5242 5243 5244 /* 5245 * Function: sd_check_vpd_page_support 5246 * 5247 * Description: This routine sends an inquiry command with the EVPD bit set and 5248 * a page code of 0x00 to the device. It is used to determine which 5249 * vital product pages are available to find the devid. We are 5250 * looking for pages 0x83 or 0x80. If we return a negative 1, the 5251 * device does not support that command. 5252 * 5253 * Arguments: un - driver soft state (unit) structure 5254 * 5255 * Return Code: 0 - success 5256 * 1 - check condition 5257 * 5258 * Context: This routine can sleep. 5259 */ 5260 5261 static int 5262 sd_check_vpd_page_support(struct sd_lun *un) 5263 { 5264 uchar_t *page_list = NULL; 5265 uchar_t page_length = 0xff; /* Use max possible length */ 5266 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5267 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5268 int rval = 0; 5269 int counter; 5270 5271 ASSERT(un != NULL); 5272 ASSERT(mutex_owned(SD_MUTEX(un))); 5273 5274 mutex_exit(SD_MUTEX(un)); 5275 5276 /* 5277 * We'll set the page length to the maximum to save figuring it out 5278 * with an additional call. 5279 */ 5280 page_list = kmem_zalloc(page_length, KM_SLEEP); 5281 5282 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 5283 page_code, NULL); 5284 5285 mutex_enter(SD_MUTEX(un)); 5286 5287 /* 5288 * Now we must validate that the device accepted the command, as some 5289 * drives do not support it. If the drive does support it, we will 5290 * return 0, and the supported pages will be in un_vpd_page_mask. If 5291 * not, we return -1. 5292 */ 5293 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5294 /* Loop to find one of the 2 pages we need */ 5295 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5296 5297 /* 5298 * Pages are returned in ascending order, and 0x83 is what we 5299 * are hoping for. 
5300 */ 5301 while ((page_list[counter] <= 0x86) && 5302 (counter <= (page_list[VPD_PAGE_LENGTH] + 5303 VPD_HEAD_OFFSET))) { 5304 /* 5305 * Add 3 because page_list[3] is the number of 5306 * pages minus 3 5307 */ 5308 5309 switch (page_list[counter]) { 5310 case 0x00: 5311 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5312 break; 5313 case 0x80: 5314 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5315 break; 5316 case 0x81: 5317 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5318 break; 5319 case 0x82: 5320 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5321 break; 5322 case 0x83: 5323 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5324 break; 5325 case 0x86: 5326 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5327 break; 5328 } 5329 counter++; 5330 } 5331 5332 } else { 5333 rval = -1; 5334 5335 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5336 "sd_check_vpd_page_support: This drive does not implement " 5337 "VPD pages.\n"); 5338 } 5339 5340 kmem_free(page_list, page_length); 5341 5342 return (rval); 5343 } 5344 5345 5346 /* 5347 * Function: sd_setup_pm 5348 * 5349 * Description: Initialize Power Management on the device 5350 * 5351 * Context: Kernel Thread 5352 */ 5353 5354 static void 5355 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 5356 { 5357 uint_t log_page_size; 5358 uchar_t *log_page_data; 5359 int rval; 5360 5361 /* 5362 * Since we are called from attach, holding a mutex for 5363 * un is unnecessary. Because some of the routines called 5364 * from here require SD_MUTEX to not be held, assert this 5365 * right up front. 5366 */ 5367 ASSERT(!mutex_owned(SD_MUTEX(un))); 5368 /* 5369 * Since the sd device does not have the 'reg' property, 5370 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5371 * The following code is to tell cpr that this device 5372 * DOES need to be suspended and resumed. 5373 */ 5374 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5375 "pm-hardware-state", "needs-suspend-resume"); 5376 5377 /* 5378 * This complies with the new power management framework 5379 * for certain desktop machines. Create the pm_components 5380 * property as a string array property. 5381 */ 5382 if (un->un_f_pm_supported) { 5383 /* 5384 * not all devices have a motor, try it first. 5385 * some devices may return ILLEGAL REQUEST, some 5386 * will hang 5387 * The following START_STOP_UNIT is used to check if target 5388 * device has a motor. 5389 */ 5390 un->un_f_start_stop_supported = TRUE; 5391 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 5392 SD_PATH_DIRECT) != 0) { 5393 un->un_f_start_stop_supported = FALSE; 5394 } 5395 5396 /* 5397 * create pm properties anyways otherwise the parent can't 5398 * go to sleep 5399 */ 5400 (void) sd_create_pm_components(devi, un); 5401 un->un_f_pm_is_enabled = TRUE; 5402 return; 5403 } 5404 5405 if (!un->un_f_log_sense_supported) { 5406 un->un_power_level = SD_SPINDLE_ON; 5407 un->un_f_pm_is_enabled = FALSE; 5408 return; 5409 } 5410 5411 rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE); 5412 5413 #ifdef SDDEBUG 5414 if (sd_force_pm_supported) { 5415 /* Force a successful result */ 5416 rval = 1; 5417 } 5418 #endif 5419 5420 /* 5421 * If the start-stop cycle counter log page is not supported 5422 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 5423 * then we should not create the pm_components property. 5424 */ 5425 if (rval == -1) { 5426 /* 5427 * Error. 5428 * Reading log sense failed, most likely this is 5429 * an older drive that does not support log sense. 5430 * If this fails auto-pm is not supported. 
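 *
 * [Editorial sketch] The branches below key off the return value of
 * sd_log_page_supported(); as used here, -1 means the LOG SENSE
 * command failed, 0 means the page is absent, and 1 means the page
 * is present:
 */
#if 0
	switch (sd_log_page_supported(un, START_STOP_CYCLE_PAGE)) {
	case -1:	/* LOG SENSE failed: no auto-pm */
		break;
	case 0:		/* page absent: try START_STOP_CYCLE_VU_PAGE */
		break;
	case 1:		/* page present: auto-pm is possible */
		break;
	}
#endif
/*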
5431 */ 5432 un->un_power_level = SD_SPINDLE_ON; 5433 un->un_f_pm_is_enabled = FALSE; 5434 5435 } else if (rval == 0) { 5436 /* 5437 * Page not found. 5438 * The start stop cycle counter is implemented as page 5439 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 5440 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5441 */ 5442 if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) { 5443 /* 5444 * Page found, use this one. 5445 */ 5446 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5447 un->un_f_pm_is_enabled = TRUE; 5448 } else { 5449 /* 5450 * Error or page not found. 5451 * auto-pm is not supported for this device. 5452 */ 5453 un->un_power_level = SD_SPINDLE_ON; 5454 un->un_f_pm_is_enabled = FALSE; 5455 } 5456 } else { 5457 /* 5458 * Page found, use it. 5459 */ 5460 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5461 un->un_f_pm_is_enabled = TRUE; 5462 } 5463 5464 5465 if (un->un_f_pm_is_enabled == TRUE) { 5466 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5467 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5468 5469 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 5470 log_page_size, un->un_start_stop_cycle_page, 5471 0x01, 0, SD_PATH_DIRECT); 5472 #ifdef SDDEBUG 5473 if (sd_force_pm_supported) { 5474 /* Force a successful result */ 5475 rval = 0; 5476 } 5477 #endif 5478 5479 /* 5480 * If the Log sense for Page( Start/stop cycle counter page) 5481 * succeeds, then power management is supported and we can 5482 * enable auto-pm. 5483 */ 5484 if (rval == 0) { 5485 (void) sd_create_pm_components(devi, un); 5486 } else { 5487 un->un_power_level = SD_SPINDLE_ON; 5488 un->un_f_pm_is_enabled = FALSE; 5489 } 5490 5491 kmem_free(log_page_data, log_page_size); 5492 } 5493 } 5494 5495 5496 /* 5497 * Function: sd_create_pm_components 5498 * 5499 * Description: Initialize PM property. 5500 * 5501 * Context: Kernel thread context 5502 */ 5503 5504 static void 5505 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5506 { 5507 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5508 5509 ASSERT(!mutex_owned(SD_MUTEX(un))); 5510 5511 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5512 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5513 /* 5514 * When components are initially created they are idle, 5515 * power up any non-removables. 5516 * Note: the return value of pm_raise_power can't be used 5517 * for determining if PM should be enabled for this device. 5518 * Even if you check the return values and remove this 5519 * property created above, the PM framework will not honor the 5520 * change after the first call to pm_raise_power. Hence, 5521 * removal of that property does not help if pm_raise_power 5522 * fails. In the case of removable media, the start/stop 5523 * will fail if the media is not present. 5524 */ 5525 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5526 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5527 mutex_enter(SD_MUTEX(un)); 5528 un->un_power_level = SD_SPINDLE_ON; 5529 mutex_enter(&un->un_pm_mutex); 5530 /* Set to on and not busy. */ 5531 un->un_pm_count = 0; 5532 } else { 5533 mutex_enter(SD_MUTEX(un)); 5534 un->un_power_level = SD_SPINDLE_OFF; 5535 mutex_enter(&un->un_pm_mutex); 5536 /* Set to off. 
*/
5537 un->un_pm_count = -1;
5538 }
5539 mutex_exit(&un->un_pm_mutex);
5540 mutex_exit(SD_MUTEX(un));
5541 } else {
5542 un->un_power_level = SD_SPINDLE_ON;
5543 un->un_f_pm_is_enabled = FALSE;
5544 }
5545 }
5546
5547
5548 /*
5549 * Function: sd_ddi_suspend
5550 *
5551 * Description: Performs system power-down operations. This includes
5552 * setting the drive state to indicate it is suspended so
5553 * that no new commands will be accepted. It also waits for
5554 * all commands that are in transport or queued to a timer
5555 * for retry to complete. All timeout threads are cancelled.
5556 *
5557 * Return Code: DDI_FAILURE or DDI_SUCCESS
5558 *
5559 * Context: Kernel thread context
5560 */
5561
5562 static int
5563 sd_ddi_suspend(dev_info_t *devi)
5564 {
5565 struct sd_lun *un;
5566 clock_t wait_cmds_complete;
5567
5568 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
5569 if (un == NULL) {
5570 return (DDI_FAILURE);
5571 }
5572
5573 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");
5574
5575 mutex_enter(SD_MUTEX(un));
5576
5577 /* Return success if the device is already suspended. */
5578 if (un->un_state == SD_STATE_SUSPENDED) {
5579 mutex_exit(SD_MUTEX(un));
5580 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
5581 "device already suspended, exiting\n");
5582 return (DDI_SUCCESS);
5583 }
5584
5585 /* Return failure if the device is being used by HA */
5586 if (un->un_resvd_status &
5587 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
5588 mutex_exit(SD_MUTEX(un));
5589 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
5590 "device in use by HA, exiting\n");
5591 return (DDI_FAILURE);
5592 }
5593
5594 /*
5595 * Return failure if the device is in a resource wait
5596 * or power changing state.
5597 */
5598 if ((un->un_state == SD_STATE_RWAIT) ||
5599 (un->un_state == SD_STATE_PM_CHANGING)) {
5600 mutex_exit(SD_MUTEX(un));
5601 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
5602 "device in resource wait state, exiting\n");
5603 return (DDI_FAILURE);
5604 }
5605
5606
5607 un->un_save_state = un->un_last_state;
5608 New_state(un, SD_STATE_SUSPENDED);
5609
5610 /*
5611 * Wait for all commands that are in transport or queued to a timer
5612 * for retry to complete.
5613 *
5614 * While waiting, no new commands will be accepted or sent because of
5615 * the new state we set above.
5616 *
5617 * Wait till the current operation has completed. If we are in the
5618 * resource wait state (with an intr outstanding) then we need to wait
5619 * till the intr completes and starts the next cmd. We want to wait for
5620 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND.
5621 */
5622 wait_cmds_complete = ddi_get_lbolt() +
5623 (sd_wait_cmds_complete * drv_usectohz(1000000));
5624
5625 while (un->un_ncmds_in_transport != 0) {
5626 /*
5627 * Fail if commands do not finish in the specified time.
5628 */
5629 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
5630 wait_cmds_complete) == -1) {
5631 /*
5632 * Undo the state changes made above. Everything
5633 * must go back to its original value.
5634 */
5635 Restore_state(un);
5636 un->un_last_state = un->un_save_state;
5637 /* Wake up any threads that might be waiting.
*/ 5638 cv_broadcast(&un->un_suspend_cv); 5639 mutex_exit(SD_MUTEX(un)); 5640 SD_ERROR(SD_LOG_IO_PM, un, 5641 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5642 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5643 return (DDI_FAILURE); 5644 } 5645 } 5646 5647 /* 5648 * Cancel SCSI watch thread and timeouts, if any are active 5649 */ 5650 5651 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5652 opaque_t temp_token = un->un_swr_token; 5653 mutex_exit(SD_MUTEX(un)); 5654 scsi_watch_suspend(temp_token); 5655 mutex_enter(SD_MUTEX(un)); 5656 } 5657 5658 if (un->un_reset_throttle_timeid != NULL) { 5659 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5660 un->un_reset_throttle_timeid = NULL; 5661 mutex_exit(SD_MUTEX(un)); 5662 (void) untimeout(temp_id); 5663 mutex_enter(SD_MUTEX(un)); 5664 } 5665 5666 if (un->un_dcvb_timeid != NULL) { 5667 timeout_id_t temp_id = un->un_dcvb_timeid; 5668 un->un_dcvb_timeid = NULL; 5669 mutex_exit(SD_MUTEX(un)); 5670 (void) untimeout(temp_id); 5671 mutex_enter(SD_MUTEX(un)); 5672 } 5673 5674 mutex_enter(&un->un_pm_mutex); 5675 if (un->un_pm_timeid != NULL) { 5676 timeout_id_t temp_id = un->un_pm_timeid; 5677 un->un_pm_timeid = NULL; 5678 mutex_exit(&un->un_pm_mutex); 5679 mutex_exit(SD_MUTEX(un)); 5680 (void) untimeout(temp_id); 5681 mutex_enter(SD_MUTEX(un)); 5682 } else { 5683 mutex_exit(&un->un_pm_mutex); 5684 } 5685 5686 if (un->un_retry_timeid != NULL) { 5687 timeout_id_t temp_id = un->un_retry_timeid; 5688 un->un_retry_timeid = NULL; 5689 mutex_exit(SD_MUTEX(un)); 5690 (void) untimeout(temp_id); 5691 mutex_enter(SD_MUTEX(un)); 5692 5693 if (un->un_retry_bp != NULL) { 5694 un->un_retry_bp->av_forw = un->un_waitq_headp; 5695 un->un_waitq_headp = un->un_retry_bp; 5696 if (un->un_waitq_tailp == NULL) { 5697 un->un_waitq_tailp = un->un_retry_bp; 5698 } 5699 un->un_retry_bp = NULL; 5700 un->un_retry_statp = NULL; 5701 } 5702 } 5703 5704 if (un->un_direct_priority_timeid != NULL) { 5705 timeout_id_t temp_id = un->un_direct_priority_timeid; 5706 un->un_direct_priority_timeid = NULL; 5707 mutex_exit(SD_MUTEX(un)); 5708 (void) untimeout(temp_id); 5709 mutex_enter(SD_MUTEX(un)); 5710 } 5711 5712 if (un->un_f_is_fibre == TRUE) { 5713 /* 5714 * Remove callbacks for insert and remove events 5715 */ 5716 if (un->un_insert_event != NULL) { 5717 mutex_exit(SD_MUTEX(un)); 5718 (void) ddi_remove_event_handler(un->un_insert_cb_id); 5719 mutex_enter(SD_MUTEX(un)); 5720 un->un_insert_event = NULL; 5721 } 5722 5723 if (un->un_remove_event != NULL) { 5724 mutex_exit(SD_MUTEX(un)); 5725 (void) ddi_remove_event_handler(un->un_remove_cb_id); 5726 mutex_enter(SD_MUTEX(un)); 5727 un->un_remove_event = NULL; 5728 } 5729 } 5730 5731 mutex_exit(SD_MUTEX(un)); 5732 5733 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 5734 5735 return (DDI_SUCCESS); 5736 } 5737 5738 5739 /* 5740 * Function: sd_ddi_pm_suspend 5741 * 5742 * Description: Set the drive state to low power. 5743 * Someone else is required to actually change the drive 5744 * power level. 5745 * 5746 * Arguments: un - driver soft state (unit) structure 5747 * 5748 * Return Code: DDI_FAILURE or DDI_SUCCESS 5749 * 5750 * Context: Kernel thread context 5751 */ 5752 5753 static int 5754 sd_ddi_pm_suspend(struct sd_lun *un) 5755 { 5756 ASSERT(un != NULL); 5757 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 5758 5759 ASSERT(!mutex_owned(SD_MUTEX(un))); 5760 mutex_enter(SD_MUTEX(un)); 5761 5762 /* 5763 * Exit if power management is not enabled for this device, or if 5764 * the device is being used by HA. 
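 *
 * [Editorial sketch] sd_ddi_suspend() above cancels each pending
 * timeout with the same idiom: capture the id, clear the soft state
 * field, drop SD_MUTEX around untimeout(9F) (which may wait for a
 * running handler), then re-acquire it. For example:
 */
#if 0
	if (un->un_retry_timeid != NULL) {
		timeout_id_t temp_id = un->un_retry_timeid;
		un->un_retry_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}
#endif
/*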
5765 */
5766 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
5767 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
5768 mutex_exit(SD_MUTEX(un));
5769 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
5770 return (DDI_SUCCESS);
5771 }
5772
5773 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
5774 un->un_ncmds_in_driver);
5775
5776 /*
5777 * See if the device is not busy, i.e.:
5778 * - we have no commands in the driver for this device
5779 * - not waiting for resources
5780 */
5781 if ((un->un_ncmds_in_driver == 0) &&
5782 (un->un_state != SD_STATE_RWAIT)) {
5783 /*
5784 * The device is not busy, so it is OK to go to low power state.
5785 * Indicate low power, but rely on someone else to actually
5786 * change it.
5787 */
5788 mutex_enter(&un->un_pm_mutex);
5789 un->un_pm_count = -1;
5790 mutex_exit(&un->un_pm_mutex);
5791 un->un_power_level = SD_SPINDLE_OFF;
5792 }
5793
5794 mutex_exit(SD_MUTEX(un));
5795
5796 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");
5797
5798 return (DDI_SUCCESS);
5799 }
5800
5801
5802 /*
5803 * Function: sd_ddi_resume
5804 *
5805 * Description: Performs system power-up operations.
5806 *
5807 * Return Code: DDI_SUCCESS
5808 * DDI_FAILURE
5809 *
5810 * Context: Kernel thread context
5811 */
5812
5813 static int
5814 sd_ddi_resume(dev_info_t *devi)
5815 {
5816 struct sd_lun *un;
5817
5818 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
5819 if (un == NULL) {
5820 return (DDI_FAILURE);
5821 }
5822
5823 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");
5824
5825 mutex_enter(SD_MUTEX(un));
5826 Restore_state(un);
5827
5828 /*
5829 * Restore the state which was saved, so that
5830 * un_last_state holds the right value.
5831 */
5832 un->un_last_state = un->un_save_state;
5833 /*
5834 * Note: throttle comes back at full.
5835 * Also note: this MUST be done before calling pm_raise_power,
5836 * otherwise the system can get hung in biowait. The scenario where
5837 * this happens is under cpr suspend. Writing of the system
5838 * state goes through sddump, which writes 0 to un_throttle. If
5839 * writing the system state then fails, for example if the partition
5840 * is too small, then cpr attempts a resume. If throttle isn't restored
5841 * from the saved value until after calling pm_raise_power, then
5842 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs
5843 * in biowait.
5844 */
5845 un->un_throttle = un->un_saved_throttle;
5846
5847 /*
5848 * The chance of failure is very rare, as the only command issued in
5849 * the power entry point is the START command when you transition from
5850 * 0->1 or unknown->1. Put it in the SPINDLE ON state irrespective of
5851 * the state at which suspend was done. Ignore the return value, as the
5852 * resume should not fail. In the case of removable media the media
5853 * need not be inserted and hence there is a chance that raise power
5854 * will fail with media not present.
5855 */
5856 if (un->un_f_attach_spinup) {
5857 mutex_exit(SD_MUTEX(un));
5858 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
5859 mutex_enter(SD_MUTEX(un));
5860 }
5861
5862 /*
5863 * Don't broadcast to the suspend cv and therefore possibly
5864 * start I/O until after power has been restored.
5865 */ 5866 cv_broadcast(&un->un_suspend_cv); 5867 cv_broadcast(&un->un_state_cv); 5868 5869 /* restart thread */ 5870 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 5871 scsi_watch_resume(un->un_swr_token); 5872 } 5873 5874 #if (defined(__fibre)) 5875 if (un->un_f_is_fibre == TRUE) { 5876 /* 5877 * Add callbacks for insert and remove events 5878 */ 5879 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 5880 sd_init_event_callbacks(un); 5881 } 5882 } 5883 #endif 5884 5885 /* 5886 * Transport any pending commands to the target. 5887 * 5888 * If this is a low-activity device commands in queue will have to wait 5889 * until new commands come in, which may take awhile. Also, we 5890 * specifically don't check un_ncmds_in_transport because we know that 5891 * there really are no commands in progress after the unit was 5892 * suspended and we could have reached the throttle level, been 5893 * suspended, and have no new commands coming in for awhile. Highly 5894 * unlikely, but so is the low-activity disk scenario. 5895 */ 5896 ddi_xbuf_dispatch(un->un_xbuf_attr); 5897 5898 sd_start_cmds(un, NULL); 5899 mutex_exit(SD_MUTEX(un)); 5900 5901 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 5902 5903 return (DDI_SUCCESS); 5904 } 5905 5906 5907 /* 5908 * Function: sd_ddi_pm_resume 5909 * 5910 * Description: Set the drive state to powered on. 5911 * Someone else is required to actually change the drive 5912 * power level. 5913 * 5914 * Arguments: un - driver soft state (unit) structure 5915 * 5916 * Return Code: DDI_SUCCESS 5917 * 5918 * Context: Kernel thread context 5919 */ 5920 5921 static int 5922 sd_ddi_pm_resume(struct sd_lun *un) 5923 { 5924 ASSERT(un != NULL); 5925 5926 ASSERT(!mutex_owned(SD_MUTEX(un))); 5927 mutex_enter(SD_MUTEX(un)); 5928 un->un_power_level = SD_SPINDLE_ON; 5929 5930 ASSERT(!mutex_owned(&un->un_pm_mutex)); 5931 mutex_enter(&un->un_pm_mutex); 5932 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 5933 un->un_pm_count++; 5934 ASSERT(un->un_pm_count == 0); 5935 /* 5936 * Note: no longer do the cv_broadcast on un_suspend_cv. The 5937 * un_suspend_cv is for a system resume, not a power management 5938 * device resume. (4297749) 5939 * cv_broadcast(&un->un_suspend_cv); 5940 */ 5941 } 5942 mutex_exit(&un->un_pm_mutex); 5943 mutex_exit(SD_MUTEX(un)); 5944 5945 return (DDI_SUCCESS); 5946 } 5947 5948 5949 /* 5950 * Function: sd_pm_idletimeout_handler 5951 * 5952 * Description: A timer routine that's active only while a device is busy. 5953 * The purpose is to extend slightly the pm framework's busy 5954 * view of the device to prevent busy/idle thrashing for 5955 * back-to-back commands. Do this by comparing the current time 5956 * to the time at which the last command completed and when the 5957 * difference is greater than sd_pm_idletime, call 5958 * pm_idle_component. In addition to indicating idle to the pm 5959 * framework, update the chain type to again use the internal pm 5960 * layers of the driver. 
5961 * 5962 * Arguments: arg - driver soft state (unit) structure 5963 * 5964 * Context: Executes in a timeout(9F) thread context 5965 */ 5966 5967 static void 5968 sd_pm_idletimeout_handler(void *arg) 5969 { 5970 struct sd_lun *un = arg; 5971 5972 time_t now; 5973 5974 mutex_enter(&sd_detach_mutex); 5975 if (un->un_detach_count != 0) { 5976 /* Abort if the instance is detaching */ 5977 mutex_exit(&sd_detach_mutex); 5978 return; 5979 } 5980 mutex_exit(&sd_detach_mutex); 5981 5982 now = ddi_get_time(); 5983 /* 5984 * Grab both mutexes, in the proper order, since we're accessing 5985 * both PM and softstate variables. 5986 */ 5987 mutex_enter(SD_MUTEX(un)); 5988 mutex_enter(&un->un_pm_mutex); 5989 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 5990 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 5991 /* 5992 * Update the chain types. 5993 * This takes affect on the next new command received. 5994 */ 5995 if (un->un_f_non_devbsize_supported) { 5996 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 5997 } else { 5998 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 5999 } 6000 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6001 6002 SD_TRACE(SD_LOG_IO_PM, un, 6003 "sd_pm_idletimeout_handler: idling device\n"); 6004 (void) pm_idle_component(SD_DEVINFO(un), 0); 6005 un->un_pm_idle_timeid = NULL; 6006 } else { 6007 un->un_pm_idle_timeid = 6008 timeout(sd_pm_idletimeout_handler, un, 6009 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 6010 } 6011 mutex_exit(&un->un_pm_mutex); 6012 mutex_exit(SD_MUTEX(un)); 6013 } 6014 6015 6016 /* 6017 * Function: sd_pm_timeout_handler 6018 * 6019 * Description: Callback to tell framework we are idle. 6020 * 6021 * Context: timeout(9f) thread context. 6022 */ 6023 6024 static void 6025 sd_pm_timeout_handler(void *arg) 6026 { 6027 struct sd_lun *un = arg; 6028 6029 (void) pm_idle_component(SD_DEVINFO(un), 0); 6030 mutex_enter(&un->un_pm_mutex); 6031 un->un_pm_timeid = NULL; 6032 mutex_exit(&un->un_pm_mutex); 6033 } 6034 6035 6036 /* 6037 * Function: sdpower 6038 * 6039 * Description: PM entry point. 6040 * 6041 * Return Code: DDI_SUCCESS 6042 * DDI_FAILURE 6043 * 6044 * Context: Kernel thread context 6045 */ 6046 6047 static int 6048 sdpower(dev_info_t *devi, int component, int level) 6049 { 6050 struct sd_lun *un; 6051 int instance; 6052 int rval = DDI_SUCCESS; 6053 uint_t i, log_page_size, maxcycles, ncycles; 6054 uchar_t *log_page_data; 6055 int log_sense_page; 6056 int medium_present; 6057 time_t intvlp; 6058 dev_t dev; 6059 struct pm_trans_data sd_pm_tran_data; 6060 uchar_t save_state; 6061 int sval; 6062 uchar_t state_before_pm; 6063 int got_semaphore_here; 6064 6065 instance = ddi_get_instance(devi); 6066 6067 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 6068 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 6069 component != 0) { 6070 return (DDI_FAILURE); 6071 } 6072 6073 dev = sd_make_device(SD_DEVINFO(un)); 6074 6075 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 6076 6077 /* 6078 * Must synchronize power down with close. 6079 * Attempt to decrement/acquire the open/close semaphore, 6080 * but do NOT wait on it. If it's not greater than zero, 6081 * ie. it can't be decremented without waiting, then 6082 * someone else, either open or close, already has it 6083 * and the try returns 0. Use that knowledge here to determine 6084 * if it's OK to change the device power level. 6085 * Also, only increment it on exit if it was decremented, ie. gotten, 6086 * here. 
6087 */
6088 got_semaphore_here = sema_tryp(&un->un_semoclose);
6089
6090 mutex_enter(SD_MUTEX(un));
6091
6092 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
6093 un->un_ncmds_in_driver);
6094
6095 /*
6096 * If un_ncmds_in_driver is non-zero it indicates commands are
6097 * already being processed in the driver; if the semaphore was
6098 * not gotten here it indicates an open or close is being processed.
6099 * At the same time somebody is requesting to go to low power,
6100 * which can't happen, so we need to return failure.
6101 */
6102 if ((level == SD_SPINDLE_OFF) &&
6103 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
6104 mutex_exit(SD_MUTEX(un));
6105
6106 if (got_semaphore_here != 0) {
6107 sema_v(&un->un_semoclose);
6108 }
6109 SD_TRACE(SD_LOG_IO_PM, un,
6110 "sdpower: exit, device has queued cmds.\n");
6111 return (DDI_FAILURE);
6112 }
6113
6114 /*
6115 * If the device is OFFLINE the disk is completely dead. In our case
6116 * we would have to turn the disk on or off by sending it commands,
6117 * and of course those would fail anyway, so return here.
6118 *
6119 * Power changes to a device that's OFFLINE or SUSPENDED
6120 * are not allowed.
6121 */
6122 if ((un->un_state == SD_STATE_OFFLINE) ||
6123 (un->un_state == SD_STATE_SUSPENDED)) {
6124 mutex_exit(SD_MUTEX(un));
6125
6126 if (got_semaphore_here != 0) {
6127 sema_v(&un->un_semoclose);
6128 }
6129 SD_TRACE(SD_LOG_IO_PM, un,
6130 "sdpower: exit, device is off-line.\n");
6131 return (DDI_FAILURE);
6132 }
6133
6134 /*
6135 * Change the device's state to indicate its power level
6136 * is being changed. Do this to prevent a power off in the
6137 * middle of commands, which is especially bad on devices
6138 * that are really powered off instead of just spun down.
6139 */
6140 state_before_pm = un->un_state;
6141 un->un_state = SD_STATE_PM_CHANGING;
6142
6143 mutex_exit(SD_MUTEX(un));
6144
6145 /*
6146 * If the "pm-capable" property is set to TRUE by HBA drivers,
6147 * bypass the following checking; otherwise, check the log
6148 * sense information for this device.
6149 */
6150 if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
6151 /*
6152 * Get the log sense information to understand whether the
6153 * powercycle counts have gone beyond the threshold.
6154 */
6155 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
6156 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
6157
6158 mutex_enter(SD_MUTEX(un));
6159 log_sense_page = un->un_start_stop_cycle_page;
6160 mutex_exit(SD_MUTEX(un));
6161
6162 rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
6163 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
6164 #ifdef SDDEBUG
6165 if (sd_force_pm_supported) {
6166 /* Force a successful result */
6167 rval = 0;
6168 }
6169 #endif
6170 if (rval != 0) {
6171 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
6172 "Log Sense Failed\n");
6173 kmem_free(log_page_data, log_page_size);
6174 /* Cannot support power management on those drives */
6175
6176 if (got_semaphore_here != 0) {
6177 sema_v(&un->un_semoclose);
6178 }
6179 /*
6180 * On exit put the state back to its original value
6181 * and broadcast to anyone waiting for the power
6182 * change completion.
6183 */
6184 mutex_enter(SD_MUTEX(un));
6185 un->un_state = state_before_pm;
6186 cv_broadcast(&un->un_suspend_cv);
6187 mutex_exit(SD_MUTEX(un));
6188 SD_TRACE(SD_LOG_IO_PM, un,
6189 "sdpower: exit, Log Sense Failed.\n");
6190 return (DDI_FAILURE);
6191 }
6192
6193 /*
6194 * From the page data, convert the essential information to
6195 * pm_trans_data.
6196 */
6197 maxcycles =
6198 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
6199 (log_page_data[0x1E] << 8) | log_page_data[0x1F];
6200
6201 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
6202
6203 ncycles =
6204 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
6205 (log_page_data[0x26] << 8) | log_page_data[0x27];
6206
6207 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
6208
6209 for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
6210 sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
6211 log_page_data[8+i];
6212 }
6213
6214 kmem_free(log_page_data, log_page_size);
6215
6216 /*
6217 * Call the pm_trans_check routine to get the OK from
6218 * the global policy.
6219 */
6220
6221 sd_pm_tran_data.format = DC_SCSI_FORMAT;
6222 sd_pm_tran_data.un.scsi_cycles.flag = 0;
6223
6224 rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
6225 #ifdef SDDEBUG
6226 if (sd_force_pm_supported) {
6227 /* Force a successful result */
6228 rval = 1;
6229 }
6230 #endif
6231 switch (rval) {
6232 case 0:
6233 /*
6234 * Either it is not OK to power cycle, or there was an
6235 * error in the parameters passed; pm_trans_check has
6236 * returned the advised time to wait before power
6237 * cycling. Based on the new intvlp parameter we are
6238 * supposed to pretend we are busy so that the pm
6239 * framework will never call our power entry point, so
6240 * install a timeout handler and wait for the recommended
6241 * time to elapse, letting power management be effective again.
6242 *
6243 * To effect this behavior, call pm_busy_component to
6244 * indicate to the framework this device is busy.
6245 * By not adjusting un_pm_count the rest of PM in
6246 * the driver will function normally, and independent
6247 * of this, but because the framework is told the device
6248 * is busy it won't attempt powering down until it gets
6249 * a matching idle. The timeout handler sends this.
6250 * Note: sd_pm_entry can't be called here to do this
6251 * because sdpower may have been called as a result
6252 * of a call to pm_raise_power from within sd_pm_entry.
6253 *
6254 * If a timeout handler is already active then
6255 * don't install another.
6256 */
6257 mutex_enter(&un->un_pm_mutex);
6258 if (un->un_pm_timeid == NULL) {
6259 un->un_pm_timeid =
6260 timeout(sd_pm_timeout_handler,
6261 un, intvlp * drv_usectohz(1000000));
6262 mutex_exit(&un->un_pm_mutex);
6263 (void) pm_busy_component(SD_DEVINFO(un), 0);
6264 } else {
6265 mutex_exit(&un->un_pm_mutex);
6266 }
6267 if (got_semaphore_here != 0) {
6268 sema_v(&un->un_semoclose);
6269 }
6270 /*
6271 * On exit put the state back to its original value
6272 * and broadcast to anyone waiting for the power
6273 * change completion.
6274 */
6275 mutex_enter(SD_MUTEX(un));
6276 un->un_state = state_before_pm;
6277 cv_broadcast(&un->un_suspend_cv);
6278 mutex_exit(SD_MUTEX(un));
6279
6280 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
6281 "trans check Failed, not ok to power cycle.\n");
6282 return (DDI_FAILURE);
6283
6284 case -1:
6285 if (got_semaphore_here != 0) {
6286 sema_v(&un->un_semoclose);
6287 }
6288 /*
6289 * On exit put the state back to its original value
6290 * and broadcast to anyone waiting for the power
6291 * change completion.
6292 */
6293 mutex_enter(SD_MUTEX(un));
6294 un->un_state = state_before_pm;
6295 cv_broadcast(&un->un_suspend_cv);
6296 mutex_exit(SD_MUTEX(un));
6297 SD_TRACE(SD_LOG_IO_PM, un,
6298 "sdpower: exit, trans check command Failed.\n");
6299 return (DDI_FAILURE);
6300 }
6301 }
6302
6303 if (level == SD_SPINDLE_OFF) {
6304 /*
6305 * Save the last state... if the STOP FAILS we need it
6306 * for restoring
6307 */
6308 mutex_enter(SD_MUTEX(un));
6309 save_state = un->un_last_state;
6310 /*
6311 * There must not be any commands being processed
6312 * in the driver when we get here. Power to the
6313 * device is potentially going off.
6314 */
6315 ASSERT(un->un_ncmds_in_driver == 0);
6316 mutex_exit(SD_MUTEX(un));
6317
6318 /*
6319 * For now suspend the device completely before spindle is
6320 * turned off
6321 */
6322 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
6323 if (got_semaphore_here != 0) {
6324 sema_v(&un->un_semoclose);
6325 }
6326 /*
6327 * On exit put the state back to its original value
6328 * and broadcast to anyone waiting for the power
6329 * change completion.
6330 */
6331 mutex_enter(SD_MUTEX(un));
6332 un->un_state = state_before_pm;
6333 cv_broadcast(&un->un_suspend_cv);
6334 mutex_exit(SD_MUTEX(un));
6335 SD_TRACE(SD_LOG_IO_PM, un,
6336 "sdpower: exit, PM suspend Failed.\n");
6337 return (DDI_FAILURE);
6338 }
6339 }
6340
6341 /*
6342 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
6343 * close, or strategy. Dump no longer uses this routine; it uses its
6344 * own code so it can be done in polled mode.
6345 */
6346
6347 medium_present = TRUE;
6348
6349 /*
6350 * When powering up, issue a TUR in case the device is at unit
6351 * attention. Don't do retries. Bypass the PM layer, otherwise
6352 * a deadlock on un_pm_busy_cv will occur.
6353 */
6354 if (level == SD_SPINDLE_ON) {
6355 (void) sd_send_scsi_TEST_UNIT_READY(un,
6356 SD_DONT_RETRY_TUR | SD_BYPASS_PM);
6357 }
6358
6359 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
6360 ((level == SD_SPINDLE_ON) ? "START" : "STOP"));
6361
6362 sval = sd_send_scsi_START_STOP_UNIT(un,
6363 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
6364 SD_PATH_DIRECT);
6365 /* Command failed, check for media present. */
6366 if ((sval == ENXIO) && un->un_f_has_removable_media) {
6367 medium_present = FALSE;
6368 }
6369
6370 /*
6371 * The conditions of interest here are:
6372 * if a spindle off with media present fails,
6373 * then restore the state and return an error.
6374 * else if a spindle on fails,
6375 * then return an error (there's no state to restore).
6376 * In all other cases we set up for the new state
6377 * and return success.
6378 */
6379 switch (level) {
6380 case SD_SPINDLE_OFF:
6381 if ((medium_present == TRUE) && (sval != 0)) {
6382 /* The stop command from above failed */
6383 rval = DDI_FAILURE;
6384 /*
6385 * The stop command failed, and we have media
6386 * present. Put the level back by calling
6387 * sd_ddi_pm_resume() and set the state back to
6388 * its previous value.
6389 */
6390 (void) sd_ddi_pm_resume(un);
6391 mutex_enter(SD_MUTEX(un));
6392 un->un_last_state = save_state;
6393 mutex_exit(SD_MUTEX(un));
6394 break;
6395 }
6396 /*
6397 * The stop command from above succeeded.
6398 */
6399 if (un->un_f_monitor_media_state) {
6400 /*
6401 * Terminate watch thread in case of removable media
6402 * devices going into low power state.
This is as per 6403 * the requirements of pm framework, otherwise commands 6404 * will be generated for the device (through watch 6405 * thread), even when the device is in low power state. 6406 */ 6407 mutex_enter(SD_MUTEX(un)); 6408 un->un_f_watcht_stopped = FALSE; 6409 if (un->un_swr_token != NULL) { 6410 opaque_t temp_token = un->un_swr_token; 6411 un->un_f_watcht_stopped = TRUE; 6412 un->un_swr_token = NULL; 6413 mutex_exit(SD_MUTEX(un)); 6414 (void) scsi_watch_request_terminate(temp_token, 6415 SCSI_WATCH_TERMINATE_ALL_WAIT); 6416 } else { 6417 mutex_exit(SD_MUTEX(un)); 6418 } 6419 } 6420 break; 6421 6422 default: /* The level requested is spindle on... */ 6423 /* 6424 * Legacy behavior: return success on a failed spinup 6425 * if there is no media in the drive. 6426 * Do this by looking at medium_present here. 6427 */ 6428 if ((sval != 0) && medium_present) { 6429 /* The start command from above failed */ 6430 rval = DDI_FAILURE; 6431 break; 6432 } 6433 /* 6434 * The start command from above succeeded 6435 * Resume the devices now that we have 6436 * started the disks 6437 */ 6438 (void) sd_ddi_pm_resume(un); 6439 6440 /* 6441 * Resume the watch thread since it was suspended 6442 * when the device went into low power mode. 6443 */ 6444 if (un->un_f_monitor_media_state) { 6445 mutex_enter(SD_MUTEX(un)); 6446 if (un->un_f_watcht_stopped == TRUE) { 6447 opaque_t temp_token; 6448 6449 un->un_f_watcht_stopped = FALSE; 6450 mutex_exit(SD_MUTEX(un)); 6451 temp_token = scsi_watch_request_submit( 6452 SD_SCSI_DEVP(un), 6453 sd_check_media_time, 6454 SENSE_LENGTH, sd_media_watch_cb, 6455 (caddr_t)dev); 6456 mutex_enter(SD_MUTEX(un)); 6457 un->un_swr_token = temp_token; 6458 } 6459 mutex_exit(SD_MUTEX(un)); 6460 } 6461 } 6462 if (got_semaphore_here != 0) { 6463 sema_v(&un->un_semoclose); 6464 } 6465 /* 6466 * On exit put the state back to it's original value 6467 * and broadcast to anyone waiting for the power 6468 * change completion. 6469 */ 6470 mutex_enter(SD_MUTEX(un)); 6471 un->un_state = state_before_pm; 6472 cv_broadcast(&un->un_suspend_cv); 6473 mutex_exit(SD_MUTEX(un)); 6474 6475 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6476 6477 return (rval); 6478 } 6479 6480 6481 6482 /* 6483 * Function: sdattach 6484 * 6485 * Description: Driver's attach(9e) entry point function. 6486 * 6487 * Arguments: devi - opaque device info handle 6488 * cmd - attach type 6489 * 6490 * Return Code: DDI_SUCCESS 6491 * DDI_FAILURE 6492 * 6493 * Context: Kernel thread context 6494 */ 6495 6496 static int 6497 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6498 { 6499 switch (cmd) { 6500 case DDI_ATTACH: 6501 return (sd_unit_attach(devi)); 6502 case DDI_RESUME: 6503 return (sd_ddi_resume(devi)); 6504 default: 6505 break; 6506 } 6507 return (DDI_FAILURE); 6508 } 6509 6510 6511 /* 6512 * Function: sddetach 6513 * 6514 * Description: Driver's detach(9E) entry point function. 
6515 * 6516 * Arguments: devi - opaque device info handle 6517 * cmd - detach type 6518 * 6519 * Return Code: DDI_SUCCESS 6520 * DDI_FAILURE 6521 * 6522 * Context: Kernel thread context 6523 */ 6524 6525 static int 6526 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6527 { 6528 switch (cmd) { 6529 case DDI_DETACH: 6530 return (sd_unit_detach(devi)); 6531 case DDI_SUSPEND: 6532 return (sd_ddi_suspend(devi)); 6533 default: 6534 break; 6535 } 6536 return (DDI_FAILURE); 6537 } 6538 6539 6540 /* 6541 * Function: sd_sync_with_callback 6542 * 6543 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6544 * state while the callback routine is active. 6545 * 6546 * Arguments: un: softstate structure for the instance 6547 * 6548 * Context: Kernel thread context 6549 */ 6550 6551 static void 6552 sd_sync_with_callback(struct sd_lun *un) 6553 { 6554 ASSERT(un != NULL); 6555 6556 mutex_enter(SD_MUTEX(un)); 6557 6558 ASSERT(un->un_in_callback >= 0); 6559 6560 while (un->un_in_callback > 0) { 6561 mutex_exit(SD_MUTEX(un)); 6562 delay(2); 6563 mutex_enter(SD_MUTEX(un)); 6564 } 6565 6566 mutex_exit(SD_MUTEX(un)); 6567 } 6568 6569 /* 6570 * Function: sd_unit_attach 6571 * 6572 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6573 * the soft state structure for the device and performs 6574 * all necessary structure and device initializations. 6575 * 6576 * Arguments: devi: the system's dev_info_t for the device. 6577 * 6578 * Return Code: DDI_SUCCESS if attach is successful. 6579 * DDI_FAILURE if any part of the attach fails. 6580 * 6581 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6582 * Kernel thread context only. Can sleep. 6583 */ 6584 6585 static int 6586 sd_unit_attach(dev_info_t *devi) 6587 { 6588 struct scsi_device *devp; 6589 struct sd_lun *un; 6590 char *variantp; 6591 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6592 int instance; 6593 int rval; 6594 int wc_enabled; 6595 int tgt; 6596 uint64_t capacity; 6597 uint_t lbasize = 0; 6598 dev_info_t *pdip = ddi_get_parent(devi); 6599 int offbyone = 0; 6600 int geom_label_valid = 0; 6601 #if defined(__sparc) 6602 int max_xfer_size; 6603 #endif 6604 6605 /* 6606 * Retrieve the target driver's private data area. This was set 6607 * up by the HBA. 6608 */ 6609 devp = ddi_get_driver_private(devi); 6610 6611 /* 6612 * Retrieve the target ID of the device. 6613 */ 6614 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6615 SCSI_ADDR_PROP_TARGET, -1); 6616 6617 /* 6618 * Since we have no idea what state things were left in by the last 6619 * user of the device, set up some 'default' settings, ie. turn 'em 6620 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6621 * Do this before the scsi_probe, which sends an inquiry. 6622 * This is a fix for bug (4430280). 6623 * Of special importance is wide-xfer. The drive could have been left 6624 * in wide transfer mode by the last driver to communicate with it, 6625 * this includes us. If that's the case, and if the following is not 6626 * setup properly or we don't re-negotiate with the drive prior to 6627 * transferring data to/from the drive, it causes bus parity errors, 6628 * data overruns, and unexpected interrupts. This first occurred when 6629 * the fix for bug (4378686) was made. 
6630 */ 6631 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6632 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6633 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6634 6635 /* 6636 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6637 * on a target. Setting it per lun instance actually sets the 6638 * capability of this target, which affects those luns already 6639 * attached on the same target. So during attach, we can only disable 6640 * this capability only when no other lun has been attached on this 6641 * target. By doing this, we assume a target has the same tagged-qing 6642 * capability for every lun. The condition can be removed when HBA 6643 * is changed to support per lun based tagged-qing capability. 6644 */ 6645 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6646 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6647 } 6648 6649 /* 6650 * Use scsi_probe() to issue an INQUIRY command to the device. 6651 * This call will allocate and fill in the scsi_inquiry structure 6652 * and point the sd_inq member of the scsi_device structure to it. 6653 * If the attach succeeds, then this memory will not be de-allocated 6654 * (via scsi_unprobe()) until the instance is detached. 6655 */ 6656 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6657 goto probe_failed; 6658 } 6659 6660 /* 6661 * Check the device type as specified in the inquiry data and 6662 * claim it if it is of a type that we support. 6663 */ 6664 switch (devp->sd_inq->inq_dtype) { 6665 case DTYPE_DIRECT: 6666 break; 6667 case DTYPE_RODIRECT: 6668 break; 6669 case DTYPE_OPTICAL: 6670 break; 6671 case DTYPE_NOTPRESENT: 6672 default: 6673 /* Unsupported device type; fail the attach. */ 6674 goto probe_failed; 6675 } 6676 6677 /* 6678 * Allocate the soft state structure for this unit. 6679 * 6680 * We rely upon this memory being set to all zeroes by 6681 * ddi_soft_state_zalloc(). We assume that any member of the 6682 * soft state structure that is not explicitly initialized by 6683 * this routine will have a value of zero. 6684 */ 6685 instance = ddi_get_instance(devp->sd_dev); 6686 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6687 goto probe_failed; 6688 } 6689 6690 /* 6691 * Retrieve a pointer to the newly-allocated soft state. 6692 * 6693 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6694 * was successful, unless something has gone horribly wrong and the 6695 * ddi's soft state internals are corrupt (in which case it is 6696 * probably better to halt here than just fail the attach....) 6697 */ 6698 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6699 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6700 instance); 6701 /*NOTREACHED*/ 6702 } 6703 6704 /* 6705 * Link the back ptr of the driver soft state to the scsi_device 6706 * struct for this lun. 6707 * Save a pointer to the softstate in the driver-private area of 6708 * the scsi_device struct. 6709 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6710 * we first set un->un_sd below. 6711 */ 6712 un->un_sd = devp; 6713 devp->sd_private = (opaque_t)un; 6714 6715 /* 6716 * The following must be after devp is stored in the soft state struct. 6717 */ 6718 #ifdef SDDEBUG 6719 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6720 "%s_unit_attach: un:0x%p instance:%d\n", 6721 ddi_driver_name(devi), un, instance); 6722 #endif 6723 6724 /* 6725 * Set up the device type and node type (for the minor nodes). 
6726 * By default we assume that the device can at least support the 6727 * Common Command Set. Call it a CD-ROM if it reports itself 6728 * as a RODIRECT device. 6729 */ 6730 switch (devp->sd_inq->inq_dtype) { 6731 case DTYPE_RODIRECT: 6732 un->un_node_type = DDI_NT_CD_CHAN; 6733 un->un_ctype = CTYPE_CDROM; 6734 break; 6735 case DTYPE_OPTICAL: 6736 un->un_node_type = DDI_NT_BLOCK_CHAN; 6737 un->un_ctype = CTYPE_ROD; 6738 break; 6739 default: 6740 un->un_node_type = DDI_NT_BLOCK_CHAN; 6741 un->un_ctype = CTYPE_CCS; 6742 break; 6743 } 6744 6745 /* 6746 * Try to read the interconnect type from the HBA. 6747 * 6748 * Note: This driver is currently compiled as two binaries, a parallel 6749 * scsi version (sd) and a fibre channel version (ssd). All functional 6750 * differences are determined at compile time. In the future a single 6751 * binary will be provided and the interconnect type will be used to 6752 * differentiate between fibre and parallel scsi behaviors. At that time 6753 * it will be necessary for all fibre channel HBAs to support this 6754 * property. 6755 * 6756 * set un_f_is_fiber to TRUE ( default fiber ) 6757 */ 6758 un->un_f_is_fibre = TRUE; 6759 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 6760 case INTERCONNECT_SSA: 6761 un->un_interconnect_type = SD_INTERCONNECT_SSA; 6762 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6763 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 6764 break; 6765 case INTERCONNECT_PARALLEL: 6766 un->un_f_is_fibre = FALSE; 6767 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 6768 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6769 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 6770 break; 6771 case INTERCONNECT_SATA: 6772 un->un_f_is_fibre = FALSE; 6773 un->un_interconnect_type = SD_INTERCONNECT_SATA; 6774 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6775 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 6776 break; 6777 case INTERCONNECT_FIBRE: 6778 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 6779 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6780 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 6781 break; 6782 case INTERCONNECT_FABRIC: 6783 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 6784 un->un_node_type = DDI_NT_BLOCK_FABRIC; 6785 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6786 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 6787 break; 6788 default: 6789 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 6790 /* 6791 * The HBA does not support the "interconnect-type" property 6792 * (or did not provide a recognized type). 6793 * 6794 * Note: This will be obsoleted when a single fibre channel 6795 * and parallel scsi driver is delivered. In the meantime the 6796 * interconnect type will be set to the platform default.If that 6797 * type is not parallel SCSI, it means that we should be 6798 * assuming "ssd" semantics. However, here this also means that 6799 * the FC HBA is not supporting the "interconnect-type" property 6800 * like we expect it to, so log this occurrence. 6801 */ 6802 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 6803 if (!SD_IS_PARALLEL_SCSI(un)) { 6804 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6805 "sd_unit_attach: un:0x%p Assuming " 6806 "INTERCONNECT_FIBRE\n", un); 6807 } else { 6808 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6809 "sd_unit_attach: un:0x%p Assuming " 6810 "INTERCONNECT_PARALLEL\n", un); 6811 un->un_f_is_fibre = FALSE; 6812 } 6813 #else 6814 /* 6815 * Note: This source will be implemented when a single fibre 6816 * channel and parallel scsi driver is delivered. 
The default
6817 * will be to assume that if a device does not support the
6818 * "interconnect-type" property it is a parallel SCSI HBA and
6819 * we will set the interconnect type for parallel scsi.
6820 */
6821 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
6822 un->un_f_is_fibre = FALSE;
6823 #endif
6824 break;
6825 }
6826
6827 if (un->un_f_is_fibre == TRUE) {
6828 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
6829 SCSI_VERSION_3) {
6830 switch (un->un_interconnect_type) {
6831 case SD_INTERCONNECT_FIBRE:
6832 case SD_INTERCONNECT_SSA:
6833 un->un_node_type = DDI_NT_BLOCK_WWN;
6834 break;
6835 default:
6836 break;
6837 }
6838 }
6839 }
6840
6841 /*
6842 * Initialize the Request Sense command for the target
6843 */
6844 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
6845 goto alloc_rqs_failed;
6846 }
6847
6848 /*
6849 * Set un_retry_count to SD_RETRY_COUNT; this is OK for SPARC
6850 * with its separate binaries for sd and ssd.
6851 *
6852 * x86 has one binary, and un_retry_count is set based on the
6853 * connection type. The hardcoded values will go away when SPARC
6854 * uses one binary for sd and ssd. These hardcoded values need to
6855 * match SD_RETRY_COUNT in sddef.h.
6856 * The value used is based on the interconnect type:
6857 * fibre = 3, parallel = 5.
6858 */
6859 #if defined(__i386) || defined(__amd64)
6860 un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
6861 #else
6862 un->un_retry_count = SD_RETRY_COUNT;
6863 #endif
6864
6865 /*
6866 * Set the per disk retry count to the default number of retries
6867 * for disks and CDROMs. This value can be overridden by the
6868 * disk property list or an entry in sd.conf.
6869 */
6870 un->un_notready_retry_count =
6871 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
6872 : DISK_NOT_READY_RETRY_COUNT(un);
6873
6874 /*
6875 * Set the busy retry count to the default value of un_retry_count.
6876 * This can be overridden by entries in sd.conf or the device
6877 * config table.
6878 */
6879 un->un_busy_retry_count = un->un_retry_count;
6880
6881 /*
6882 * Init the reset threshold for retries. This number determines
6883 * how many retries must be performed before a reset can be issued
6884 * (for certain error conditions). This can be overridden by entries
6885 * in sd.conf or the device config table.
6886 */
6887 un->un_reset_retry_count = (un->un_retry_count / 2);
6888
6889 /*
6890 * Set the victim_retry_count to twice the default un_retry_count.
6891 */
6892 un->un_victim_retry_count = (2 * un->un_retry_count);
6893
6894 /*
6895 * Set the reservation release timeout to the default value of
6896 * 5 seconds. This can be overridden by entries in ssd.conf or the
6897 * device config table.
6898 */
6899 un->un_reserve_release_time = 5;
6900
6901 /*
6902 * Set up the default maximum transfer size. Note that this may
6903 * get updated later in the attach, when setting up default wide
6904 * operations for disks.
6905 */
6906 #if defined(__i386) || defined(__amd64)
6907 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
6908 un->un_partial_dma_supported = 1;
6909 #else
6910 un->un_max_xfer_size = (uint_t)maxphys;
6911 #endif
6912
6913 /*
6914 * Get "allow bus device reset" property (defaults to "enabled" if
6915 * the property was not defined). This is to disable bus resets for
6916 * certain kinds of error recovery. Note: In the future, when a run-time
6917 * fibre check is available, the soft state flag should default to
6918 * enabled.
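 *
 * [Editorial note] Because the property defaults to 1 in the
 * ddi_getprop() call below, the override is normally expressed in the
 * driver's .conf file; a hypothetical sd.conf fragment such as
 *
 *	allow-bus-device-reset=0;
 *
 * would disable bus device resets for parallel SCSI configurations.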
6919 */ 6920 if (un->un_f_is_fibre == TRUE) { 6921 un->un_f_allow_bus_device_reset = TRUE; 6922 } else { 6923 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6924 "allow-bus-device-reset", 1) != 0) { 6925 un->un_f_allow_bus_device_reset = TRUE; 6926 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6927 "sd_unit_attach: un:0x%p Bus device reset " 6928 "enabled\n", un); 6929 } else { 6930 un->un_f_allow_bus_device_reset = FALSE; 6931 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6932 "sd_unit_attach: un:0x%p Bus device reset " 6933 "disabled\n", un); 6934 } 6935 } 6936 6937 /* 6938 * Check if this is an ATAPI device. ATAPI devices use Group 1 6939 * Read/Write commands and Group 2 Mode Sense/Select commands. 6940 * 6941 * Note: The "obsolete" way of doing this is to check for the "atapi" 6942 * property. The new "variant" property with a value of "atapi" has been 6943 * introduced so that future 'variants' of standard SCSI behavior (like 6944 * atapi) could be specified by the underlying HBA drivers by supplying 6945 * a new value for the "variant" property, instead of having to define a 6946 * new property. 6947 */ 6948 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 6949 un->un_f_cfg_is_atapi = TRUE; 6950 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6951 "sd_unit_attach: un:0x%p Atapi device\n", un); 6952 } 6953 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 6954 &variantp) == DDI_PROP_SUCCESS) { 6955 if (strcmp(variantp, "atapi") == 0) { 6956 un->un_f_cfg_is_atapi = TRUE; 6957 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6958 "sd_unit_attach: un:0x%p Atapi device\n", un); 6959 } 6960 ddi_prop_free(variantp); 6961 } 6962 6963 un->un_cmd_timeout = SD_IO_TIME; 6964 6965 un->un_busy_timeout = SD_BSY_TIMEOUT; 6966 6967 /* Info on current states, statuses, etc. (Updated frequently) */ 6968 un->un_state = SD_STATE_NORMAL; 6969 un->un_last_state = SD_STATE_NORMAL; 6970 6971 /* Control & status info for command throttling */ 6972 un->un_throttle = sd_max_throttle; 6973 un->un_saved_throttle = sd_max_throttle; 6974 un->un_min_throttle = sd_min_throttle; 6975 6976 if (un->un_f_is_fibre == TRUE) { 6977 un->un_f_use_adaptive_throttle = TRUE; 6978 } else { 6979 un->un_f_use_adaptive_throttle = FALSE; 6980 } 6981 6982 /* Removable media support. */ 6983 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 6984 un->un_mediastate = DKIO_NONE; 6985 un->un_specified_mediastate = DKIO_NONE; 6986 6987 /* CVs for suspend/resume (PM or DR) */ 6988 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 6989 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 6990 6991 /* Power management support. */ 6992 un->un_power_level = SD_SPINDLE_UNINIT; 6993 6994 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 6995 un->un_f_wcc_inprog = 0; 6996 6997 /* 6998 * The open/close semaphore is used to serialize threads executing 6999 * in the driver's open & close entry point routines for a given 7000 * instance. 7001 */ 7002 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7003 7004 /* 7005 * The conf file entry and softstate variable is a forceful override, 7006 * meaning a non-zero value must be entered to change the default. 7007 */ 7008 un->un_f_disksort_disabled = FALSE; 7009 7010 /* 7011 * Retrieve the properties from the static driver table or the driver 7012 * configuration file (.conf) for this unit and update the soft state 7013 * for the device as needed for the indicated properties. 
7014 * Note: the property configuration needs to occur here as some of the 7015 * following routines may have dependencies on soft state flags set 7016 * as part of the driver property configuration. 7017 */ 7018 sd_read_unit_properties(un); 7019 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7020 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7021 7022 /* 7023 * A device is treated as hotpluggable only if it has the 7024 * "hotpluggable" property. Otherwise, it is regarded 7025 * as non-hotpluggable. 7026 */ 7027 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7028 -1) != -1) { 7029 un->un_f_is_hotpluggable = TRUE; 7030 } 7031 7032 /* 7033 * Set the unit's attributes (flags) according to "hotpluggable" and 7034 * the RMB bit in the INQUIRY data. 7035 */ 7036 sd_set_unit_attributes(un, devi); 7037 7038 /* 7039 * By default, we mark the capacity, lbasize, and geometry 7040 * as invalid. Only if we successfully read a valid capacity 7041 * will we update the un_blockcount and un_tgt_blocksize with the 7042 * valid values (the geometry will be validated later). 7043 */ 7044 un->un_f_blockcount_is_valid = FALSE; 7045 un->un_f_tgt_blocksize_is_valid = FALSE; 7046 7047 /* 7048 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7049 * otherwise. 7050 */ 7051 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7052 un->un_blockcount = 0; 7053 7054 /* 7055 * Set up the per-instance info needed to determine the correct 7056 * CDBs and other info for issuing commands to the target. 7057 */ 7058 sd_init_cdb_limits(un); 7059 7060 /* 7061 * Set up the IO chains to use, based upon the target type. 7062 */ 7063 if (un->un_f_non_devbsize_supported) { 7064 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7065 } else { 7066 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7067 } 7068 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7069 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7070 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7071 7072 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7073 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7074 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7075 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7076 7077 7078 if (ISCD(un)) { 7079 un->un_additional_codes = sd_additional_codes; 7080 } else { 7081 un->un_additional_codes = NULL; 7082 } 7083 7084 /* 7085 * Create the kstats here so they can be available for attach-time 7086 * routines that send commands to the unit (either polled or via 7087 * sd_send_scsi_cmd). 7088 * 7089 * Note: This is a critical sequence that needs to be maintained: 7090 * 1) Instantiate the kstats here, before any routines using the 7091 * iopath (i.e. sd_send_scsi_cmd). 7092 * 2) Instantiate and initialize the partition stats 7093 * (sd_set_pstats). 7094 * 3) Initialize the error stats (sd_set_errstats), following 7095 * sd_validate_geometry(), sd_register_devid(), 7096 * and sd_cache_control().
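 *
 * Note (illustrative only): the "disk" I/O kstat created below is
 * what feeds iostat(1M) for this instance, e.g.:
 *
 *	$ iostat -x sd0
 *	                  extended device statistics
 *	device    r/s  w/s   kr/s  kw/s wait actv  svc_t  %w  %b
 *	sd0       0.1  0.3    0.8   2.1  0.0  0.0    4.7   0   0
 *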
7097 */ 7098 7099 un->un_stats = kstat_create(sd_label, instance, 7100 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7101 if (un->un_stats != NULL) { 7102 un->un_stats->ks_lock = SD_MUTEX(un); 7103 kstat_install(un->un_stats); 7104 } 7105 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7106 "sd_unit_attach: un:0x%p un_stats created\n", un); 7107 7108 sd_create_errstats(un, instance); 7109 if (un->un_errstats == NULL) { 7110 goto create_errstats_failed; 7111 } 7112 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7113 "sd_unit_attach: un:0x%p errstats created\n", un); 7114 7115 /* 7116 * The following if/else code was relocated here from below as part 7117 * of the fix for bug (4430280). However with the default setup added 7118 * on entry to this routine, it's no longer absolutely necessary for 7119 * this to be before the call to sd_spin_up_unit. 7120 */ 7121 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7122 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7123 (devp->sd_inq->inq_ansi == 5)) && 7124 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7125 7126 /* 7127 * If tagged queueing is supported by the target 7128 * and by the host adapter then we will enable it 7129 */ 7130 un->un_tagflags = 0; 7131 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7132 (un->un_f_arq_enabled == TRUE)) { 7133 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7134 1, 1) == 1) { 7135 un->un_tagflags = FLAG_STAG; 7136 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7137 "sd_unit_attach: un:0x%p tag queueing " 7138 "enabled\n", un); 7139 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7140 "untagged-qing", 0) == 1) { 7141 un->un_f_opt_queueing = TRUE; 7142 un->un_saved_throttle = un->un_throttle = 7143 min(un->un_throttle, 3); 7144 } else { 7145 un->un_f_opt_queueing = FALSE; 7146 un->un_saved_throttle = un->un_throttle = 1; 7147 } 7148 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7149 == 1) && (un->un_f_arq_enabled == TRUE)) { 7150 /* The Host Adapter supports internal queueing. */ 7151 un->un_f_opt_queueing = TRUE; 7152 un->un_saved_throttle = un->un_throttle = 7153 min(un->un_throttle, 3); 7154 } else { 7155 un->un_f_opt_queueing = FALSE; 7156 un->un_saved_throttle = un->un_throttle = 1; 7157 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7158 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7159 } 7160 7161 /* 7162 * Enable large transfers for SATA/SAS drives 7163 */ 7164 if (SD_IS_SERIAL(un)) { 7165 un->un_max_xfer_size = 7166 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7167 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7168 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7169 "sd_unit_attach: un:0x%p max transfer " 7170 "size=0x%x\n", un, un->un_max_xfer_size); 7171 7172 } 7173 7174 /* Setup or tear down default wide operations for disks */ 7175 7176 /* 7177 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7178 * and "ssd_max_xfer_size" to exist simultaneously on the same 7179 * system and be set to different values. In the future this 7180 * code may need to be updated when the ssd module is 7181 * obsoleted and removed from the system. 
(4299588) 7182 */ 7183 if (SD_IS_PARALLEL_SCSI(un) && 7184 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7185 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7186 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7187 1, 1) == 1) { 7188 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7189 "sd_unit_attach: un:0x%p Wide Transfer " 7190 "enabled\n", un); 7191 } 7192 7193 /* 7194 * If tagged queuing has also been enabled, then 7195 * enable large xfers 7196 */ 7197 if (un->un_saved_throttle == sd_max_throttle) { 7198 un->un_max_xfer_size = 7199 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7200 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7201 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7202 "sd_unit_attach: un:0x%p max transfer " 7203 "size=0x%x\n", un, un->un_max_xfer_size); 7204 } 7205 } else { 7206 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7207 0, 1) == 1) { 7208 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7209 "sd_unit_attach: un:0x%p " 7210 "Wide Transfer disabled\n", un); 7211 } 7212 } 7213 } else { 7214 un->un_tagflags = FLAG_STAG; 7215 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7216 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7217 } 7218 7219 /* 7220 * If this target supports LUN reset, try to enable it. 7221 */ 7222 if (un->un_f_lun_reset_enabled) { 7223 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7224 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7225 "un:0x%p lun_reset capability set\n", un); 7226 } else { 7227 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7228 "un:0x%p lun-reset capability not set\n", un); 7229 } 7230 } 7231 7232 /* 7233 * Adjust the maximum transfer size. This is to fix 7234 * the problem of partial DMA support on SPARC. Some 7235 * HBA driver, like aac, has very small dma_attr_maxxfer 7236 * size, which requires partial DMA support on SPARC. 7237 * In the future the SPARC pci nexus driver may solve 7238 * the problem instead of this fix. 7239 */ 7240 #if defined(__sparc) 7241 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7242 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7243 un->un_max_xfer_size = max_xfer_size; 7244 un->un_partial_dma_supported = 1; 7245 } 7246 #endif 7247 7248 /* 7249 * Set PKT_DMA_PARTIAL flag. 7250 */ 7251 if (un->un_partial_dma_supported == 1) { 7252 un->un_pkt_flags = PKT_DMA_PARTIAL; 7253 } else { 7254 un->un_pkt_flags = 0; 7255 } 7256 7257 /* 7258 * At this point in the attach, we have enough info in the 7259 * soft state to be able to issue commands to the target. 7260 * 7261 * All command paths used below MUST issue their commands as 7262 * SD_PATH_DIRECT. This is important as intermediate layers 7263 * are not all initialized yet (such as PM). 7264 */ 7265 7266 /* 7267 * Send a TEST UNIT READY command to the device. This should clear 7268 * any outstanding UNIT ATTENTION that may be present. 7269 * 7270 * Note: Don't check for success, just track if there is a reservation, 7271 * this is a throw away command to clear any unit attentions. 7272 * 7273 * Note: This MUST be the first command issued to the target during 7274 * attach to ensure power on UNIT ATTENTIONS are cleared. 7275 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7276 * with attempts at spinning up a device with no media. 
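 *
 * For reference, TEST UNIT READY is the simplest SCSI command: a
 * six-byte, all-zero CDB (operation code 0x00). A minimal sketch
 * of what this sends on the wire:
 *
 *	uchar_t tur_cdb[CDB_GROUP0] = { 0, 0, 0, 0, 0, 0 };
 *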
7277 */ 7278 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 7279 reservation_flag = SD_TARGET_IS_RESERVED; 7280 } 7281 7282 /* 7283 * If the device is NOT a removable media device, attempt to spin 7284 * it up (using the START_STOP_UNIT command) and read its capacity 7285 * (using the READ CAPACITY command). Note, however, that either 7286 * of these could fail and in some cases we would continue with 7287 * the attach despite the failure (see below). 7288 */ 7289 if (un->un_f_descr_format_supported) { 7290 switch (sd_spin_up_unit(un)) { 7291 case 0: 7292 /* 7293 * Spin-up was successful; now try to read the 7294 * capacity. If successful then save the results 7295 * and mark the capacity & lbasize as valid. 7296 */ 7297 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7298 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7299 7300 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 7301 &lbasize, SD_PATH_DIRECT)) { 7302 case 0: { 7303 if (capacity > DK_MAX_BLOCKS) { 7304 #ifdef _LP64 7305 if (capacity + 1 > 7306 SD_GROUP1_MAX_ADDRESS) { 7307 /* 7308 * Enable descriptor format 7309 * sense data so that we can 7310 * get 64 bit sense data 7311 * fields. 7312 */ 7313 sd_enable_descr_sense(un); 7314 } 7315 #else 7316 /* 32-bit kernels can't handle this */ 7317 scsi_log(SD_DEVINFO(un), 7318 sd_label, CE_WARN, 7319 "disk has %llu blocks, which " 7320 "is too large for a 32-bit " 7321 "kernel", capacity); 7322 7323 #if defined(__i386) || defined(__amd64) 7324 /* 7325 * 1TB disk was treated as (1T - 512)B 7326 * in the past, so that it might have 7327 * valid VTOC and solaris partitions, 7328 * we have to allow it to continue to 7329 * work. 7330 */ 7331 if (capacity -1 > DK_MAX_BLOCKS) 7332 #endif 7333 goto spinup_failed; 7334 #endif 7335 } 7336 7337 /* 7338 * Here it's not necessary to check the case: 7339 * the capacity of the device is bigger than 7340 * what the max hba cdb can support. Because 7341 * sd_send_scsi_READ_CAPACITY will retrieve 7342 * the capacity by sending USCSI command, which 7343 * is constrained by the max hba cdb. Actually, 7344 * sd_send_scsi_READ_CAPACITY will return 7345 * EINVAL when using bigger cdb than required 7346 * cdb length. Will handle this case in 7347 * "case EINVAL". 7348 */ 7349 7350 /* 7351 * The following relies on 7352 * sd_send_scsi_READ_CAPACITY never 7353 * returning 0 for capacity and/or lbasize. 7354 */ 7355 sd_update_block_info(un, lbasize, capacity); 7356 7357 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7358 "sd_unit_attach: un:0x%p capacity = %ld " 7359 "blocks; lbasize= %ld.\n", un, 7360 un->un_blockcount, un->un_tgt_blocksize); 7361 7362 break; 7363 } 7364 case EINVAL: 7365 /* 7366 * In the case where the max-cdb-length property 7367 * is smaller than the required CDB length for 7368 * a SCSI device, a target driver can fail to 7369 * attach to that device. 7370 */ 7371 scsi_log(SD_DEVINFO(un), 7372 sd_label, CE_WARN, 7373 "disk capacity is too large " 7374 "for current cdb length"); 7375 goto spinup_failed; 7376 case EACCES: 7377 /* 7378 * Should never get here if the spin-up 7379 * succeeded, but code it in anyway. 7380 * From here, just continue with the attach... 7381 */ 7382 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7383 "sd_unit_attach: un:0x%p " 7384 "sd_send_scsi_READ_CAPACITY " 7385 "returned reservation conflict\n", un); 7386 reservation_flag = SD_TARGET_IS_RESERVED; 7387 break; 7388 default: 7389 /* 7390 * Likewise, should never get here if the 7391 * spin-up succeeded. Just continue with 7392 * the attach... 
7393 */ 7394 break; 7395 } 7396 break; 7397 case EACCES: 7398 /* 7399 * Device is reserved by another host. In this case 7400 * we could not spin it up or read the capacity, but 7401 * we continue with the attach anyway. 7402 */ 7403 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7404 "sd_unit_attach: un:0x%p spin-up reservation " 7405 "conflict.\n", un); 7406 reservation_flag = SD_TARGET_IS_RESERVED; 7407 break; 7408 default: 7409 /* Fail the attach if the spin-up failed. */ 7410 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7411 "sd_unit_attach: un:0x%p spin-up failed.", un); 7412 goto spinup_failed; 7413 } 7414 } 7415 7416 /* 7417 * Check to see if this is a MMC drive 7418 */ 7419 if (ISCD(un)) { 7420 sd_set_mmc_caps(un); 7421 } 7422 7423 7424 /* 7425 * Add a zero-length attribute to tell the world we support 7426 * kernel ioctls (for layered drivers) 7427 */ 7428 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7429 DDI_KERNEL_IOCTL, NULL, 0); 7430 7431 /* 7432 * Add a boolean property to tell the world we support 7433 * the B_FAILFAST flag (for layered drivers) 7434 */ 7435 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7436 "ddi-failfast-supported", NULL, 0); 7437 7438 /* 7439 * Initialize power management 7440 */ 7441 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7442 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7443 sd_setup_pm(un, devi); 7444 if (un->un_f_pm_is_enabled == FALSE) { 7445 /* 7446 * For performance, point to a jump table that does 7447 * not include pm. 7448 * The direct and priority chains don't change with PM. 7449 * 7450 * Note: this is currently done based on individual device 7451 * capabilities. When an interface for determining system 7452 * power enabled state becomes available, or when additional 7453 * layers are added to the command chain, these values will 7454 * have to be re-evaluated for correctness. 7455 */ 7456 if (un->un_f_non_devbsize_supported) { 7457 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7458 } else { 7459 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7460 } 7461 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7462 } 7463 7464 /* 7465 * This property is set to 0 by HA software to avoid retries 7466 * on a reserved disk. (The preferred property name is 7467 * "retry-on-reservation-conflict") (1189689) 7468 * 7469 * Note: The use of a global here can have unintended consequences. A 7470 * per instance variable is preferable to match the capabilities of 7471 * different underlying hba's (4402600) 7472 */ 7473 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7474 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7475 sd_retry_on_reservation_conflict); 7476 if (sd_retry_on_reservation_conflict != 0) { 7477 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7478 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7479 sd_retry_on_reservation_conflict); 7480 } 7481 7482 /* Set up options for QFULL handling. */ 7483 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7484 "qfull-retries", -1)) != -1) { 7485 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7486 rval, 1); 7487 } 7488 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7489 "qfull-retry-interval", -1)) != -1) { 7490 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7491 rval, 1); 7492 } 7493 7494 /* 7495 * This just prints a message that announces the existence of the 7496 * device. 
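 *
 * (Illustrative only; names vary with the HBA and instance number,
 * but the announcement typically takes the form
 *
 *	sd0 at mpt0: target 0 lun 0
 *
 * for a parallel SCSI disk.)
 *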
The message is always printed in the system logfile, but 7497 * only appears on the console if the system is booted with the 7498 * -v (verbose) argument. 7499 */ 7500 ddi_report_dev(devi); 7501 7502 un->un_mediastate = DKIO_NONE; 7503 7504 cmlb_alloc_handle(&un->un_cmlbhandle); 7505 7506 #if defined(__i386) || defined(__amd64) 7507 /* 7508 * On x86, compensate for off-by-1 legacy error 7509 */ 7510 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7511 (lbasize == un->un_sys_blocksize)) 7512 offbyone = CMLB_OFF_BY_ONE; 7513 #endif 7514 7515 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7516 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7517 un->un_node_type, offbyone, un->un_cmlbhandle, 7518 (void *)SD_PATH_DIRECT) != 0) { 7519 goto cmlb_attach_failed; 7520 } 7521 7522 7523 /* 7524 * Read and validate the device's geometry (ie, disk label) 7525 * A new unformatted drive will not have a valid geometry, but 7526 * the driver needs to successfully attach to this device so 7527 * the drive can be formatted via ioctls. 7528 */ 7529 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7530 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 7531 7532 mutex_enter(SD_MUTEX(un)); 7533 7534 /* 7535 * Read and initialize the devid for the unit. 7536 */ 7537 if (un->un_f_devid_supported) { 7538 sd_register_devid(un, devi, reservation_flag); 7539 } 7540 mutex_exit(SD_MUTEX(un)); 7541 7542 #if (defined(__fibre)) 7543 /* 7544 * Register callbacks for fibre only. You can't do this solely 7545 * on the basis of the devid_type because this is hba specific. 7546 * We need to query our hba capabilities to find out whether to 7547 * register or not. 7548 */ 7549 if (un->un_f_is_fibre) { 7550 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7551 sd_init_event_callbacks(un); 7552 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7553 "sd_unit_attach: un:0x%p event callbacks inserted", 7554 un); 7555 } 7556 } 7557 #endif 7558 7559 if (un->un_f_opt_disable_cache == TRUE) { 7560 /* 7561 * Disable both read cache and write cache. This is 7562 * the historic behavior of the keywords in the config file. 7563 */ 7564 if (sd_cache_control(un, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7565 0) { 7566 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7567 "sd_unit_attach: un:0x%p Could not disable " 7568 "caching", un); 7569 goto devid_failed; 7570 } 7571 } 7572 7573 /* 7574 * Check the value of the WCE bit now and 7575 * set un_f_write_cache_enabled accordingly. 7576 */ 7577 (void) sd_get_write_cache_enabled(un, &wc_enabled); 7578 mutex_enter(SD_MUTEX(un)); 7579 un->un_f_write_cache_enabled = (wc_enabled != 0); 7580 mutex_exit(SD_MUTEX(un)); 7581 7582 /* 7583 * Check the value of the NV_SUP bit and set 7584 * un_f_suppress_cache_flush accordingly. 7585 */ 7586 sd_get_nv_sup(un); 7587 7588 /* 7589 * Find out what type of reservation this disk supports. 7590 */ 7591 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 7592 case 0: 7593 /* 7594 * SCSI-3 reservations are supported. 7595 */ 7596 un->un_reservation_type = SD_SCSI3_RESERVATION; 7597 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7598 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7599 break; 7600 case ENOTSUP: 7601 /* 7602 * The PERSISTENT RESERVE IN command would not be recognized by 7603 * a SCSI-2 device, so assume the reservation type is SCSI-2. 
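 *
 * A sketch of the probe being issued here, using only standard
 * SPC-defined values: PERSISTENT RESERVE IN is operation code 0x5e
 * with service action 0x00 (READ KEYS), e.g. a ten-byte CDB of
 *
 *	{ 0x5e, 0x00, 0, 0, 0, 0, 0, 0, 24, 0 }
 *
 * where bytes 7-8 carry the allocation length (24 here, enough for
 * the 8-byte header plus two keys).
 *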
7604 */ 7605 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7606 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7607 un->un_reservation_type = SD_SCSI2_RESERVATION; 7608 break; 7609 default: 7610 /* 7611 * default to SCSI-3 reservations 7612 */ 7613 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7614 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7615 un->un_reservation_type = SD_SCSI3_RESERVATION; 7616 break; 7617 } 7618 7619 /* 7620 * Set the pstat and error stat values here, so data obtained during the 7621 * previous attach-time routines is available. 7622 * 7623 * Note: This is a critical sequence that needs to be maintained: 7624 * 1) Instantiate the kstats before any routines using the iopath 7625 * (i.e. sd_send_scsi_cmd). 7626 * 2) Initialize the error stats (sd_set_errstats) and partition 7627 * stats (sd_set_pstats)here, following 7628 * cmlb_validate_geometry(), sd_register_devid(), and 7629 * sd_cache_control(). 7630 */ 7631 7632 if (un->un_f_pkstats_enabled && geom_label_valid) { 7633 sd_set_pstats(un); 7634 SD_TRACE(SD_LOG_IO_PARTITION, un, 7635 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7636 } 7637 7638 sd_set_errstats(un); 7639 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7640 "sd_unit_attach: un:0x%p errstats set\n", un); 7641 7642 7643 /* 7644 * After successfully attaching an instance, we record the information 7645 * of how many luns have been attached on the relative target and 7646 * controller for parallel SCSI. This information is used when sd tries 7647 * to set the tagged queuing capability in HBA. 7648 */ 7649 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7650 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7651 } 7652 7653 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7654 "sd_unit_attach: un:0x%p exit success\n", un); 7655 7656 return (DDI_SUCCESS); 7657 7658 /* 7659 * An error occurred during the attach; clean up & return failure. 7660 */ 7661 7662 devid_failed: 7663 7664 setup_pm_failed: 7665 ddi_remove_minor_node(devi, NULL); 7666 7667 cmlb_attach_failed: 7668 /* 7669 * Cleanup from the scsi_ifsetcap() calls (437868) 7670 */ 7671 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7672 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7673 7674 /* 7675 * Refer to the comments of setting tagged-qing in the beginning of 7676 * sd_unit_attach. We can only disable tagged queuing when there is 7677 * no lun attached on the target. 7678 */ 7679 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7680 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7681 } 7682 7683 if (un->un_f_is_fibre == FALSE) { 7684 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7685 } 7686 7687 spinup_failed: 7688 7689 mutex_enter(SD_MUTEX(un)); 7690 7691 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. 
restart */ 7692 if (un->un_direct_priority_timeid != NULL) { 7693 timeout_id_t temp_id = un->un_direct_priority_timeid; 7694 un->un_direct_priority_timeid = NULL; 7695 mutex_exit(SD_MUTEX(un)); 7696 (void) untimeout(temp_id); 7697 mutex_enter(SD_MUTEX(un)); 7698 } 7699 7700 /* Cancel any pending start/stop timeouts */ 7701 if (un->un_startstop_timeid != NULL) { 7702 timeout_id_t temp_id = un->un_startstop_timeid; 7703 un->un_startstop_timeid = NULL; 7704 mutex_exit(SD_MUTEX(un)); 7705 (void) untimeout(temp_id); 7706 mutex_enter(SD_MUTEX(un)); 7707 } 7708 7709 /* Cancel any pending reset-throttle timeouts */ 7710 if (un->un_reset_throttle_timeid != NULL) { 7711 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7712 un->un_reset_throttle_timeid = NULL; 7713 mutex_exit(SD_MUTEX(un)); 7714 (void) untimeout(temp_id); 7715 mutex_enter(SD_MUTEX(un)); 7716 } 7717 7718 /* Cancel any pending retry timeouts */ 7719 if (un->un_retry_timeid != NULL) { 7720 timeout_id_t temp_id = un->un_retry_timeid; 7721 un->un_retry_timeid = NULL; 7722 mutex_exit(SD_MUTEX(un)); 7723 (void) untimeout(temp_id); 7724 mutex_enter(SD_MUTEX(un)); 7725 } 7726 7727 /* Cancel any pending delayed cv broadcast timeouts */ 7728 if (un->un_dcvb_timeid != NULL) { 7729 timeout_id_t temp_id = un->un_dcvb_timeid; 7730 un->un_dcvb_timeid = NULL; 7731 mutex_exit(SD_MUTEX(un)); 7732 (void) untimeout(temp_id); 7733 mutex_enter(SD_MUTEX(un)); 7734 } 7735 7736 mutex_exit(SD_MUTEX(un)); 7737 7738 /* There should not be any in-progress I/O so ASSERT this check */ 7739 ASSERT(un->un_ncmds_in_transport == 0); 7740 ASSERT(un->un_ncmds_in_driver == 0); 7741 7742 /* Do not free the softstate if the callback routine is active */ 7743 sd_sync_with_callback(un); 7744 7745 /* 7746 * Partition stats apparently are not used with removables. These would 7747 * not have been created during attach, so no need to clean them up... 7748 */ 7749 if (un->un_errstats != NULL) { 7750 kstat_delete(un->un_errstats); 7751 un->un_errstats = NULL; 7752 } 7753 7754 create_errstats_failed: 7755 7756 if (un->un_stats != NULL) { 7757 kstat_delete(un->un_stats); 7758 un->un_stats = NULL; 7759 } 7760 7761 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 7762 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 7763 7764 ddi_prop_remove_all(devi); 7765 sema_destroy(&un->un_semoclose); 7766 cv_destroy(&un->un_state_cv); 7767 7768 getrbuf_failed: 7769 7770 sd_free_rqs(un); 7771 7772 alloc_rqs_failed: 7773 7774 devp->sd_private = NULL; 7775 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 7776 7777 get_softstate_failed: 7778 /* 7779 * Note: the man pages are unclear as to whether or not doing a 7780 * ddi_soft_state_free(sd_state, instance) is the right way to 7781 * clean up after the ddi_soft_state_zalloc() if the subsequent 7782 * ddi_get_soft_state() fails. The implication seems to be 7783 * that the get_soft_state cannot fail if the zalloc succeeds. 7784 */ 7785 ddi_soft_state_free(sd_state, instance); 7786 7787 probe_failed: 7788 scsi_unprobe(devp); 7789 7790 return (DDI_FAILURE); 7791 } 7792 7793 7794 /* 7795 * Function: sd_unit_detach 7796 * 7797 * Description: Performs DDI_DETACH processing for sddetach(). 
7798 * 7799 * Return Code: DDI_SUCCESS 7800 * DDI_FAILURE 7801 * 7802 * Context: Kernel thread context 7803 */ 7804 7805 static int 7806 sd_unit_detach(dev_info_t *devi) 7807 { 7808 struct scsi_device *devp; 7809 struct sd_lun *un; 7810 int i; 7811 int tgt; 7812 dev_t dev; 7813 dev_info_t *pdip = ddi_get_parent(devi); 7814 int instance = ddi_get_instance(devi); 7815 7816 mutex_enter(&sd_detach_mutex); 7817 7818 /* 7819 * Fail the detach for any of the following: 7820 * - Unable to get the sd_lun struct for the instance 7821 * - A layered driver has an outstanding open on the instance 7822 * - Another thread is already detaching this instance 7823 * - Another thread is currently performing an open 7824 */ 7825 devp = ddi_get_driver_private(devi); 7826 if ((devp == NULL) || 7827 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 7828 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 7829 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 7830 mutex_exit(&sd_detach_mutex); 7831 return (DDI_FAILURE); 7832 } 7833 7834 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 7835 7836 /* 7837 * Mark this instance as currently in a detach, to inhibit any 7838 * opens from a layered driver. 7839 */ 7840 un->un_detach_count++; 7841 mutex_exit(&sd_detach_mutex); 7842 7843 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7844 SCSI_ADDR_PROP_TARGET, -1); 7845 7846 dev = sd_make_device(SD_DEVINFO(un)); 7847 7848 #ifndef lint 7849 _NOTE(COMPETING_THREADS_NOW); 7850 #endif 7851 7852 mutex_enter(SD_MUTEX(un)); 7853 7854 /* 7855 * Fail the detach if there are any outstanding layered 7856 * opens on this device. 7857 */ 7858 for (i = 0; i < NDKMAP; i++) { 7859 if (un->un_ocmap.lyropen[i] != 0) { 7860 goto err_notclosed; 7861 } 7862 } 7863 7864 /* 7865 * Verify there are NO outstanding commands issued to this device. 7866 * ie, un_ncmds_in_transport == 0. 7867 * It's possible to have outstanding commands through the physio 7868 * code path, even though everything's closed. 7869 */ 7870 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 7871 (un->un_direct_priority_timeid != NULL) || 7872 (un->un_state == SD_STATE_RWAIT)) { 7873 mutex_exit(SD_MUTEX(un)); 7874 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7875 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 7876 goto err_stillbusy; 7877 } 7878 7879 /* 7880 * If we have the device reserved, release the reservation. 7881 */ 7882 if ((un->un_resvd_status & SD_RESERVE) && 7883 !(un->un_resvd_status & SD_LOST_RESERVE)) { 7884 mutex_exit(SD_MUTEX(un)); 7885 /* 7886 * Note: sd_reserve_release sends a command to the device 7887 * via the sd_ioctlcmd() path, and can sleep. 7888 */ 7889 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 7890 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7891 "sd_dr_detach: Cannot release reservation \n"); 7892 } 7893 } else { 7894 mutex_exit(SD_MUTEX(un)); 7895 } 7896 7897 /* 7898 * Untimeout any reserve recover, throttle reset, restart unit 7899 * and delayed broadcast timeout threads. Protect the timeout pointer 7900 * from getting nulled by their callback functions. 
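 *
 * The cancellation pattern repeated below is a sketch of existing
 * behavior, not new logic: snapshot and clear the id while holding
 * SD_MUTEX, then drop the mutex before untimeout(9F), because
 * untimeout may have to wait for a callback that is itself blocked
 * on SD_MUTEX:
 *
 *	timeout_id_t temp_id = un->un_resvd_timeid;
 *	un->un_resvd_timeid = NULL;
 *	mutex_exit(SD_MUTEX(un));
 *	(void) untimeout(temp_id);
 *	mutex_enter(SD_MUTEX(un));
 *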
7901 */ 7902 mutex_enter(SD_MUTEX(un)); 7903 if (un->un_resvd_timeid != NULL) { 7904 timeout_id_t temp_id = un->un_resvd_timeid; 7905 un->un_resvd_timeid = NULL; 7906 mutex_exit(SD_MUTEX(un)); 7907 (void) untimeout(temp_id); 7908 mutex_enter(SD_MUTEX(un)); 7909 } 7910 7911 if (un->un_reset_throttle_timeid != NULL) { 7912 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7913 un->un_reset_throttle_timeid = NULL; 7914 mutex_exit(SD_MUTEX(un)); 7915 (void) untimeout(temp_id); 7916 mutex_enter(SD_MUTEX(un)); 7917 } 7918 7919 if (un->un_startstop_timeid != NULL) { 7920 timeout_id_t temp_id = un->un_startstop_timeid; 7921 un->un_startstop_timeid = NULL; 7922 mutex_exit(SD_MUTEX(un)); 7923 (void) untimeout(temp_id); 7924 mutex_enter(SD_MUTEX(un)); 7925 } 7926 7927 if (un->un_dcvb_timeid != NULL) { 7928 timeout_id_t temp_id = un->un_dcvb_timeid; 7929 un->un_dcvb_timeid = NULL; 7930 mutex_exit(SD_MUTEX(un)); 7931 (void) untimeout(temp_id); 7932 } else { 7933 mutex_exit(SD_MUTEX(un)); 7934 } 7935 7936 /* Remove any pending reservation reclaim requests for this device */ 7937 sd_rmv_resv_reclaim_req(dev); 7938 7939 mutex_enter(SD_MUTEX(un)); 7940 7941 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 7942 if (un->un_direct_priority_timeid != NULL) { 7943 timeout_id_t temp_id = un->un_direct_priority_timeid; 7944 un->un_direct_priority_timeid = NULL; 7945 mutex_exit(SD_MUTEX(un)); 7946 (void) untimeout(temp_id); 7947 mutex_enter(SD_MUTEX(un)); 7948 } 7949 7950 /* Cancel any active multi-host disk watch thread requests */ 7951 if (un->un_mhd_token != NULL) { 7952 mutex_exit(SD_MUTEX(un)); 7953 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 7954 if (scsi_watch_request_terminate(un->un_mhd_token, 7955 SCSI_WATCH_TERMINATE_NOWAIT)) { 7956 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7957 "sd_dr_detach: Cannot cancel mhd watch request\n"); 7958 /* 7959 * Note: We are returning here after having removed 7960 * some driver timeouts above. This is consistent with 7961 * the legacy implementation but perhaps the watch 7962 * terminate call should be made with the wait flag set. 7963 */ 7964 goto err_stillbusy; 7965 } 7966 mutex_enter(SD_MUTEX(un)); 7967 un->un_mhd_token = NULL; 7968 } 7969 7970 if (un->un_swr_token != NULL) { 7971 mutex_exit(SD_MUTEX(un)); 7972 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 7973 if (scsi_watch_request_terminate(un->un_swr_token, 7974 SCSI_WATCH_TERMINATE_NOWAIT)) { 7975 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7976 "sd_dr_detach: Cannot cancel swr watch request\n"); 7977 /* 7978 * Note: We are returning here after having removed 7979 * some driver timeouts above. This is consistent with 7980 * the legacy implementation but perhaps the watch 7981 * terminate call should be made with the wait flag set. 7982 */ 7983 goto err_stillbusy; 7984 } 7985 mutex_enter(SD_MUTEX(un)); 7986 un->un_swr_token = NULL; 7987 } 7988 7989 mutex_exit(SD_MUTEX(un)); 7990 7991 /* 7992 * Clear any scsi_reset_notifies. We clear the reset notifies 7993 * if we have not registered one. 7994 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 7995 */ 7996 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 7997 sd_mhd_reset_notify_cb, (caddr_t)un); 7998 7999 /* 8000 * protect the timeout pointers from getting nulled by 8001 * their callback functions during the cancellation process. 8002 * In such a scenario untimeout can be invoked with a null value. 
8003 */ 8004 _NOTE(NO_COMPETING_THREADS_NOW); 8005 8006 mutex_enter(&un->un_pm_mutex); 8007 if (un->un_pm_idle_timeid != NULL) { 8008 timeout_id_t temp_id = un->un_pm_idle_timeid; 8009 un->un_pm_idle_timeid = NULL; 8010 mutex_exit(&un->un_pm_mutex); 8011 8012 /* 8013 * Timeout is active; cancel it. 8014 * Note that it'll never be active on a device 8015 * that does not support PM; therefore we don't 8016 * have to check before calling pm_idle_component. 8017 */ 8018 (void) untimeout(temp_id); 8019 (void) pm_idle_component(SD_DEVINFO(un), 0); 8020 mutex_enter(&un->un_pm_mutex); 8021 } 8022 8023 /* 8024 * Check whether there is already a timeout scheduled for power 8025 * management. If so, then don't lower the power here; that's 8026 * the timeout handler's job. 8027 */ 8028 if (un->un_pm_timeid != NULL) { 8029 timeout_id_t temp_id = un->un_pm_timeid; 8030 un->un_pm_timeid = NULL; 8031 mutex_exit(&un->un_pm_mutex); 8032 /* 8033 * Timeout is active; cancel it. 8034 * Note that it'll never be active on a device 8035 * that does not support PM; therefore we don't 8036 * have to check before calling pm_idle_component. 8037 */ 8038 (void) untimeout(temp_id); 8039 (void) pm_idle_component(SD_DEVINFO(un), 0); 8040 8041 } else { 8042 mutex_exit(&un->un_pm_mutex); 8043 if ((un->un_f_pm_is_enabled == TRUE) && 8044 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 8045 DDI_SUCCESS)) { 8046 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8047 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8048 /* 8049 * Fix for bug: 4297749, item # 13 8050 * The above test now includes a check to see if PM is 8051 * supported by this device before calling 8052 * pm_lower_power(). 8053 * Note, the following is not dead code. The call to 8054 * pm_lower_power above will generate a call back into 8055 * our sdpower routine which might result in a timeout 8056 * handler getting activated. Therefore the following 8057 * code is valid and necessary. 8058 */ 8059 mutex_enter(&un->un_pm_mutex); 8060 if (un->un_pm_timeid != NULL) { 8061 timeout_id_t temp_id = un->un_pm_timeid; 8062 un->un_pm_timeid = NULL; 8063 mutex_exit(&un->un_pm_mutex); 8064 (void) untimeout(temp_id); 8065 (void) pm_idle_component(SD_DEVINFO(un), 0); 8066 } else { 8067 mutex_exit(&un->un_pm_mutex); 8068 } 8069 } 8070 } 8071 8072 /* 8073 * Cleanup from the scsi_ifsetcap() calls (437868) 8074 * Relocated here from above to be after the call to 8075 * pm_lower_power, which was getting errors. 8076 */ 8077 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8078 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8079 8080 /* 8081 * Currently, tagged queuing is supported per target by the HBA. 8082 * Setting this per lun instance actually sets the capability of this 8083 * target in the HBA, which affects those luns already attached on the 8084 * same target. So during detach, we can disable this capability 8085 * only when this is the only lun left on this target. By doing 8086 * this, we assume a target has the same tagged queuing capability 8087 * for every lun. The condition can be removed when the HBA is changed to 8088 * support per lun based tagged queuing capability.
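 *
 * For context (standard scsi_ifsetcap(9F) semantics): the final
 * "whom" argument of 1 in the call below directs the request at
 * the addressed device, but per the note above the HBA currently
 * applies it target-wide:
 *
 *	(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
 *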
8089 */ 8090 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8091 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8092 } 8093 8094 if (un->un_f_is_fibre == FALSE) { 8095 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8096 } 8097 8098 /* 8099 * Remove any event callbacks, fibre only 8100 */ 8101 if (un->un_f_is_fibre == TRUE) { 8102 if ((un->un_insert_event != NULL) && 8103 (ddi_remove_event_handler(un->un_insert_cb_id) != 8104 DDI_SUCCESS)) { 8105 /* 8106 * Note: We are returning here after having done 8107 * substantial cleanup above. This is consistent 8108 * with the legacy implementation but this may not 8109 * be the right thing to do. 8110 */ 8111 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8112 "sd_dr_detach: Cannot cancel insert event\n"); 8113 goto err_remove_event; 8114 } 8115 un->un_insert_event = NULL; 8116 8117 if ((un->un_remove_event != NULL) && 8118 (ddi_remove_event_handler(un->un_remove_cb_id) != 8119 DDI_SUCCESS)) { 8120 /* 8121 * Note: We are returning here after having done 8122 * substantial cleanup above. This is consistent 8123 * with the legacy implementation but this may not 8124 * be the right thing to do. 8125 */ 8126 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8127 "sd_dr_detach: Cannot cancel remove event\n"); 8128 goto err_remove_event; 8129 } 8130 un->un_remove_event = NULL; 8131 } 8132 8133 /* Do not free the softstate if the callback routine is active */ 8134 sd_sync_with_callback(un); 8135 8136 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 8137 cmlb_free_handle(&un->un_cmlbhandle); 8138 8139 /* 8140 * Hold the detach mutex here, to make sure that no other threads ever 8141 * can access a (partially) freed soft state structure. 8142 */ 8143 mutex_enter(&sd_detach_mutex); 8144 8145 /* 8146 * Clean up the soft state struct. 8147 * Cleanup is done in reverse order of allocs/inits. 8148 * At this point there should be no competing threads anymore. 8149 */ 8150 8151 /* Unregister and free device id. */ 8152 ddi_devid_unregister(devi); 8153 if (un->un_devid) { 8154 ddi_devid_free(un->un_devid); 8155 un->un_devid = NULL; 8156 } 8157 8158 /* 8159 * Destroy wmap cache if it exists. 8160 */ 8161 if (un->un_wm_cache != NULL) { 8162 kmem_cache_destroy(un->un_wm_cache); 8163 un->un_wm_cache = NULL; 8164 } 8165 8166 /* 8167 * kstat cleanup is done in detach for all device types (4363169). 8168 * We do not want to fail detach if the device kstats are not deleted 8169 * since there is a confusion about the devo_refcnt for the device. 8170 * We just delete the kstats and let detach complete successfully. 8171 */ 8172 if (un->un_stats != NULL) { 8173 kstat_delete(un->un_stats); 8174 un->un_stats = NULL; 8175 } 8176 if (un->un_errstats != NULL) { 8177 kstat_delete(un->un_errstats); 8178 un->un_errstats = NULL; 8179 } 8180 8181 /* Remove partition stats */ 8182 if (un->un_f_pkstats_enabled) { 8183 for (i = 0; i < NSDMAP; i++) { 8184 if (un->un_pstats[i] != NULL) { 8185 kstat_delete(un->un_pstats[i]); 8186 un->un_pstats[i] = NULL; 8187 } 8188 } 8189 } 8190 8191 /* Remove xbuf registration */ 8192 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8193 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8194 8195 /* Remove driver properties */ 8196 ddi_prop_remove_all(devi); 8197 8198 mutex_destroy(&un->un_pm_mutex); 8199 cv_destroy(&un->un_pm_busy_cv); 8200 8201 cv_destroy(&un->un_wcc_cv); 8202 8203 /* Open/close semaphore */ 8204 sema_destroy(&un->un_semoclose); 8205 8206 /* Removable media condvar. 
*/ 8207 cv_destroy(&un->un_state_cv); 8208 8209 /* Suspend/resume condvar. */ 8210 cv_destroy(&un->un_suspend_cv); 8211 cv_destroy(&un->un_disk_busy_cv); 8212 8213 sd_free_rqs(un); 8214 8215 /* Free up soft state */ 8216 devp->sd_private = NULL; 8217 8218 bzero(un, sizeof (struct sd_lun)); 8219 ddi_soft_state_free(sd_state, instance); 8220 8221 mutex_exit(&sd_detach_mutex); 8222 8223 /* This frees up the INQUIRY data associated with the device. */ 8224 scsi_unprobe(devp); 8225 8226 /* 8227 * After successfully detaching an instance, we update the information 8228 * of how many luns have been attached in the relative target and 8229 * controller for parallel SCSI. This information is used when sd tries 8230 * to set the tagged queuing capability in HBA. 8231 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8232 * check if the device is parallel SCSI. However, we don't need to 8233 * check here because we've already checked during attach. No device 8234 * that is not parallel SCSI is in the chain. 8235 */ 8236 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8237 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8238 } 8239 8240 return (DDI_SUCCESS); 8241 8242 err_notclosed: 8243 mutex_exit(SD_MUTEX(un)); 8244 8245 err_stillbusy: 8246 _NOTE(NO_COMPETING_THREADS_NOW); 8247 8248 err_remove_event: 8249 mutex_enter(&sd_detach_mutex); 8250 un->un_detach_count--; 8251 mutex_exit(&sd_detach_mutex); 8252 8253 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8254 return (DDI_FAILURE); 8255 } 8256 8257 8258 /* 8259 * Function: sd_create_errstats 8260 * 8261 * Description: This routine instantiates the device error stats. 8262 * 8263 * Note: During attach the stats are instantiated first so they are 8264 * available for attach-time routines that utilize the driver 8265 * iopath to send commands to the device. The stats are initialized 8266 * separately so data obtained during some attach-time routines is 8267 * available. 
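 *
 * (These named stats back the per-device error summary printed
 * by iostat -E; sample output, with hypothetical values:
 *
 *	sd0   Soft Errors: 0 Hard Errors: 0 Transport Errors: 0
 *	Vendor: ACME Product: SuperDisk Revision: 0001
 *
 * Serial number and capacity fields follow in real output.)
 *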
(4362483) 8268 * 8269 * Arguments: un - driver soft state (unit) structure 8270 * instance - driver instance 8271 * 8272 * Context: Kernel thread context 8273 */ 8274 8275 static void 8276 sd_create_errstats(struct sd_lun *un, int instance) 8277 { 8278 struct sd_errstats *stp; 8279 char kstatmodule_err[KSTAT_STRLEN]; 8280 char kstatname[KSTAT_STRLEN]; 8281 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8282 8283 ASSERT(un != NULL); 8284 8285 if (un->un_errstats != NULL) { 8286 return; 8287 } 8288 8289 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8290 "%serr", sd_label); 8291 (void) snprintf(kstatname, sizeof (kstatname), 8292 "%s%d,err", sd_label, instance); 8293 8294 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8295 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8296 8297 if (un->un_errstats == NULL) { 8298 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8299 "sd_create_errstats: Failed kstat_create\n"); 8300 return; 8301 } 8302 8303 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8304 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8305 KSTAT_DATA_UINT32); 8306 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8307 KSTAT_DATA_UINT32); 8308 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8309 KSTAT_DATA_UINT32); 8310 kstat_named_init(&stp->sd_vid, "Vendor", 8311 KSTAT_DATA_CHAR); 8312 kstat_named_init(&stp->sd_pid, "Product", 8313 KSTAT_DATA_CHAR); 8314 kstat_named_init(&stp->sd_revision, "Revision", 8315 KSTAT_DATA_CHAR); 8316 kstat_named_init(&stp->sd_serial, "Serial No", 8317 KSTAT_DATA_CHAR); 8318 kstat_named_init(&stp->sd_capacity, "Size", 8319 KSTAT_DATA_ULONGLONG); 8320 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8321 KSTAT_DATA_UINT32); 8322 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8323 KSTAT_DATA_UINT32); 8324 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8325 KSTAT_DATA_UINT32); 8326 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8327 KSTAT_DATA_UINT32); 8328 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8329 KSTAT_DATA_UINT32); 8330 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8331 KSTAT_DATA_UINT32); 8332 8333 un->un_errstats->ks_private = un; 8334 un->un_errstats->ks_update = nulldev; 8335 8336 kstat_install(un->un_errstats); 8337 } 8338 8339 8340 /* 8341 * Function: sd_set_errstats 8342 * 8343 * Description: This routine sets the value of the vendor id, product id, 8344 * revision, serial number, and capacity device error stats. 8345 * 8346 * Note: During attach the stats are instantiated first so they are 8347 * available for attach-time routines that utilize the driver 8348 * iopath to send commands to the device. The stats are initialized 8349 * separately so data obtained during some attach-time routines is 8350 * available. 
(4362483) 8351 * 8352 * Arguments: un - driver soft state (unit) structure 8353 * 8354 * Context: Kernel thread context 8355 */ 8356 8357 static void 8358 sd_set_errstats(struct sd_lun *un) 8359 { 8360 struct sd_errstats *stp; 8361 8362 ASSERT(un != NULL); 8363 ASSERT(un->un_errstats != NULL); 8364 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8365 ASSERT(stp != NULL); 8366 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8367 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8368 (void) strncpy(stp->sd_revision.value.c, 8369 un->un_sd->sd_inq->inq_revision, 4); 8370 8371 /* 8372 * All the errstats are persistent across detach/attach, 8373 * so reset all the errstats here in case of the hot 8374 * replacement of disk drives, except for not changed 8375 * Sun qualified drives. 8376 */ 8377 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8378 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8379 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8380 stp->sd_softerrs.value.ui32 = 0; 8381 stp->sd_harderrs.value.ui32 = 0; 8382 stp->sd_transerrs.value.ui32 = 0; 8383 stp->sd_rq_media_err.value.ui32 = 0; 8384 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8385 stp->sd_rq_nodev_err.value.ui32 = 0; 8386 stp->sd_rq_recov_err.value.ui32 = 0; 8387 stp->sd_rq_illrq_err.value.ui32 = 0; 8388 stp->sd_rq_pfa_err.value.ui32 = 0; 8389 } 8390 8391 /* 8392 * Set the "Serial No" kstat for Sun qualified drives (indicated by 8393 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8394 * (4376302)) 8395 */ 8396 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8397 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8398 sizeof (SD_INQUIRY(un)->inq_serial)); 8399 } 8400 8401 if (un->un_f_blockcount_is_valid != TRUE) { 8402 /* 8403 * Set capacity error stat to 0 for no media. This ensures 8404 * a valid capacity is displayed in response to 'iostat -E' 8405 * when no media is present in the device. 8406 */ 8407 stp->sd_capacity.value.ui64 = 0; 8408 } else { 8409 /* 8410 * Multiply un_blockcount by un->un_sys_blocksize to get 8411 * capacity. 8412 * 8413 * Note: for non-512 blocksize devices "un_blockcount" has been 8414 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8415 * (un_tgt_blocksize / un->un_sys_blocksize). 8416 */ 8417 stp->sd_capacity.value.ui64 = (uint64_t) 8418 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8419 } 8420 } 8421 8422 8423 /* 8424 * Function: sd_set_pstats 8425 * 8426 * Description: This routine instantiates and initializes the partition 8427 * stats for each partition with more than zero blocks. 8428 * (4363169) 8429 * 8430 * Arguments: un - driver soft state (unit) structure 8431 * 8432 * Context: Kernel thread context 8433 */ 8434 8435 static void 8436 sd_set_pstats(struct sd_lun *un) 8437 { 8438 char kstatname[KSTAT_STRLEN]; 8439 int instance; 8440 int i; 8441 diskaddr_t nblks = 0; 8442 char *partname = NULL; 8443 8444 ASSERT(un != NULL); 8445 8446 instance = ddi_get_instance(SD_DEVINFO(un)); 8447 8448 /* Note:x86: is this a VTOC8/VTOC16 difference? 
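 *
 * The loop below names each partition kstat
 * "<label><instance>,<partition>" via the "%s%d,%s" format, so a
 * hypothetical instance 0, slice "a" yields a kstat named "sd0,a".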
*/ 8449 for (i = 0; i < NSDMAP; i++) { 8450 8451 if (cmlb_partinfo(un->un_cmlbhandle, i, 8452 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 8453 continue; 8454 mutex_enter(SD_MUTEX(un)); 8455 8456 if ((un->un_pstats[i] == NULL) && 8457 (nblks != 0)) { 8458 8459 (void) snprintf(kstatname, sizeof (kstatname), 8460 "%s%d,%s", sd_label, instance, 8461 partname); 8462 8463 un->un_pstats[i] = kstat_create(sd_label, 8464 instance, kstatname, "partition", KSTAT_TYPE_IO, 8465 1, KSTAT_FLAG_PERSISTENT); 8466 if (un->un_pstats[i] != NULL) { 8467 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 8468 kstat_install(un->un_pstats[i]); 8469 } 8470 } 8471 mutex_exit(SD_MUTEX(un)); 8472 } 8473 } 8474 8475 8476 #if (defined(__fibre)) 8477 /* 8478 * Function: sd_init_event_callbacks 8479 * 8480 * Description: This routine initializes the insertion and removal event 8481 * callbacks. (fibre only) 8482 * 8483 * Arguments: un - driver soft state (unit) structure 8484 * 8485 * Context: Kernel thread context 8486 */ 8487 8488 static void 8489 sd_init_event_callbacks(struct sd_lun *un) 8490 { 8491 ASSERT(un != NULL); 8492 8493 if ((un->un_insert_event == NULL) && 8494 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 8495 &un->un_insert_event) == DDI_SUCCESS)) { 8496 /* 8497 * Add the callback for an insertion event 8498 */ 8499 (void) ddi_add_event_handler(SD_DEVINFO(un), 8500 un->un_insert_event, sd_event_callback, (void *)un, 8501 &(un->un_insert_cb_id)); 8502 } 8503 8504 if ((un->un_remove_event == NULL) && 8505 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 8506 &un->un_remove_event) == DDI_SUCCESS)) { 8507 /* 8508 * Add the callback for a removal event 8509 */ 8510 (void) ddi_add_event_handler(SD_DEVINFO(un), 8511 un->un_remove_event, sd_event_callback, (void *)un, 8512 &(un->un_remove_cb_id)); 8513 } 8514 } 8515 8516 8517 /* 8518 * Function: sd_event_callback 8519 * 8520 * Description: This routine handles insert/remove events (photon). The 8521 * state is changed to OFFLINE, which can be used to suppress 8522 * error msgs. (fibre only) 8523 * 8524 * Arguments: un - driver soft state (unit) structure 8525 * 8526 * Context: Callout thread context 8527 */ 8528 /* ARGSUSED */ 8529 static void 8530 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 8531 void *bus_impldata) 8532 { 8533 struct sd_lun *un = (struct sd_lun *)arg; 8534 8535 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 8536 if (event == un->un_insert_event) { 8537 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 8538 mutex_enter(SD_MUTEX(un)); 8539 if (un->un_state == SD_STATE_OFFLINE) { 8540 if (un->un_last_state != SD_STATE_SUSPENDED) { 8541 un->un_state = un->un_last_state; 8542 } else { 8543 /* 8544 * We have gone through SUSPEND/RESUME while 8545 * we were offline. Restore the last state 8546 */ 8547 un->un_state = un->un_save_state; 8548 } 8549 } 8550 mutex_exit(SD_MUTEX(un)); 8551 8552 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 8553 } else if (event == un->un_remove_event) { 8554 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 8555 mutex_enter(SD_MUTEX(un)); 8556 /* 8557 * We need to handle an event callback that occurs during 8558 * the suspend operation, since we don't prevent it.
8559 */ 8560 if (un->un_state != SD_STATE_OFFLINE) { 8561 if (un->un_state != SD_STATE_SUSPENDED) { 8562 New_state(un, SD_STATE_OFFLINE); 8563 } else { 8564 un->un_last_state = SD_STATE_OFFLINE; 8565 } 8566 } 8567 mutex_exit(SD_MUTEX(un)); 8568 } else { 8569 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8570 "!Unknown event\n"); 8571 } 8572 8573 } 8574 #endif 8575 8576 /* 8577 * Function: sd_cache_control() 8578 * 8579 * Description: This routine is the driver entry point for setting 8580 * read and write caching by modifying the WCE (write cache 8581 * enable) and RCD (read cache disable) bits of mode 8582 * page 8 (MODEPAGE_CACHING). 8583 * 8584 * Arguments: un - driver soft state (unit) structure 8585 * rcd_flag - flag for controlling the read cache 8586 * wce_flag - flag for controlling the write cache 8587 * 8588 * Return Code: EIO 8589 * code returned by sd_send_scsi_MODE_SENSE and 8590 * sd_send_scsi_MODE_SELECT 8591 * 8592 * Context: Kernel Thread 8593 */ 8594 8595 static int 8596 sd_cache_control(struct sd_lun *un, int rcd_flag, int wce_flag) 8597 { 8598 struct mode_caching *mode_caching_page; 8599 uchar_t *header; 8600 size_t buflen; 8601 int hdrlen; 8602 int bd_len; 8603 int rval = 0; 8604 struct mode_header_grp2 *mhp; 8605 8606 ASSERT(un != NULL); 8607 8608 /* 8609 * Do a test unit ready, otherwise a mode sense may not work if this 8610 * is the first command sent to the device after boot. 8611 */ 8612 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8613 8614 if (un->un_f_cfg_is_atapi == TRUE) { 8615 hdrlen = MODE_HEADER_LENGTH_GRP2; 8616 } else { 8617 hdrlen = MODE_HEADER_LENGTH; 8618 } 8619 8620 /* 8621 * Allocate memory for the retrieved mode page and its headers. Set 8622 * a pointer to the page itself. Use mode_cache_scsi3 to ensure 8623 * we get all of the mode sense data; otherwise, the mode select 8624 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8625 */ 8626 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8627 sizeof (struct mode_cache_scsi3); 8628 8629 header = kmem_zalloc(buflen, KM_SLEEP); 8630 8631 /* Get the information from the device. */ 8632 if (un->un_f_cfg_is_atapi == TRUE) { 8633 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8634 MODEPAGE_CACHING, SD_PATH_DIRECT); 8635 } else { 8636 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8637 MODEPAGE_CACHING, SD_PATH_DIRECT); 8638 } 8639 if (rval != 0) { 8640 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8641 "sd_cache_control: Mode Sense Failed\n"); 8642 kmem_free(header, buflen); 8643 return (rval); 8644 } 8645 8646 /* 8647 * Determine size of Block Descriptors in order to locate 8648 * the mode page data. ATAPI devices return 0, SCSI devices 8649 * should return MODE_BLK_DESC_LENGTH.
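 *
 * As a sketch (layout per SPC mode sense, sizes matching the
 * constants used in this function), the buffer being parsed is:
 *
 *	+------------------------+  hdrlen bytes
 *	| mode parameter header  |
 *	+------------------------+  bd_len bytes (0 for ATAPI)
 *	| block descriptor(s)    |
 *	+------------------------+
 *	| caching mode page 8    |  mode_caching_page points here
 *	+------------------------+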
8650 */ 8651 if (un->un_f_cfg_is_atapi == TRUE) { 8652 mhp = (struct mode_header_grp2 *)header; 8653 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8654 } else { 8655 bd_len = ((struct mode_header *)header)->bdesc_length; 8656 } 8657 8658 if (bd_len > MODE_BLK_DESC_LENGTH) { 8659 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8660 "sd_cache_control: Mode Sense returned invalid " 8661 "block descriptor length\n"); 8662 kmem_free(header, buflen); 8663 return (EIO); 8664 } 8665 8666 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8667 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8668 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8669 " caching page code mismatch %d\n", 8670 mode_caching_page->mode_page.code); 8671 kmem_free(header, buflen); 8672 return (EIO); 8673 } 8674 8675 /* Check the relevant bits on successful mode sense. */ 8676 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8677 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8678 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8679 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8680 8681 size_t sbuflen; 8682 uchar_t save_pg; 8683 8684 /* 8685 * Construct select buffer length based on the 8686 * length of the sense data returned. 8687 */ 8688 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8689 sizeof (struct mode_page) + 8690 (int)mode_caching_page->mode_page.length; 8691 8692 /* 8693 * Set the caching bits as requested. 8694 */ 8695 if (rcd_flag == SD_CACHE_ENABLE) 8696 mode_caching_page->rcd = 0; 8697 else if (rcd_flag == SD_CACHE_DISABLE) 8698 mode_caching_page->rcd = 1; 8699 8700 if (wce_flag == SD_CACHE_ENABLE) 8701 mode_caching_page->wce = 1; 8702 else if (wce_flag == SD_CACHE_DISABLE) 8703 mode_caching_page->wce = 0; 8704 8705 /* 8706 * Save the page if the mode sense says the 8707 * drive supports it. 8708 */ 8709 save_pg = mode_caching_page->mode_page.ps ? 8710 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 8711 8712 /* Clear reserved bits before mode select. */ 8713 mode_caching_page->mode_page.ps = 0; 8714 8715 /* 8716 * Clear out mode header for mode select. 8717 * The rest of the retrieved page will be reused. 8718 */ 8719 bzero(header, hdrlen); 8720 8721 if (un->un_f_cfg_is_atapi == TRUE) { 8722 mhp = (struct mode_header_grp2 *)header; 8723 mhp->bdesc_length_hi = bd_len >> 8; 8724 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 8725 } else { 8726 ((struct mode_header *)header)->bdesc_length = bd_len; 8727 } 8728 8729 /* Issue mode select to change the cache settings */ 8730 if (un->un_f_cfg_is_atapi == TRUE) { 8731 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 8732 sbuflen, save_pg, SD_PATH_DIRECT); 8733 } else { 8734 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 8735 sbuflen, save_pg, SD_PATH_DIRECT); 8736 } 8737 } 8738 8739 kmem_free(header, buflen); 8740 return (rval); 8741 } 8742 8743 8744 /* 8745 * Function: sd_get_write_cache_enabled() 8746 * 8747 * Description: This routine is the driver entry point for determining if 8748 * write caching is enabled. It examines the WCE (write cache 8749 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
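 *
 * (Per SPC, WCE is bit 2 and RCD is bit 0 of the first byte
 * following the caching page's two-byte header; the bitfields of
 * struct mode_caching used below encapsulate that layout.)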
8750 * 8751 * Arguments: un - driver soft state (unit) structure 8752 * is_enabled - pointer to int where write cache enabled state 8753 * is returned (non-zero -> write cache enabled) 8754 * 8755 * 8756 * Return Code: EIO 8757 * code returned by sd_send_scsi_MODE_SENSE 8758 * 8759 * Context: Kernel Thread 8760 * 8761 * NOTE: If ioctl is added to disable write cache, this sequence should 8762 * be followed so that no locking is required for accesses to 8763 * un->un_f_write_cache_enabled: 8764 * do mode select to clear wce 8765 * do synchronize cache to flush cache 8766 * set un->un_f_write_cache_enabled = FALSE 8767 * 8768 * Conversely, an ioctl to enable the write cache should be done 8769 * in this order: 8770 * set un->un_f_write_cache_enabled = TRUE 8771 * do mode select to set wce 8772 */ 8773 8774 static int 8775 sd_get_write_cache_enabled(struct sd_lun *un, int *is_enabled) 8776 { 8777 struct mode_caching *mode_caching_page; 8778 uchar_t *header; 8779 size_t buflen; 8780 int hdrlen; 8781 int bd_len; 8782 int rval = 0; 8783 8784 ASSERT(un != NULL); 8785 ASSERT(is_enabled != NULL); 8786 8787 /* in case of error, flag as enabled */ 8788 *is_enabled = TRUE; 8789 8790 /* 8791 * Do a test unit ready, otherwise a mode sense may not work if this 8792 * is the first command sent to the device after boot. 8793 */ 8794 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 8795 8796 if (un->un_f_cfg_is_atapi == TRUE) { 8797 hdrlen = MODE_HEADER_LENGTH_GRP2; 8798 } else { 8799 hdrlen = MODE_HEADER_LENGTH; 8800 } 8801 8802 /* 8803 * Allocate memory for the retrieved mode page and its headers. Set 8804 * a pointer to the page itself. 8805 */ 8806 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 8807 header = kmem_zalloc(buflen, KM_SLEEP); 8808 8809 /* Get the information from the device. */ 8810 if (un->un_f_cfg_is_atapi == TRUE) { 8811 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 8812 MODEPAGE_CACHING, SD_PATH_DIRECT); 8813 } else { 8814 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 8815 MODEPAGE_CACHING, SD_PATH_DIRECT); 8816 } 8817 if (rval != 0) { 8818 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8819 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 8820 kmem_free(header, buflen); 8821 return (rval); 8822 } 8823 8824 /* 8825 * Determine size of Block Descriptors in order to locate 8826 * the mode page data. ATAPI devices return 0, SCSI devices 8827 * should return MODE_BLK_DESC_LENGTH. 
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		struct mode_header_grp2 *mhp;
		mhp	= (struct mode_header_grp2 *)header;
		bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
	} else {
		bd_len = ((struct mode_header *)header)->bdesc_length;
	}

	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_get_write_cache_enabled: Mode Sense returned invalid "
		    "block descriptor length\n");
		kmem_free(header, buflen);
		return (EIO);
	}

	mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
	if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
		SD_ERROR(SD_LOG_COMMON, un, "sd_get_write_cache_enabled: "
		    "Mode Sense caching page code mismatch %d\n",
		    mode_caching_page->mode_page.code);
		kmem_free(header, buflen);
		return (EIO);
	}
	*is_enabled = mode_caching_page->wce;

	kmem_free(header, buflen);
	return (0);
}

/*
 * Function: sd_get_nv_sup()
 *
 * Description: This routine is the driver entry point for determining
 *		whether a non-volatile cache is supported. The determination
 *		works as follows:
 *
 *		1. sd first queries sd.conf to see whether the
 *		   suppress_cache_flush bit is set for this device.
 *
 *		2. If it is not set there, sd queries the internal disk table.
 *
 *		3. If either sd.conf or the internal disk table specifies
 *		   that cache flushes be suppressed, sd does not bother
 *		   checking the NV_SUP bit.
 *
 *		If the SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries
 *		the optional INQUIRY VPD page 0x86. If the device supports
 *		VPD page 0x86, sd examines the NV_SUP (non-volatile cache
 *		support) bit in that page:
 *		o If the NV_SUP bit is set, sd assumes the device has a
 *		  non-volatile cache and sets un_f_sync_nv_supported to TRUE.
 *		o Otherwise the cache is not non-volatile, and
 *		  un_f_sync_nv_supported is set to FALSE.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: none
 *
 * Context: Kernel Thread
 */

static void
sd_get_nv_sup(struct sd_lun *un)
{
	int		rval		= 0;
	uchar_t		*inq86		= NULL;
	size_t		inq86_len	= MAX_INQUIRY_SIZE;
	size_t		inq86_resid	= 0;
	struct		dk_callback *dkc;

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Be conservative on the device's support of
	 * SYNC_NV bit: un_f_sync_nv_supported is
	 * initialized to be false.
	 */
	un->un_f_sync_nv_supported = FALSE;

	/*
	 * If either sd.conf or the internal disk table
	 * specifies that cache flushes be suppressed, then
	 * we don't bother checking the NV_SUP bit.
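	 *
	 * A minimal sketch of the check performed further below once this
	 * suppression test passes (byte 6 of the VPD page 0x86 data is
	 * where this driver expects the NV_SUP flag):
	 *
	 *	if (inq86[6] & SD_VPD_NV_SUP)
	 *		un->un_f_sync_nv_supported = TRUE;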
	 */
	if (un->un_f_suppress_cache_flush == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	if (sd_check_vpd_page_support(un) == 0 &&
	    un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
		mutex_exit(SD_MUTEX(un));
		/* collect page 86 data if available */
		inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
		rval = sd_send_scsi_INQUIRY(un, inq86, inq86_len,
		    0x01, 0x86, &inq86_resid);

		if (rval == 0 && (inq86_len - inq86_resid > 6)) {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_get_nv_sup: successfully got VPD page: %x "
			    "PAGE LENGTH: %x BYTE 6: %x\n",
			    inq86[1], inq86[3], inq86[6]);

			mutex_enter(SD_MUTEX(un));
			/*
			 * Check the value of the NV_SUP bit: only if the
			 * device reports the NV_SUP bit as 1 is the
			 * un_f_sync_nv_supported flag set to TRUE.
			 */
			if (inq86[6] & SD_VPD_NV_SUP) {
				un->un_f_sync_nv_supported = TRUE;
			}
			mutex_exit(SD_MUTEX(un));
		}
		kmem_free(inq86, inq86_len);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * If the device is believed to have a non-volatile cache, send a
	 * SYNCHRONIZE CACHE command with the FLUSH_VOLATILE (SYNC_NV) hint
	 * to verify that the bit is actually supported; by this point
	 * un_f_sync_nv_supported holds the value determined above.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_sync_nv_supported) {
		mutex_exit(SD_MUTEX(un));
		dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
		dkc->dkc_flag = FLUSH_VOLATILE;
		(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);

		/*
		 * Send a TEST UNIT READY command to the device. This should
		 * clear any outstanding UNIT ATTENTION that may be present.
		 */
		(void) sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR);

		kmem_free(dkc, sizeof (struct dk_callback));
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: "
	    "un_f_suppress_cache_flush is set to %d\n",
	    un->un_f_suppress_cache_flush);
}

/*
 * Function: sd_make_device
 *
 * Description: Utility routine to return the Solaris device number from
 *		the data in the device's dev_info structure.
 *
 * Return Code: The Solaris device number
 *
 * Context: Any
 */

static dev_t
sd_make_device(dev_info_t *devi)
{
	return (makedevice(ddi_name_to_major(ddi_get_name(devi)),
	    ddi_get_instance(devi) << SDUNIT_SHIFT));
}


/*
 * Function: sd_pm_entry
 *
 * Description: Called at the start of a new command to manage power
 *		and busy status of a device. This includes determining whether
 *		the current power state of the device is sufficient for
 *		performing the command or whether it must be changed.
 *		The PM framework is notified appropriately.
 *		Only with a return status of DDI_SUCCESS will the
 *		component be marked busy to the framework.
 *
 *		All callers of sd_pm_entry must check the return status
 *		and only call sd_pm_exit if it was DDI_SUCCESS. A status
 *		of DDI_FAILURE indicates the device failed to power up.
 *		In this case un_pm_count has been adjusted so the result
 *		on exit is still powered down, i.e. count is less than 0.
 *		Calling sd_pm_exit with this count value hits an ASSERT.
 *
 * Return Code: DDI_SUCCESS or DDI_FAILURE
 *
 * Context: Kernel thread context.
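 *
 *		A minimal caller sketch under those rules (purely
 *		illustrative; the surrounding command setup is elided):
 *
 *		    if (sd_pm_entry(un) == DDI_SUCCESS) {
 *			    ... issue the command ...
 *			    sd_pm_exit(un);
 *		    }
 *
 *		On DDI_FAILURE the caller must NOT call sd_pm_exit().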
 */

static int
sd_pm_entry(struct sd_lun *un)
{
	int return_status = DDI_SUCCESS;

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");

	if (un->un_f_pm_is_enabled == FALSE) {
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_entry: exiting, PM not enabled\n");
		return (return_status);
	}

	/*
	 * Just increment a counter if PM is enabled. On the transition from
	 * 0 ==> 1, mark the device as busy.  The iodone side will decrement
	 * the count with each IO and mark the device as idle when the count
	 * hits 0.
	 *
	 * If the count is less than 0 the device is powered down. If a powered
	 * down device is successfully powered up then the count must be
	 * incremented to reflect the power up. Note that it'll get incremented
	 * a second time to become busy.
	 *
	 * Because the following has the potential to change the device state
	 * and must release the un_pm_mutex to do so, only one thread can be
	 * allowed through at a time.
	 */

	mutex_enter(&un->un_pm_mutex);
	while (un->un_pm_busy == TRUE) {
		cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
	}
	un->un_pm_busy = TRUE;

	if (un->un_pm_count < 1) {

		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");

		/*
		 * Indicate we are now busy so the framework won't attempt to
		 * power down the device. This call will only fail if either
		 * we passed a bad component number or the device has no
		 * components. Neither of these should ever happen.
		 */
		mutex_exit(&un->un_pm_mutex);
		return_status = pm_busy_component(SD_DEVINFO(un), 0);
		ASSERT(return_status == DDI_SUCCESS);

		mutex_enter(&un->un_pm_mutex);

		if (un->un_pm_count < 0) {
			mutex_exit(&un->un_pm_mutex);

			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_entry: power up component\n");

			/*
			 * pm_raise_power will cause sdpower to be called
			 * which brings the device power level to the
			 * desired state, ON in this case. If successful,
			 * un_pm_count and un_power_level will be updated
			 * appropriately.
			 */
			return_status = pm_raise_power(SD_DEVINFO(un), 0,
			    SD_SPINDLE_ON);

			mutex_enter(&un->un_pm_mutex);

			if (return_status != DDI_SUCCESS) {
				/*
				 * Power up failed.
				 * Idle the device and adjust the count
				 * so the result on exit is that we're
				 * still powered down, i.e. count is less
				 * than 0.
				 */
				SD_TRACE(SD_LOG_IO_PM, un,
				    "sd_pm_entry: power up failed,"
				    " idle the component\n");

				(void) pm_idle_component(SD_DEVINFO(un), 0);
				un->un_pm_count--;
			} else {
				/*
				 * Device is powered up, verify the
				 * count is non-negative.
				 * This is debug only.
				 */
				ASSERT(un->un_pm_count == 0);
			}
		}

		if (return_status == DDI_SUCCESS) {
			/*
			 * For performance, now that the device has been tagged
			 * as busy, and it's known to be powered up, update the
			 * chain types to use jump tables that do not include
			 * pm. This significantly lowers the overhead and
			 * therefore improves performance.
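			 *
			 * For example, a plain disk switches its buf chain
			 * to SD_CHAIN_INFO_DISK_NO_PM below; the PM-aware
			 * chain is expected to be restored once the device
			 * powers down again (see sdpower).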
			 */

			mutex_exit(&un->un_pm_mutex);
			mutex_enter(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_entry: changing uscsi_chain_type from %d\n",
			    un->un_uscsi_chain_type);

			if (un->un_f_non_devbsize_supported) {
				un->un_buf_chain_type =
				    SD_CHAIN_INFO_RMMEDIA_NO_PM;
			} else {
				un->un_buf_chain_type =
				    SD_CHAIN_INFO_DISK_NO_PM;
			}
			un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;

			SD_TRACE(SD_LOG_IO_PM, un,
			    "    changed uscsi_chain_type to %d\n",
			    un->un_uscsi_chain_type);
			mutex_exit(SD_MUTEX(un));
			mutex_enter(&un->un_pm_mutex);

			if (un->un_pm_idle_timeid == NULL) {
				/* 300 ms. */
				un->un_pm_idle_timeid =
				    timeout(sd_pm_idletimeout_handler, un,
				    (drv_usectohz((clock_t)300000)));
				/*
				 * Include an extra call to busy which keeps
				 * the device busy with respect to the PM
				 * layer until the timer fires, at which time
				 * it'll get the extra idle call.
				 */
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			}
		}
	}
	un->un_pm_busy = FALSE;
	/* Next... */
	cv_signal(&un->un_pm_busy_cv);

	un->un_pm_count++;

	SD_TRACE(SD_LOG_IO_PM, un,
	    "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);

	mutex_exit(&un->un_pm_mutex);

	return (return_status);
}


/*
 * Function: sd_pm_exit
 *
 * Description: Called at the completion of a command to manage busy
 *		status for the device. If the device becomes idle the
 *		PM framework is notified.
 *
 * Context: Kernel thread context
 */

static void
sd_pm_exit(struct sd_lun *un)
{
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");

	/*
	 * After attach the following flag is only read, so don't
	 * take the penalty of acquiring a mutex for it.
	 */
	if (un->un_f_pm_is_enabled == TRUE) {

		mutex_enter(&un->un_pm_mutex);
		un->un_pm_count--;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);

		ASSERT(un->un_pm_count >= 0);
		if (un->un_pm_count == 0) {
			mutex_exit(&un->un_pm_mutex);

			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_exit: idle component\n");

			(void) pm_idle_component(SD_DEVINFO(un), 0);

		} else {
			mutex_exit(&un->un_pm_mutex);
		}
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
}


/*
 * Function: sdopen
 *
 * Description: Driver's open(9e) entry point function.
 *
 * Arguments: dev_p - pointer to device number
 *		flag  - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
 *		otyp  - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
 *		cred_p - user credential pointer
 *
 * Return Code: EINVAL
 *		ENXIO
 *		EIO
 *		EROFS
 *		EBUSY
 *
 * Context: Kernel thread context
 */
/* ARGSUSED */
static int
sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
{
	struct sd_lun	*un;
	int		nodelay;
	int		part;
	uint64_t	partmask;
	int		instance;
	dev_t		dev;
	int		rval = EIO;
	diskaddr_t	nblks = 0;
	diskaddr_t	label_cap;

	/* Validate the open type */
	if (otyp >= OTYPCNT) {
		return (EINVAL);
	}

	dev = *dev_p;
	instance = SDUNIT(dev);
	mutex_enter(&sd_detach_mutex);

	/*
	 * Fail the open if there is no softstate for the instance, or
	 * if another thread somewhere is trying to detach the instance.
	 */
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (un->un_detach_count != 0)) {
		mutex_exit(&sd_detach_mutex);
		/*
		 * The probe cache only needs to be cleared when open(9e)
		 * fails with ENXIO (4238046). Unconditionally clearing the
		 * probe cache is fine with separate sd/ssd binaries; on x86,
		 * where parallel SCSI and fibre share one binary, it can be
		 * an issue.
		 */
		sd_scsi_clear_probe_cache();
		return (ENXIO);
	}

	/*
	 * The un_layer_count is to prevent another thread in specfs from
	 * trying to detach the instance, which can happen when we are
	 * called from a higher-layer driver instead of thru specfs.
	 * This will not be needed when DDI provides a layered driver
	 * interface that allows specfs to know that an instance is in
	 * use by a layered driver & should not be detached.
	 *
	 * Note: the semantics for layered driver opens are exactly one
	 * close for every open.
	 */
	if (otyp == OTYP_LYR) {
		un->un_layer_count++;
	}

	/*
	 * Keep a count of the current # of opens in progress. This is because
	 * some layered drivers try to call us as a regular open. This can
	 * cause problems that we cannot prevent, however by keeping this
	 * count we can at least keep our open and detach routines from
	 * racing against each other under such conditions.
	 */
	un->un_opens_in_progress++;
	mutex_exit(&sd_detach_mutex);

	nodelay  = (flag & (FNDELAY | FNONBLOCK));
	part	 = SDPART(dev);
	partmask = 1 << part;

	/*
	 * We use a semaphore here in order to serialize
	 * open and close requests on the device.
	 */
	sema_p(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	/*
	 * All device accesses go thru sdstrategy() where we check
	 * on suspend status but there could be a scsi_poll command,
	 * which bypasses sdstrategy(), so we need to check pm
	 * status.
	 */

	if (!nodelay) {
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}

		mutex_exit(SD_MUTEX(un));
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			rval = EIO;
			SD_ERROR(SD_LOG_OPEN_CLOSE, un,
			    "sdopen: sd_pm_entry failed\n");
			goto open_failed_with_pm;
		}
		mutex_enter(SD_MUTEX(un));
	}

	/* check for previous exclusive open */
	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
	    "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);

	if (un->un_exclopen & (partmask)) {
		goto excl_open_fail;
	}

	if (flag & FEXCL) {
		int i;
		if (un->un_ocmap.lyropen[part]) {
			goto excl_open_fail;
		}
		for (i = 0; i < (OTYPCNT - 1); i++) {
			if (un->un_ocmap.regopen[i] & (partmask)) {
				goto excl_open_fail;
			}
		}
	}

	/*
	 * Check the write permission if this is a removable media device,
	 * NDELAY has not been set, and writable permission is requested.
	 *
	 * Note: If NDELAY was set and this is write-protected media the WRITE
	 * attempt will fail with EIO as part of the I/O processing. This is a
	 * more permissive implementation that allows the open to succeed and
	 * WRITE attempts to fail when appropriate.
	 */
	if (un->un_f_chk_wp_open) {
		if ((flag & FWRITE) && (!nodelay)) {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Defer the check for write permission on writable
			 * DVD drives until sdstrategy; do not fail the open
			 * even if FWRITE is set, since the device may be
			 * writable depending upon the media, and the media
			 * can change after the call to open().
			 */
			if (un->un_f_dvdram_writable_device == FALSE) {
				if (ISCD(un) || sr_check_wp(dev)) {
					rval = EROFS;
					mutex_enter(SD_MUTEX(un));
					SD_ERROR(SD_LOG_OPEN_CLOSE, un,
					    "sdopen: write to cd or write "
					    "protected media\n");
					goto open_fail;
				}
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * If opening in NDELAY/NONBLOCK mode, just return; the check
	 * whether the disk is ready and has a valid geometry is deferred.
	 */
	if (!nodelay) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_ready_and_valid(un);
		mutex_enter(SD_MUTEX(un));
		/*
		 * Fail if device is not ready or if the number of disk
		 * blocks is zero or negative for non-CD devices.
		 */

		nblks = 0;

		if (rval == SD_READY_VALID && (!ISCD(un))) {
			/* if cmlb_partinfo fails, nblks remains 0 */
			mutex_exit(SD_MUTEX(un));
			(void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
			    NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
		}

		if ((rval != SD_READY_VALID) ||
		    (!ISCD(un) && nblks <= 0)) {
			rval = un->un_f_has_removable_media ? ENXIO : EIO;
			SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
			    "device not ready or invalid disk block value\n");
			goto open_fail;
		}
#if defined(__i386) || defined(__amd64)
	} else {
		uchar_t *cp;
		/*
		 * x86 requires special nodelay handling, so that p0 is
		 * always defined and accessible.
		 * Invalidate geometry only if device is not already open.
		 */
		cp = &un->un_ocmap.chkd[0];
		while (cp < &un->un_ocmap.chkd[OCSIZE]) {
			if (*cp != (uchar_t)0) {
				break;
			}
			cp++;
		}
		if (cp == &un->un_ocmap.chkd[OCSIZE]) {
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
		}

#endif
	}

	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part]++;
	} else {
		un->un_ocmap.regopen[otyp] |= partmask;
	}

	/* Set up open and exclusive open flags */
	if (flag & FEXCL) {
		un->un_exclopen |= (partmask);
	}

	/*
	 * If the lun is EFI labeled and lun capacity is greater than the
	 * capacity contained in the label, log a sys-event to notify the
	 * interested module.
	 * To avoid an infinite loop of logging sys-events, we only log the
	 * event when the lun is not opened in NDELAY mode. The event handler
	 * should open the lun in NDELAY mode.
	 */
	if (!(flag & FNDELAY)) {
		mutex_exit(SD_MUTEX(un));
		if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
		    (void *)SD_PATH_DIRECT) == 0) {
			mutex_enter(SD_MUTEX(un));
			if (un->un_f_blockcount_is_valid &&
			    un->un_blockcount > label_cap) {
				mutex_exit(SD_MUTEX(un));
				sd_log_lun_expansion_event(un,
				    (nodelay ? KM_NOSLEEP : KM_SLEEP));
				mutex_enter(SD_MUTEX(un));
			}
		} else {
			mutex_enter(SD_MUTEX(un));
		}
	}

	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: "
	    "open of part %d type %d\n", part, otyp);

	mutex_exit(SD_MUTEX(un));
	if (!nodelay) {
		sd_pm_exit(un);
	}

	sema_v(&un->un_semoclose);

	mutex_enter(&sd_detach_mutex);
	un->un_opens_in_progress--;
	mutex_exit(&sd_detach_mutex);

	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n");
	return (DDI_SUCCESS);

excl_open_fail:
	SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n");
	rval = EBUSY;

open_fail:
	mutex_exit(SD_MUTEX(un));

	/*
	 * On a failed open we must exit the pm management.
	 */
	if (!nodelay) {
		sd_pm_exit(un);
	}
open_failed_with_pm:
	sema_v(&un->un_semoclose);

	mutex_enter(&sd_detach_mutex);
	un->un_opens_in_progress--;
	if (otyp == OTYP_LYR) {
		un->un_layer_count--;
	}
	mutex_exit(&sd_detach_mutex);

	return (rval);
}


/*
 * Function: sdclose
 *
 * Description: Driver's close(9e) entry point function.
 *
 * Arguments: dev - device number
 *		flag - file status flag, informational only
 *		otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
 *		cred_p - user credential pointer
 *
 * Return Code: ENXIO
 *
 * Context: Kernel thread context
 */
/* ARGSUSED */
static int
sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
{
	struct sd_lun	*un;
	uchar_t		*cp;
	int		part;
	int		nodelay;
	int		rval = 0;

	/* Validate the open type */
	if (otyp >= OTYPCNT) {
		return (ENXIO);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	part = SDPART(dev);
	nodelay = flag & (FNDELAY | FNONBLOCK);

	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
	    "sdclose: close of part %d type %d\n", part, otyp);

	/*
	 * We use a semaphore here in order to serialize
	 * open and close requests on the device.
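	 *
	 * The pattern mirrors sdopen(): sema_p(&un->un_semoclose) here,
	 * and sema_v(&un->un_semoclose) on every exit path, so at most
	 * one open or close for this unit runs at a time.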
	 */
	sema_p(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	/* Don't proceed if power is being changed. */
	while (un->un_state == SD_STATE_PM_CHANGING) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}

	if (un->un_exclopen & (1 << part)) {
		un->un_exclopen &= ~(1 << part);
	}

	/* Update the open partition map */
	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part] -= 1;
	} else {
		un->un_ocmap.regopen[otyp] &= ~(1 << part);
	}

	cp = &un->un_ocmap.chkd[0];
	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
		if (*cp != (uchar_t)0) {
			break;
		}
		cp++;
	}

	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
		SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");

		/*
		 * We avoid persistence upon the last close, and set
		 * the throttle back to the maximum.
		 */
		un->un_throttle = un->un_saved_throttle;

		if (un->un_state == SD_STATE_OFFLINE) {
			if (un->un_f_is_fibre == FALSE) {
				scsi_log(SD_DEVINFO(un), sd_label,
				    CE_WARN, "offline\n");
			}
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));

		} else {
			/*
			 * Flush any outstanding writes in NVRAM cache.
			 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
			 * command; it may not work for non-Pluto devices.
			 * SYNCHRONIZE CACHE is not required for removables,
			 * except DVD-RAM drives.
			 *
			 * Also note: because SYNCHRONIZE CACHE is currently
			 * the only command issued here that requires the
			 * drive be powered up, only do the power up before
			 * sending the Sync Cache command. If additional
			 * commands are added which require a powered up
			 * drive, the following sequence may have to change.
			 *
			 * And finally, note that parallel SCSI on SPARC
			 * only issues a Sync Cache to DVD-RAM, a newly
			 * supported device.
			 */
#if defined(__i386) || defined(__amd64)
			if (un->un_f_sync_cache_supported ||
			    un->un_f_dvdram_writable_device == TRUE) {
#else
			if (un->un_f_dvdram_writable_device == TRUE) {
#endif
				mutex_exit(SD_MUTEX(un));
				if (sd_pm_entry(un) == DDI_SUCCESS) {
					rval =
					    sd_send_scsi_SYNCHRONIZE_CACHE(un,
					    NULL);
					/* ignore error if not supported */
					if (rval == ENOTSUP) {
						rval = 0;
					} else if (rval != 0) {
						rval = EIO;
					}
					sd_pm_exit(un);
				} else {
					rval = EIO;
				}
				mutex_enter(SD_MUTEX(un));
			}

			/*
			 * For devices that support DOOR_LOCK, send an ALLOW
			 * MEDIA REMOVAL command, but don't get upset if it
			 * fails. We need to raise the power of the drive
			 * before we can call sd_send_scsi_DOORLOCK().
			 */
			if (un->un_f_doorlock_supported) {
				mutex_exit(SD_MUTEX(un));
				if (sd_pm_entry(un) == DDI_SUCCESS) {
					rval = sd_send_scsi_DOORLOCK(un,
					    SD_REMOVAL_ALLOW, SD_PATH_DIRECT);

					sd_pm_exit(un);
					if (ISCD(un) && (rval != 0) &&
					    (nodelay != 0)) {
						rval = ENXIO;
					}
				} else {
					rval = EIO;
				}
				mutex_enter(SD_MUTEX(un));
			}

			/*
			 * If a device has removable media, invalidate all
			 * parameters related to media, such as geometry,
			 * blocksize, and blockcount.
			 */
			if (un->un_f_has_removable_media) {
				sr_ejected(un);
			}

			/*
			 * Destroy the cache (if it exists) which was
			 * allocated for the write maps, since this is
			 * the last close for this media.
			 */
			if (un->un_wm_cache) {
				/*
				 * Check if there are pending commands;
				 * if there are, give a warning and
				 * do not destroy the cache.
				 */
				if (un->un_ncmds_in_driver > 0) {
					scsi_log(SD_DEVINFO(un),
					    sd_label, CE_WARN,
					    "Unable to clean up memory "
					    "because of pending I/O\n");
				} else {
					kmem_cache_destroy(
					    un->un_wm_cache);
					un->un_wm_cache = NULL;
				}
			}
		}
	}

	mutex_exit(SD_MUTEX(un));
	sema_v(&un->un_semoclose);

	if (otyp == OTYP_LYR) {
		mutex_enter(&sd_detach_mutex);
		/*
		 * The detach routine may run when the layer count
		 * drops to zero.
		 */
		un->un_layer_count--;
		mutex_exit(&sd_detach_mutex);
	}

	return (rval);
}


/*
 * Function: sd_ready_and_valid
 *
 * Description: Test if device is ready and has a valid geometry.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: SD_READY_VALID		ready and valid label
 *		SD_NOT_READY_VALID	not ready, no label
 *		SD_RESERVED_BY_OTHERS	reservation conflict
 *
 * Context: Never called at interrupt context.
 */

static int
sd_ready_and_valid(struct sd_lun *un)
{
	struct sd_errstats *stp;
	uint64_t	capacity;
	uint_t		lbasize;
	int		rval = SD_READY_VALID;
	char		name_str[48];
	int		is_valid;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));
	/*
	 * If a device has removable media, we must check if media is
	 * ready when checking if this device is ready and valid.
	 */
	if (un->un_f_has_removable_media) {
		mutex_exit(SD_MUTEX(un));
		if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}

		is_valid = SD_IS_VALID_LABEL(un);
		mutex_enter(SD_MUTEX(un));
		if (!is_valid ||
		    (un->un_f_blockcount_is_valid == FALSE) ||
		    (un->un_f_tgt_blocksize_is_valid == FALSE)) {

			/* capacity has to be read every open. */
			mutex_exit(SD_MUTEX(un));
			if (sd_send_scsi_READ_CAPACITY(un, &capacity,
			    &lbasize, SD_PATH_DIRECT) != 0) {
				cmlb_invalidate(un->un_cmlbhandle,
				    (void *)SD_PATH_DIRECT);
				mutex_enter(SD_MUTEX(un));
				rval = SD_NOT_READY_VALID;
				goto done;
			} else {
				mutex_enter(SD_MUTEX(un));
				sd_update_block_info(un, lbasize, capacity);
			}
		}

		/*
		 * Check if the media in the device is writable or not.
		 */
		if (!is_valid && ISCD(un)) {
			sd_check_for_writable_cd(un, SD_PATH_DIRECT);
		}

	} else {
		/*
		 * Do a test unit ready to clear any unit attention from
		 * non-cd devices.
		 */
		mutex_exit(SD_MUTEX(un));
		(void) sd_send_scsi_TEST_UNIT_READY(un, 0);
		mutex_enter(SD_MUTEX(un));
	}


	/*
	 * If this is a non-512 block device, allocate space for
	 * the wmap cache. This is being done here since every time
	 * a media is changed this routine will be called and the
	 * block size is a function of media rather than device.
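	 *
	 * For example, instance 3 of the sd driver would create a cache
	 * named "sd3_cache" via the snprintf() below; the name is purely
	 * diagnostic and shows up in kernel memory cache statistics.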
	 */
	if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) {
		if (!(un->un_wm_cache)) {
			(void) snprintf(name_str, sizeof (name_str),
			    "%s%d_cache",
			    ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			un->un_wm_cache = kmem_cache_create(
			    name_str, sizeof (struct sd_w_map),
			    8, sd_wm_cache_constructor,
			    sd_wm_cache_destructor, NULL,
			    (void *)un, NULL, 0);
			if (!(un->un_wm_cache)) {
				rval = ENOMEM;
				goto done;
			}
		}
	}

	if (un->un_state == SD_STATE_NORMAL) {
		/*
		 * If the target is not yet ready here (defined by a TUR
		 * failure), invalidate the geometry and print an 'offline'
		 * message. This is a legacy message, as the state of the
		 * target is not actually changed to SD_STATE_OFFLINE.
		 *
		 * If the TUR fails for EACCES (Reservation Conflict),
		 * SD_RESERVED_BY_OTHERS will be returned to indicate
		 * reservation conflict. If the TUR fails for other
		 * reasons, SD_NOT_READY_VALID will be returned.
		 */
		int err;

		mutex_exit(SD_MUTEX(un));
		err = sd_send_scsi_TEST_UNIT_READY(un, 0);
		mutex_enter(SD_MUTEX(un));

		if (err != 0) {
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
			if (err == EACCES) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "reservation conflict\n");
				rval = SD_RESERVED_BY_OTHERS;
			} else {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "drive offline\n");
				rval = SD_NOT_READY_VALID;
			}
			goto done;
		}
	}

	if (un->un_f_format_in_progress == FALSE) {
		mutex_exit(SD_MUTEX(un));
		if (cmlb_validate(un->un_cmlbhandle, 0,
		    (void *)SD_PATH_DIRECT) != 0) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		if (un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_ready_and_valid: un:0x%p pstats created and "
			    "set\n", un);
		}
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * If this device supports the DOOR_LOCK command, try to send it
	 * to PREVENT MEDIA REMOVAL, but don't get upset if it fails.
	 * For a CD, however, a failure is an error.
	 */
	if (un->un_f_doorlock_supported) {
		mutex_exit(SD_MUTEX(un));
		if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT) != 0) && ISCD(un)) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		mutex_enter(SD_MUTEX(un));
	}

	/* The state has changed, inform the media watch routines */
	un->un_mediastate = DKIO_INSERTED;
	cv_broadcast(&un->un_state_cv);
	rval = SD_READY_VALID;

done:

	/*
	 * Initialize the capacity kstat value, if there was no media
	 * previously (capacity kstat is 0) and media has now been
	 * inserted (un_blockcount > 0).
	 */
	if (un->un_errstats != NULL) {
		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		if ((stp->sd_capacity.value.ui64 == 0) &&
		    (un->un_f_blockcount_is_valid == TRUE)) {
			stp->sd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_blockcount *
			    un->un_sys_blocksize);
		}
	}

	mutex_exit(SD_MUTEX(un));
	return (rval);
}


/*
 * Function: sdmin
 *
 * Description: Routine to limit the size of a data transfer. Used in
 *		conjunction with physio(9F).
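 *
 *		For example, with a hypothetical un_max_xfer_size of
 *		0x100000 (1 MB), a 2 MB request would have b_bcount
 *		trimmed to 1 MB here and physio(9F) would issue it as
 *		two transfers.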
 *
 * Arguments: bp - pointer to the indicated buf(9S) struct.
 *
 * Context: Kernel thread context.
 */

static void
sdmin(struct buf *bp)
{
	struct sd_lun	*un;
	int		instance;

	instance = SDUNIT(bp->b_edev);

	un = ddi_get_soft_state(sd_state, instance);
	ASSERT(un != NULL);

	if (bp->b_bcount > un->un_max_xfer_size) {
		bp->b_bcount = un->un_max_xfer_size;
	}
}


/*
 * Function: sdread
 *
 * Description: Driver's read(9e) entry point function.
 *
 * Arguments: dev   - device number
 *		uio   - structure pointer describing where data is to be stored
 *			in user's space
 *		cred_p  - user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by physio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	int		secmask;
	int		err;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));
		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
			mutex_enter(SD_MUTEX(un));
			un->un_ncmds_in_driver--;
			ASSERT(un->un_ncmds_in_driver >= 0);
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Read requests are restricted to multiples of the system block size.
	 */
	secmask = un->un_sys_blocksize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdread: file offset not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdread: transfer length not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else {
		err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
	}
	return (err);
}


/*
 * Function: sdwrite
 *
 * Description: Driver's write(9e) entry point function.
 *
 * Arguments: dev   - device number
 *		uio   - structure pointer describing where data is stored in
 *			user's space
 *		cred_p  - user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by physio
 *
 * Context: Kernel thread context.
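 *
 *		Like sdread() above, this routine brackets its
 *		sd_ready_and_valid() check with un_ncmds_in_driver++/--
 *		under SD_MUTEX, so that a concurrent detach sees the unit
 *		as busy while that I/O-issuing check runs.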
 */
/* ARGSUSED */
static int
sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	int		secmask;
	int		err;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));
		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
			mutex_enter(SD_MUTEX(un));
			un->un_ncmds_in_driver--;
			ASSERT(un->un_ncmds_in_driver >= 0);
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Write requests are restricted to multiples of the system block size.
	 */
	secmask = un->un_sys_blocksize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdwrite: file offset not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdwrite: transfer length not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else {
		err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
	}
	return (err);
}


/*
 * Function: sdaread
 *
 * Description: Driver's aread(9e) entry point function.
 *
 * Arguments: dev   - device number
 *		aio   - structure pointer describing where data is to be stored
 *		cred_p  - user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by aphysio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	struct uio	*uio = aio->aio_uio;
	int		secmask;
	int		err;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));
		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
			mutex_enter(SD_MUTEX(un));
			un->un_ncmds_in_driver--;
			ASSERT(un->un_ncmds_in_driver >= 0);
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Read requests are restricted to multiples of the system block size.
	 */
	secmask = un->un_sys_blocksize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdaread: file offset not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdaread: transfer length not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else {
		err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
	}
	return (err);
}


/*
 * Function: sdawrite
 *
 * Description: Driver's awrite(9e) entry point function.
 *
 * Arguments: dev   - device number
 *		aio   - structure pointer describing where data is stored
 *		cred_p  - user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by aphysio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	struct uio	*uio = aio->aio_uio;
	int		secmask;
	int		err;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));
		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
			mutex_enter(SD_MUTEX(un));
			un->un_ncmds_in_driver--;
			ASSERT(un->un_ncmds_in_driver >= 0);
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Write requests are restricted to multiples of the system block size.
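	 *
	 * For example, with a 512-byte system block size, secmask below is
	 * 0x1ff; an offset of 513 fails the check (513 & 0x1ff == 1) and
	 * the request is rejected with EINVAL.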
10269 */ 10270 secmask = un->un_sys_blocksize - 1; 10271 10272 if (uio->uio_loffset & ((offset_t)(secmask))) { 10273 SD_ERROR(SD_LOG_READ_WRITE, un, 10274 "sdawrite: file offset not modulo %d\n", 10275 un->un_sys_blocksize); 10276 err = EINVAL; 10277 } else if (uio->uio_iov->iov_len & (secmask)) { 10278 SD_ERROR(SD_LOG_READ_WRITE, un, 10279 "sdawrite: transfer length not modulo %d\n", 10280 un->un_sys_blocksize); 10281 err = EINVAL; 10282 } else { 10283 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10284 } 10285 return (err); 10286 } 10287 10288 10289 10290 10291 10292 /* 10293 * Driver IO processing follows the following sequence: 10294 * 10295 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10296 * | | ^ 10297 * v v | 10298 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10299 * | | | | 10300 * v | | | 10301 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10302 * | | ^ ^ 10303 * v v | | 10304 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10305 * | | | | 10306 * +---+ | +------------+ +-------+ 10307 * | | | | 10308 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10309 * | v | | 10310 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10311 * | | ^ | 10312 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10313 * | v | | 10314 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10315 * | | ^ | 10316 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10317 * | v | | 10318 * | sd_checksum_iostart() sd_checksum_iodone() | 10319 * | | ^ | 10320 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10321 * | v | | 10322 * | sd_pm_iostart() sd_pm_iodone() | 10323 * | | ^ | 10324 * | | | | 10325 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10326 * | ^ 10327 * v | 10328 * sd_core_iostart() | 10329 * | | 10330 * | +------>(*destroypkt)() 10331 * +-> sd_start_cmds() <-+ | | 10332 * | | | v 10333 * | | | scsi_destroy_pkt(9F) 10334 * | | | 10335 * +->(*initpkt)() +- sdintr() 10336 * | | | | 10337 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10338 * | +-> scsi_setup_cdb(9F) | 10339 * | | 10340 * +--> scsi_transport(9F) | 10341 * | | 10342 * +----> SCSA ---->+ 10343 * 10344 * 10345 * This code is based upon the following presumptions: 10346 * 10347 * - iostart and iodone functions operate on buf(9S) structures. These 10348 * functions perform the necessary operations on the buf(9S) and pass 10349 * them along to the next function in the chain by using the macros 10350 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10351 * (for iodone side functions). 10352 * 10353 * - The iostart side functions may sleep. The iodone side functions 10354 * are called under interrupt context and may NOT sleep. Therefore 10355 * iodone side functions also may not call iostart side functions. 10356 * (NOTE: iostart side functions should NOT sleep for memory, as 10357 * this could result in deadlock.) 10358 * 10359 * - An iostart side function may call its corresponding iodone side 10360 * function directly (if necessary). 10361 * 10362 * - In the event of an error, an iostart side function can return a buf(9S) 10363 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10364 * b_error in the usual way of course). 10365 * 10366 * - The taskq mechanism may be used by the iodone side functions to dispatch 10367 * requests to the iostart side functions. The iostart side functions in 10368 * this case would be called under the context of a taskq thread, so it's 10369 * OK for them to block/sleep/spin in this case. 
 *
 * - iostart side functions may allocate "shadow" buf(9S) structs and
 *   pass them along to the next function in the chain. The corresponding
 *   iodone side functions must coalesce the "shadow" bufs and return
 *   the "original" buf to the next higher layer.
 *
 * - The b_private field of the buf(9S) struct holds a pointer to
 *   an sd_xbuf struct, which contains information needed to
 *   construct the scsi_pkt for the command.
 *
 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
 *   layer must acquire & release the SD_MUTEX(un) as needed.
 */


/*
 * Create taskq for all targets in the system. This is created at
 * _init(9E) and destroyed at _fini(9E).
 *
 * Note: here we set the minalloc to a reasonably high number to ensure that
 * we will have an adequate supply of task entries available at interrupt time.
 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
 * sd_taskq_create(). Since we do not want to sleep for allocations at
 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
 * requests at any one instant in time.
 */
#define	SD_TASKQ_NUMTHREADS	8
#define	SD_TASKQ_MINALLOC	256
#define	SD_TASKQ_MAXALLOC	256

static taskq_t	*sd_tq = NULL;
_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))

static int	sd_taskq_minalloc = SD_TASKQ_MINALLOC;
static int	sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;

/*
 * The following task queue is being created for the write part of
 * read-modify-write of non-512 block size devices.
 * Limit the number of threads to 1 for now. This number has been chosen
 * considering the fact that it currently applies only to DVD-RAM and MO
 * drives, for which performance is not the main criterion at this stage.
 * Note: it remains to be explored whether a single taskq could be used
 * in the future.
 */
#define	SD_WMR_TASKQ_NUMTHREADS	1
static taskq_t	*sd_wmr_tq = NULL;
_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))

/*
 * Function: sd_taskq_create
 *
 * Description: Create taskq thread(s) and preallocate task entries
 *
 * Return Code: none; the allocated taskq pointers are stored in the
 *		sd_tq and sd_wmr_tq globals.
 *
 * Context: Can sleep. Requires blockable context.
 *
 * Notes: - The taskq() facility currently is NOT part of the DDI.
 *	     (definitely NOT recommended for 3rd-party drivers!) :-)
 *	   - taskq_create() will block for memory, also it will panic
 *	     if it cannot create the requested number of threads.
 *	   - Currently taskq_create() creates threads that cannot be
 *	     swapped.
 *	   - We use TASKQ_PREPOPULATE to ensure we have an adequate
 *	     supply of taskq entries at interrupt time (ie, so that we
 *	     do not have to sleep for memory)
 */

static void
sd_taskq_create(void)
{
	char	taskq_name[TASKQ_NAMELEN];

	ASSERT(sd_tq == NULL);
	ASSERT(sd_wmr_tq == NULL);

	(void) snprintf(taskq_name, sizeof (taskq_name),
	    "%s_drv_taskq", sd_label);
	sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
	    TASKQ_PREPOPULATE));

	(void) snprintf(taskq_name, sizeof (taskq_name),
	    "%s_rmw_taskq", sd_label);
	sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
	    TASKQ_PREPOPULATE));
}


/*
 * Function: sd_taskq_delete
 *
 * Description: Complementary cleanup routine for sd_taskq_create().
 *
 * Context: Kernel thread context.
 */

static void
sd_taskq_delete(void)
{
	ASSERT(sd_tq != NULL);
	ASSERT(sd_wmr_tq != NULL);
	taskq_destroy(sd_tq);
	taskq_destroy(sd_wmr_tq);
	sd_tq = NULL;
	sd_wmr_tq = NULL;
}


/*
 * Function: sdstrategy
 *
 * Description: Driver's strategy (9E) entry point function.
 *
 * Arguments: bp - pointer to buf(9S)
 *
 * Return Code: Always returns zero
 *
 * Context: Kernel thread context.
 */

static int
sdstrategy(struct buf *bp)
{
	struct sd_lun *un;

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	if (un == NULL) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}
	/* As was done in the past, fail new cmds. if state is dumping. */
	if (un->un_state == SD_STATE_DUMPING) {
		bioerror(bp, ENXIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * Commands may sneak in while we released the mutex in
	 * DDI_SUSPEND, we should block new commands. However, old
	 * commands that are still in the driver at this point should
	 * still be allowed to drain.
	 */
	mutex_enter(SD_MUTEX(un));
	/*
	 * Must wait here if either the device is suspended or
	 * its power level is changing.
	 */
	while ((un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}

	un->un_ncmds_in_driver++;

	/*
	 * atapi: Since we are running the CD for now in PIO mode we need to
	 * call bp_mapin here to avoid bp_mapin being called in interrupt
	 * context under the HBA's init_pkt routine.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mutex_exit(SD_MUTEX(un));
		bp_mapin(bp);
		mutex_enter(SD_MUTEX(un));
	}
	SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	mutex_exit(SD_MUTEX(un));

	/*
	 * This will (eventually) allocate the sd_xbuf area and
	 * call sd_xbuf_strategy(). We just want to return the
	 * result of ddi_xbuf_qstrategy so that we have an
	 * optimized tail call which saves us a stack frame.
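	 *
	 * A sketch of the resulting flow (see the chain diagram above):
	 *
	 *	sdstrategy(bp)
	 *	  -> ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)
	 *	       -> sd_xbuf_strategy(bp, xp, un)	(callback)
	 *		    -> SD_BEGIN_IOSTART(...)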
	 */
	return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
}


/*
 * Function: sd_xbuf_strategy
 *
 * Description: Function for initiating IO operations via the
 *		ddi_xbuf_qstrategy() mechanism.
 *
 * Context: Kernel thread context.
 */

static void
sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * Initialize the fields in the xbuf and save a pointer to the
	 * xbuf in bp->b_private.
	 */
	sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);

	/* Send the buf down the iostart chain */
	SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
}


/*
 * Function: sd_xbuf_init
 *
 * Description: Prepare the given sd_xbuf struct for use.
 *
 * Arguments: un - ptr to softstate
 *		bp - ptr to associated buf(9S)
 *		xp - ptr to associated sd_xbuf
 *		chain_type - IO chain type to use:
 *			SD_CHAIN_NULL
 *			SD_CHAIN_BUFIO
 *			SD_CHAIN_USCSI
 *			SD_CHAIN_DIRECT
 *			SD_CHAIN_DIRECT_PRIORITY
 *		pktinfop - ptr to private data struct for scsi_pkt(9S)
 *			   initialization; may be NULL if none.
 *
 * Context: Kernel thread context
 */

static void
sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
	uchar_t chain_type, void *pktinfop)
{
	int index;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);

	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
	    bp, chain_type);

	xp->xb_un	= un;
	xp->xb_pktp	= NULL;
	xp->xb_pktinfo	= pktinfop;
	xp->xb_private	= bp->b_private;
	xp->xb_blkno	= (daddr_t)bp->b_blkno;

	/*
	 * Set up the iostart and iodone chain indexes in the xbuf, based
	 * upon the specified chain type to use.
	 */
	switch (chain_type) {
	case SD_CHAIN_NULL:
		/*
		 * Fall thru to just use the values for the buf type, even
		 * tho for the NULL chain these values will never be used.
		 */
		/* FALLTHRU */
	case SD_CHAIN_BUFIO:
		index = un->un_buf_chain_type;
		break;
	case SD_CHAIN_USCSI:
		index = un->un_uscsi_chain_type;
		break;
	case SD_CHAIN_DIRECT:
		index = un->un_direct_chain_type;
		break;
	case SD_CHAIN_DIRECT_PRIORITY:
		index = un->un_priority_chain_type;
		break;
	default:
		/* We're really broken if we ever get here... */
		panic("sd_xbuf_init: illegal chain type!");
		/*NOTREACHED*/
	}

	xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
	xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;

	/*
	 * It might be a bit easier to simply bzero the entire xbuf above,
	 * but it turns out that since we init a fair number of members
	 * anyway, we save a fair number of cycles by doing explicit
	 * assignment of zero.
	 */
	xp->xb_pkt_flags	= 0;
	xp->xb_dma_resid	= 0;
	xp->xb_retry_count	= 0;
	xp->xb_victim_retry_count = 0;
	xp->xb_ua_retry_count	= 0;
	xp->xb_nr_retry_count	= 0;
	xp->xb_sense_bp		= NULL;
	xp->xb_sense_status	= 0;
	xp->xb_sense_state	= 0;
	xp->xb_sense_resid	= 0;

	bp->b_private	= xp;
	bp->b_flags	&= ~(B_DONE | B_ERROR);
	bp->b_resid	= 0;
	bp->av_forw	= NULL;
	bp->av_back	= NULL;
	bioerror(bp, 0);

	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
}


/*
 * Function: sd_uscsi_strategy
 *
 * Description: Wrapper for calling into the USCSI chain via physio(9F)
 *
 * Arguments: bp - buf struct ptr
 *
 * Return Code: Always returns 0
 *
 * Context: Kernel thread context
 */

static int
sd_uscsi_strategy(struct buf *bp)
{
	struct sd_lun		*un;
	struct sd_uscsi_info	*uip;
	struct sd_xbuf		*xp;
	uchar_t			chain_type;

	ASSERT(bp != NULL);

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	if (un == NULL) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);

	mutex_enter(SD_MUTEX(un));
	/*
	 * atapi: Since we are running the CD for now in PIO mode we need to
	 * call bp_mapin here to avoid bp_mapin being called in interrupt
	 * context under the HBA's init_pkt routine.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mutex_exit(SD_MUTEX(un));
		bp_mapin(bp);
		mutex_enter(SD_MUTEX(un));
	}
	un->un_ncmds_in_driver++;
	SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);
	mutex_exit(SD_MUTEX(un));

	/*
	 * A pointer to a struct sd_uscsi_info is expected in bp->b_private
	 */
	ASSERT(bp->b_private != NULL);
	uip = (struct sd_uscsi_info *)bp->b_private;

	switch (uip->ui_flags) {
	case SD_PATH_DIRECT:
		chain_type = SD_CHAIN_DIRECT;
		break;
	case SD_PATH_DIRECT_PRIORITY:
		chain_type = SD_CHAIN_DIRECT_PRIORITY;
		break;
	default:
		chain_type = SD_CHAIN_USCSI;
		break;
	}

	/*
	 * We may allocate an extra buf for external USCSI commands. If the
	 * application asks for bigger than 20-byte sense data via USCSI,
	 * the SCSA layer will allocate a 252-byte sense buf for that command.
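	 *
	 * The allocation below grows the xbuf by the difference; e.g.
	 * assuming a 20-byte SENSE_LENGTH and a 252-byte MAX_SENSE_LENGTH
	 * (the values the text above implies), the xbuf is allocated
	 * 232 bytes larger than sizeof (struct sd_xbuf).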
10758 */ 10759 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 10760 SENSE_LENGTH) { 10761 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 10762 MAX_SENSE_LENGTH, KM_SLEEP); 10763 } else { 10764 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 10765 } 10766 10767 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 10768 10769 /* Use the index obtained within xbuf_init */ 10770 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 10771 10772 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 10773 10774 return (0); 10775 } 10776 10777 /* 10778 * Function: sd_send_scsi_cmd 10779 * 10780 * Description: Runs a USCSI command for the user (when called through 10781 * sdioctl) or for the driver 10782 * 10783 * Arguments: dev - the dev_t for the device 10784 * incmd - ptr to a valid uscsi_cmd struct 10785 * flag - bit flag, indicating open settings, 32/64 bit type 10786 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 10787 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 10788 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 10789 * to use the USCSI "direct" chain and bypass the normal 10790 * command waitq. 10791 * 10792 * Return Code: 0 - successful completion of the given command 10793 * EIO - scsi_uscsi_handle_command() failed 10794 * ENXIO - soft state not found for specified dev 10795 * EINVAL 10796 * EFAULT - copyin/copyout error 10797 * return code of scsi_uscsi_handle_command(): 10798 * EIO 10799 * ENXIO 10800 * EACCES 10801 * 10802 * Context: Waits for command to complete. Can sleep. 10803 */ 10804
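/*
 * Illustrative use (a sketch only; a hypothetical in-driver caller issuing
 * a TEST UNIT READY might look like this):
 *
 *	struct uscsi_cmd ucmd;
 *	union scsi_cdb cdb;
 *
 *	bzero(&ucmd, sizeof (ucmd));
 *	bzero(&cdb, sizeof (cdb));
 *	cdb.scc_cmd = SCMD_TEST_UNIT_READY;
 *	ucmd.uscsi_cdb = (caddr_t)&cdb;
 *	ucmd.uscsi_cdblen = CDB_GROUP0;
 *	ucmd.uscsi_timeout = sd_io_time;
 *	rval = sd_send_scsi_cmd(dev, &ucmd, FKIOCTL, UIO_SYSSPACE,
 *	    SD_PATH_DIRECT);
 */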
10805 static int 10806 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 10807 enum uio_seg dataspace, int path_flag) 10808 { 10809 struct sd_uscsi_info *uip; 10810 struct uscsi_cmd *uscmd; 10811 struct sd_lun *un; 10812 int format = 0; 10813 int rval; 10814 10815 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 10816 if (un == NULL) { 10817 return (ENXIO); 10818 } 10819 10820 ASSERT(!mutex_owned(SD_MUTEX(un))); 10821 10822 #ifdef SDDEBUG 10823 switch (dataspace) { 10824 case UIO_USERSPACE: 10825 SD_TRACE(SD_LOG_IO, un, 10826 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 10827 break; 10828 case UIO_SYSSPACE: 10829 SD_TRACE(SD_LOG_IO, un, 10830 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 10831 break; 10832 default: 10833 SD_TRACE(SD_LOG_IO, un, 10834 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 10835 break; 10836 } 10837 #endif 10838 10839 rval = scsi_uscsi_alloc_and_copyin((intptr_t)incmd, flag, 10840 SD_ADDRESS(un), &uscmd); 10841 if (rval != 0) { 10842 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10843 "scsi_uscsi_alloc_and_copyin failed\n"); 10844 return (rval); 10845 } 10846 10847 if ((uscmd->uscsi_cdb != NULL) && 10848 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 10849 mutex_enter(SD_MUTEX(un)); 10850 un->un_f_format_in_progress = TRUE; 10851 mutex_exit(SD_MUTEX(un)); 10852 format = 1; 10853 } 10854 10855 /* 10856 * Allocate an sd_uscsi_info struct and fill it with the info 10857 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 10858 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 10859 * since we allocate the buf here in this function, we do not 10860 * need to preserve the prior contents of b_private. 10861 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 10862 */ 10863 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 10864 uip->ui_flags = path_flag; 10865 uip->ui_cmdp = uscmd; 10866 10867 /* 10868 * Commands sent with priority are intended for error recovery 10869 * situations, and do not have retries performed. 10870 */ 10871 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 10872 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 10873 } 10874 uscmd->uscsi_flags &= ~USCSI_NOINTR; 10875 10876 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 10877 sd_uscsi_strategy, NULL, uip); 10878 10879 #ifdef SDDEBUG 10880 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10881 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 10882 uscmd->uscsi_status, uscmd->uscsi_resid); 10883 if (uscmd->uscsi_bufaddr != NULL) { 10884 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 10885 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 10886 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 10887 if (dataspace == UIO_SYSSPACE) { 10888 SD_DUMP_MEMORY(un, SD_LOG_IO, 10889 "data", (uchar_t *)uscmd->uscsi_bufaddr, 10890 uscmd->uscsi_buflen, SD_LOG_HEX); 10891 } 10892 } 10893 #endif 10894 10895 if (format == 1) { 10896 mutex_enter(SD_MUTEX(un)); 10897 un->un_f_format_in_progress = FALSE; 10898 mutex_exit(SD_MUTEX(un)); 10899 } 10900 10901 (void) scsi_uscsi_copyout_and_free((intptr_t)incmd, uscmd); 10902 kmem_free(uip, sizeof (struct sd_uscsi_info)); 10903 10904 return (rval); 10905 } 10906 10907 10908 /* 10909 * Function: sd_buf_iodone 10910 * 10911 * Description: Frees the sd_xbuf & returns the buf to its originator. 10912 * 10913 * Context: May be called from interrupt context. 10914 */ 10915 /* ARGSUSED */ 10916 static void 10917 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 10918 { 10919 struct sd_xbuf *xp; 10920 10921 ASSERT(un != NULL); 10922 ASSERT(bp != NULL); 10923 ASSERT(!mutex_owned(SD_MUTEX(un))); 10924 10925 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 10926 10927 xp = SD_GET_XBUF(bp); 10928 ASSERT(xp != NULL); 10929 10930 mutex_enter(SD_MUTEX(un)); 10931 10932 /* 10933 * Grab the time when the cmd completed. 10934 * This is used to determine whether the device has been 10935 * idle long enough to be declared idle to the PM framework. 10936 * This lowers PM overhead, and therefore improves 10937 * performance per I/O operation. 10938 */ 10939 un->un_pm_idle_time = ddi_get_time(); 10940 10941 un->un_ncmds_in_driver--; 10942 ASSERT(un->un_ncmds_in_driver >= 0); 10943 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 10944 un->un_ncmds_in_driver); 10945 10946 mutex_exit(SD_MUTEX(un)); 10947 10948 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 10949 biodone(bp); /* bp is gone after this */ 10950 10951 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 10952 } 10953 10954 10955 /* 10956 * Function: sd_uscsi_iodone 10957 * 10958 * Description: Frees the sd_xbuf & returns the buf to its originator. 10959 * 10960 * Context: May be called from interrupt context.
10961 */ 10962 /* ARGSUSED */ 10963 static void 10964 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 10965 { 10966 struct sd_xbuf *xp; 10967 10968 ASSERT(un != NULL); 10969 ASSERT(bp != NULL); 10970 10971 xp = SD_GET_XBUF(bp); 10972 ASSERT(xp != NULL); 10973 ASSERT(!mutex_owned(SD_MUTEX(un))); 10974 10975 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 10976 10977 bp->b_private = xp->xb_private; 10978 10979 mutex_enter(SD_MUTEX(un)); 10980 10981 /* 10982 * Grab the time when the cmd completed. 10983 * This is used to determine whether the device has been 10984 * idle long enough to be declared idle to the PM framework. 10985 * This lowers PM overhead, and therefore improves 10986 * performance per I/O operation. 10987 */ 10988 un->un_pm_idle_time = ddi_get_time(); 10989 10990 un->un_ncmds_in_driver--; 10991 ASSERT(un->un_ncmds_in_driver >= 0); 10992 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 10993 un->un_ncmds_in_driver); 10994 10995 mutex_exit(SD_MUTEX(un)); 10996 10997 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 10998 SENSE_LENGTH) { 10999 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 11000 MAX_SENSE_LENGTH); 11001 } else { 11002 kmem_free(xp, sizeof (struct sd_xbuf)); 11003 } 11004 11005 biodone(bp); 11006 11007 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 11008 } 11009 11010 11011 /* 11012 * Function: sd_mapblockaddr_iostart 11013 * 11014 * Description: Verify request lies within the partition limits for 11015 * the indicated minor device. Issue "overrun" buf if 11016 * request would exceed partition range. Converts 11017 * partition-relative block address to absolute. 11018 * 11019 * Context: Can sleep 11020 * 11021 * Issues: This follows what the old code did, in terms of accessing 11022 * some of the partition info in the unit struct without holding 11023 * the mutex. This is a general issue, if the partition info 11024 * can be altered while IO is in progress... as soon as we send 11025 * a buf, its partitioning can be invalid before it gets to the 11026 * device. Probably the right fix is to move partitioning out 11027 * of the driver entirely. 11028 */ 11029 11030 static void 11031 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 11032 { 11033 diskaddr_t nblocks; /* #blocks in the given partition */ 11034 daddr_t blocknum; /* Block number specified by the buf */ 11035 size_t requested_nblocks; 11036 size_t available_nblocks; 11037 int partition; 11038 diskaddr_t partition_offset; 11039 struct sd_xbuf *xp; 11040 11041 11042 ASSERT(un != NULL); 11043 ASSERT(bp != NULL); 11044 ASSERT(!mutex_owned(SD_MUTEX(un))); 11045 11046 SD_TRACE(SD_LOG_IO_PARTITION, un, 11047 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 11048 11049 xp = SD_GET_XBUF(bp); 11050 ASSERT(xp != NULL); 11051 11052 /* 11053 * If the geometry is not indicated as valid, attempt to access 11054 * the unit & verify the geometry/label. This can be the case for 11055 * removable-media devices, or if the device was opened in 11056 * NDELAY/NONBLOCK mode. 11057 */ 11058 if (!SD_IS_VALID_LABEL(un) && 11059 (sd_ready_and_valid(un) != SD_READY_VALID)) { 11060 /* 11061 * For removable devices it is possible to start an I/O 11062 * without media by opening the device in nodelay mode. 11063 * Also for writable CDs there can be many scenarios where 11064 * there is no geometry yet but the volume manager is trying 11065 * to issue a read() just because it can see the TOC on the 11066 * CD. So do not print a message for removables.
11067 */ 11068 if (!un->un_f_has_removable_media) { 11069 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11070 "i/o to invalid geometry\n"); 11071 } 11072 bioerror(bp, EIO); 11073 bp->b_resid = bp->b_bcount; 11074 SD_BEGIN_IODONE(index, un, bp); 11075 return; 11076 } 11077 11078 partition = SDPART(bp->b_edev); 11079 11080 nblocks = 0; 11081 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 11082 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 11083 11084 /* 11085 * blocknum is the starting block number of the request. At this 11086 * point it is still relative to the start of the minor device. 11087 */ 11088 blocknum = xp->xb_blkno; 11089 11090 /* 11091 * Legacy: If the starting block number is one past the last block 11092 * in the partition, do not set B_ERROR in the buf. 11093 */ 11094 if (blocknum == nblocks) { 11095 goto error_exit; 11096 } 11097 11098 /* 11099 * Confirm that the first block of the request lies within the 11100 * partition limits. Also the requested number of bytes must be 11101 * a multiple of the system block size. 11102 */ 11103 if ((blocknum < 0) || (blocknum >= nblocks) || 11104 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 11105 bp->b_flags |= B_ERROR; 11106 goto error_exit; 11107 } 11108 11109 /* 11110 * If the requested # blocks exceeds the available # blocks, that 11111 * is an overrun of the partition. 11112 */ 11113 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 11114 available_nblocks = (size_t)(nblocks - blocknum); 11115 ASSERT(nblocks >= blocknum); 11116 11117 if (requested_nblocks > available_nblocks) { 11118 /* 11119 * Allocate an "overrun" buf to allow the request to proceed 11120 * for the amount of space available in the partition. The 11121 * amount not transferred will be added into the b_resid 11122 * when the operation is complete. The overrun buf 11123 * replaces the original buf here, and the original buf 11124 * is saved inside the overrun buf, for later use. 11125 */
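/*
 * Worked example (illustrative numbers only): with a 512-byte system
 * block size, a 4096-byte request (requested_nblocks = 8) starting 5
 * blocks before the end of the partition has available_nblocks = 5, so
 * resid = 3 * 512 = 1536 bytes and count = 4096 - 1536 = 2560 bytes;
 * the cloned buf below carries only those first 2560 bytes.
 */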
11126 size_t resid = SD_SYSBLOCKS2BYTES(un, 11127 (offset_t)(requested_nblocks - available_nblocks)); 11128 size_t count = bp->b_bcount - resid; 11129 /* 11130 * Note: count is an unsigned entity thus it'll NEVER 11131 * be less than 0 so ASSERT the original values are 11132 * correct. 11133 */ 11134 ASSERT(bp->b_bcount >= resid); 11135 11136 bp = sd_bioclone_alloc(bp, count, blocknum, 11137 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 11138 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 11139 ASSERT(xp != NULL); 11140 } 11141 11142 /* At this point there should be no residual for this buf. */ 11143 ASSERT(bp->b_resid == 0); 11144 11145 /* Convert the block number to an absolute address. */ 11146 xp->xb_blkno += partition_offset; 11147 11148 SD_NEXT_IOSTART(index, un, bp); 11149 11150 SD_TRACE(SD_LOG_IO_PARTITION, un, 11151 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 11152 11153 return; 11154 11155 error_exit: 11156 bp->b_resid = bp->b_bcount; 11157 SD_BEGIN_IODONE(index, un, bp); 11158 SD_TRACE(SD_LOG_IO_PARTITION, un, 11159 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 11160 } 11161 11162 11163 /* 11164 * Function: sd_mapblockaddr_iodone 11165 * 11166 * Description: Completion-side processing for partition management. 11167 * 11168 * Context: May be called under interrupt context 11169 */ 11170 11171 static void 11172 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 11173 { 11174 /* int partition; */ /* Not used, see below. */ 11175 ASSERT(un != NULL); 11176 ASSERT(bp != NULL); 11177 ASSERT(!mutex_owned(SD_MUTEX(un))); 11178 11179 SD_TRACE(SD_LOG_IO_PARTITION, un, 11180 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 11181 11182 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 11183 /* 11184 * We have an "overrun" buf to deal with... 11185 */ 11186 struct sd_xbuf *xp; 11187 struct buf *obp; /* ptr to the original buf */ 11188 11189 xp = SD_GET_XBUF(bp); 11190 ASSERT(xp != NULL); 11191 11192 /* Retrieve the pointer to the original buf */ 11193 obp = (struct buf *)xp->xb_private; 11194 ASSERT(obp != NULL); 11195 11196 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 11197 bioerror(obp, bp->b_error); 11198 11199 sd_bioclone_free(bp); 11200 11201 /* 11202 * Get back the original buf. 11203 * Note that since the restoration of xb_blkno below 11204 * was removed, the sd_xbuf is not needed. 11205 */ 11206 bp = obp; 11207 /* 11208 * xp = SD_GET_XBUF(bp); 11209 * ASSERT(xp != NULL); 11210 */ 11211 } 11212 11213 /* 11214 * Convert xp->xb_blkno back to a minor-device-relative value. 11215 * Note: this has been commented out, as it is not needed in the 11216 * current implementation of the driver (ie, this function is at 11217 * the top of the layering chains, so the info would simply be 11218 * discarded) and it is in the "hot" IO path. 11219 * 11220 * partition = getminor(bp->b_edev) & SDPART_MASK; 11221 * xp->xb_blkno -= un->un_offset[partition]; 11222 */ 11223 11224 SD_NEXT_IODONE(index, un, bp); 11225 11226 SD_TRACE(SD_LOG_IO_PARTITION, un, 11227 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 11228 } 11229 11230 11231 /* 11232 * Function: sd_mapblocksize_iostart 11233 * 11234 * Description: Convert between system block size (un->un_sys_blocksize) 11235 * and target block size (un->un_tgt_blocksize). 11236 * 11237 * Context: Can sleep to allocate resources. 11238 * 11239 * Assumptions: A higher layer has already performed any partition validation, 11240 * and converted the xp->xb_blkno to an absolute value relative 11241 * to the start of the device. 11242 * 11243 * It is also assumed that the higher layer has implemented 11244 * an "overrun" mechanism for the case where the request would 11245 * read/write beyond the end of a partition. In this case we 11246 * assume (and ASSERT) that bp->b_resid == 0. 11247 * 11248 * Note: The implementation for this routine assumes the target 11249 * block size remains constant between allocation and transport. 11250 */ 11251 11252 static void 11253 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 11254 { 11255 struct sd_mapblocksize_info *bsp; 11256 struct sd_xbuf *xp; 11257 offset_t first_byte; 11258 daddr_t start_block, end_block; 11259 daddr_t request_bytes; 11260 ushort_t is_aligned = FALSE; 11261 11262 ASSERT(un != NULL); 11263 ASSERT(bp != NULL); 11264 ASSERT(!mutex_owned(SD_MUTEX(un))); 11265 ASSERT(bp->b_resid == 0); 11266 11267 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11268 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 11269 11270 /* 11271 * For a non-writable CD, a write request is an error 11272 */ 11273 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 11274 (un->un_f_mmc_writable_media == FALSE)) { 11275 bioerror(bp, EIO); 11276 bp->b_resid = bp->b_bcount; 11277 SD_BEGIN_IODONE(index, un, bp); 11278 return; 11279 } 11280 11281 /* 11282 * We do not need a shadow buf if the device is using 11283 * un->un_sys_blocksize as its block size or if bcount == 0.
11284 * In this case there is no layer-private data block allocated. 11285 */ 11286 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11287 (bp->b_bcount == 0)) { 11288 goto done; 11289 } 11290 11291 #if defined(__i386) || defined(__amd64) 11292 /* We do not support non-block-aligned transfers for ROD devices */ 11293 ASSERT(!ISROD(un)); 11294 #endif 11295 11296 xp = SD_GET_XBUF(bp); 11297 ASSERT(xp != NULL); 11298 11299 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11300 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 11301 un->un_tgt_blocksize, un->un_sys_blocksize); 11302 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11303 "request start block:0x%x\n", xp->xb_blkno); 11304 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 11305 "request len:0x%x\n", bp->b_bcount); 11306 11307 /* 11308 * Allocate the layer-private data area for the mapblocksize layer. 11309 * Layers are allowed to use the xb_private member of the sd_xbuf 11310 * struct to store the pointer to their layer-private data block, but 11311 * each layer also has the responsibility of restoring the prior 11312 * contents of xb_private before returning the buf/xbuf to the 11313 * higher layer that sent it. 11314 * 11315 * Here we save the prior contents of xp->xb_private into the 11316 * bsp->mbs_oprivate field of our layer-private data area. This value 11317 * is restored by sd_mapblocksize_iodone() just prior to freeing up 11318 * the layer-private area and returning the buf/xbuf to the layer 11319 * that sent it. 11320 * 11321 * Note that here we use kmem_zalloc for the allocation as there are 11322 * parts of the mapblocksize code that expect certain fields to be 11323 * zero unless explicitly set to a required value. 11324 */ 11325 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11326 bsp->mbs_oprivate = xp->xb_private; 11327 xp->xb_private = bsp; 11328 11329 /* 11330 * This treats the data on the disk (target) as an array of bytes. 11331 * first_byte is the byte offset, from the beginning of the device, 11332 * to the location of the request. This is converted from a 11333 * un->un_sys_blocksize block address to a byte offset, and then back 11334 * to a block address based upon a un->un_tgt_blocksize block size. 11335 * 11336 * xp->xb_blkno should be absolute upon entry into this function, 11337 * but it is based upon partitions that use the "system" 11338 * block size. It must be adjusted to reflect the block size of 11339 * the target. 11340 * 11341 * Note that end_block is actually the block that follows the last 11342 * block of the request, but that's what is needed for the computation. 11343 */ 11344 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11345 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 11346 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 11347 un->un_tgt_blocksize; 11348 11349 /* request_bytes is rounded up to a multiple of the target block size */ 11350 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 11351 11352 /* 11353 * See if the starting address of the request and the request 11354 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 11355 * then we do not need to allocate a shadow buf to handle the request. 11356 */ 11357 if (((first_byte % un->un_tgt_blocksize) == 0) && 11358 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 11359 is_aligned = TRUE; 11360 }
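/*
 * Worked example (illustrative numbers only): with un_sys_blocksize =
 * 512 and un_tgt_blocksize = 2048, a 1024-byte request at system block
 * 3 gives first_byte = 1536, start_block = 0, end_block = 2, and
 * request_bytes = 4096. Since first_byte % 2048 != 0, is_aligned stays
 * FALSE and a shadow buf will be allocated below.
 */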
11362 if ((bp->b_flags & B_READ) == 0) { 11363 /* 11364 * Lock the range for a write operation. An aligned request is 11365 * considered a simple write; otherwise the request must be a 11366 * read-modify-write. 11367 */ 11368 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 11369 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 11370 } 11371 11372 /* 11373 * Alloc a shadow buf if the request is not aligned. Also, this is 11374 * where the READ command is generated for a read-modify-write. (The 11375 * write phase is deferred until after the read completes.) 11376 */ 11377 if (is_aligned == FALSE) { 11378 11379 struct sd_mapblocksize_info *shadow_bsp; 11380 struct sd_xbuf *shadow_xp; 11381 struct buf *shadow_bp; 11382 11383 /* 11384 * Allocate the shadow buf and its associated xbuf. Note that 11385 * after this call the xb_blkno value in both the original 11386 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 11387 * same: absolute (relative to the start of the device) and 11388 * adjusted for the target block size. The b_blkno in the 11389 * shadow buf will also be set to this value. We should never 11390 * change b_blkno in the original bp however. 11391 * 11392 * Note also that the shadow buf will always need to be a 11393 * READ command, regardless of whether the incoming command 11394 * is a READ or a WRITE. 11395 */ 11396 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 11397 xp->xb_blkno, 11398 (int (*)(struct buf *)) sd_mapblocksize_iodone); 11399 11400 shadow_xp = SD_GET_XBUF(shadow_bp); 11401 11402 /* 11403 * Allocate the layer-private data for the shadow buf. 11404 * (No need to preserve xb_private in the shadow xbuf.) 11405 */ 11406 shadow_xp->xb_private = shadow_bsp = 11407 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 11408 11409 /* 11410 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 11411 * to figure out where the start of the user data is (based upon 11412 * the system block size) in the data returned by the READ 11413 * command (which will be based upon the target blocksize). Note 11414 * that this is only really used if the request is unaligned. 11415 */ 11416 bsp->mbs_copy_offset = (ssize_t)(first_byte - 11417 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 11418 ASSERT((bsp->mbs_copy_offset >= 0) && 11419 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 11420 11421 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 11422 11423 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 11424 11425 /* Transfer the wmap (if any) to the shadow buf */ 11426 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 11427 bsp->mbs_wmp = NULL; 11428 11429 /* 11430 * The shadow buf goes on from here in place of the 11431 * original buf. 11432 */ 11433 shadow_bsp->mbs_orig_bp = bp; 11434 bp = shadow_bp; 11435 } 11436 11437 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11438 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 11439 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11440 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 11441 request_bytes); 11442 SD_INFO(SD_LOG_IO_RMMEDIA, un, 11443 "sd_mapblocksize_iostart: shadow buf:0x%p\n", bp); 11444 11445 done: 11446 SD_NEXT_IOSTART(index, un, bp); 11447 11448 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11449 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 11450 } 11451 11452 11453 /* 11454 * Function: sd_mapblocksize_iodone 11455 * 11456 * Description: Completion side processing for block-size mapping.
11457 * 11458 * Context: May be called under interrupt context 11459 */ 11460 11461 static void 11462 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 11463 { 11464 struct sd_mapblocksize_info *bsp; 11465 struct sd_xbuf *xp; 11466 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 11467 struct buf *orig_bp; /* ptr to the original buf */ 11468 offset_t shadow_end; 11469 offset_t request_end; 11470 offset_t shadow_start; 11471 ssize_t copy_offset; 11472 size_t copy_length; 11473 size_t shortfall; 11474 uint_t is_write; /* TRUE if this bp is a WRITE */ 11475 uint_t has_wmap; /* TRUE if this bp has a wmap */ 11476 11477 ASSERT(un != NULL); 11478 ASSERT(bp != NULL); 11479 11480 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 11481 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 11482 11483 /* 11484 * There is no shadow buf or layer-private data if the target is 11485 * using un->un_sys_blocksize as its block size or if bcount == 0. 11486 */ 11487 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 11488 (bp->b_bcount == 0)) { 11489 goto exit; 11490 } 11491 11492 xp = SD_GET_XBUF(bp); 11493 ASSERT(xp != NULL); 11494 11495 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 11496 bsp = xp->xb_private; 11497 11498 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 11499 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 11500 11501 if (is_write) { 11502 /* 11503 * For a WRITE request we must free up the block range that 11504 * we have locked up. This holds regardless of whether this is 11505 * an aligned write request or a read-modify-write request. 11506 */ 11507 sd_range_unlock(un, bsp->mbs_wmp); 11508 bsp->mbs_wmp = NULL; 11509 } 11510 11511 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 11512 /* 11513 * An aligned read or write command will have no shadow buf; 11514 * there is not much else to do with it. 11515 */ 11516 goto done; 11517 } 11518 11519 orig_bp = bsp->mbs_orig_bp; 11520 ASSERT(orig_bp != NULL); 11521 orig_xp = SD_GET_XBUF(orig_bp); 11522 ASSERT(orig_xp != NULL); 11523 ASSERT(!mutex_owned(SD_MUTEX(un))); 11524 11525 if (!is_write && has_wmap) { 11526 /* 11527 * A READ with a wmap means this is the READ phase of a 11528 * read-modify-write. If an error occurred on the READ then 11529 * we do not proceed with the WRITE phase or copy any data. 11530 * Just release the write maps and return with an error. 11531 */ 11532 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 11533 orig_bp->b_resid = orig_bp->b_bcount; 11534 bioerror(orig_bp, bp->b_error); 11535 sd_range_unlock(un, bsp->mbs_wmp); 11536 goto freebuf_done; 11537 } 11538 } 11539 11540 /* 11541 * Here is where we set up to copy the data from the shadow buf 11542 * into the space associated with the original buf. 11543 * 11544 * To deal with the conversion between block sizes, these 11545 * computations treat the data as an array of bytes, with the 11546 * first byte (byte 0) corresponding to the first byte in the 11547 * first block on the disk. 11548 */ 11549 11550 /* 11551 * shadow_start and shadow_end indicate the byte range of the 11552 * data returned with the shadow IO request. 11553 */ 11554 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 11555 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 11556
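/*
 * Continuing the worked example from sd_mapblocksize_iostart() (all
 * numbers illustrative): the 1024-byte request had xb_blkno = 0 and
 * copy_offset (retrieved below) = 1536, so shadow_start = 0 and a fully
 * successful 4096-byte shadow READ gives shadow_end = 4096. Then
 * request_end = 0 + 1536 + 1024 = 2560 <= shadow_end, so the original
 * buf's b_resid is set to 0. Had the shadow READ returned only 2048
 * bytes, the shortfall would be 512 bytes and copy_length would shrink
 * from 1024 to 512.
 */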
11557 /* 11558 * copy_offset gives the offset (in bytes) from the start of the first 11559 * block of the READ request to the beginning of the data. We retrieve 11560 * this value from the layer-private data (bsp->mbs_copy_offset), where 11561 * it was saved by sd_mapblocksize_iostart(). copy_length gives the 11562 * amount of data to be copied (in bytes). 11563 */ 11564 copy_offset = bsp->mbs_copy_offset; 11565 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 11566 copy_length = orig_bp->b_bcount; 11567 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 11568 11569 /* 11570 * Set up the resid and error fields of orig_bp as appropriate. 11571 */ 11572 if (shadow_end >= request_end) { 11573 /* We got all the requested data; set resid to zero */ 11574 orig_bp->b_resid = 0; 11575 } else { 11576 /* 11577 * We failed to get enough data to fully satisfy the original 11578 * request. Just copy back whatever data we got and set 11579 * up the residual and error code as required. 11580 * 11581 * 'shortfall' is the amount by which the data received with the 11582 * shadow buf has "fallen short" of the requested amount. 11583 */ 11584 shortfall = (size_t)(request_end - shadow_end); 11585 11586 if (shortfall > orig_bp->b_bcount) { 11587 /* 11588 * We did not get enough data to even partially 11589 * fulfill the original request. The residual is 11590 * equal to the amount requested. 11591 */ 11592 orig_bp->b_resid = orig_bp->b_bcount; 11593 } else { 11594 /* 11595 * We did not get all the data that we requested 11596 * from the device, but we will try to return what 11597 * portion we did get. 11598 */ 11599 orig_bp->b_resid = shortfall; 11600 } 11601 ASSERT(copy_length >= orig_bp->b_resid); 11602 copy_length -= orig_bp->b_resid; 11603 } 11604 11605 /* Propagate the error code from the shadow buf to the original buf */ 11606 bioerror(orig_bp, bp->b_error); 11607 11608 if (is_write) { 11609 goto freebuf_done; /* No data copying for a WRITE */ 11610 } 11611 11612 if (has_wmap) { 11613 /* 11614 * This is a READ command from the READ phase of a 11615 * read-modify-write request. We have to copy the data given 11616 * by the user OVER the data returned by the READ command, 11617 * then convert the command from a READ to a WRITE and send 11618 * it back to the target. 11619 */ 11620 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 11621 copy_length); 11622 11623 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 11624 11625 /* 11626 * Dispatch the WRITE command to the taskq thread, which 11627 * will in turn send the command to the target. When the 11628 * WRITE command completes, we (sd_mapblocksize_iodone()) 11629 * will get called again as part of the iodone chain 11630 * processing for it. Note that we will still be dealing 11631 * with the shadow buf at that point. 11632 */ 11633 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 11634 KM_NOSLEEP) != 0) { 11635 /* 11636 * Dispatch was successful so we are done. Return 11637 * without going any higher up the iodone chain. Do 11638 * not free up any layer-private data until after the 11639 * WRITE completes. 11640 */ 11641 return; 11642 } 11643 11644 /* 11645 * Dispatch of the WRITE command failed; set up the error 11646 * condition and send this IO back up the iodone chain. 11647 */ 11648 bioerror(orig_bp, EIO); 11649 orig_bp->b_resid = orig_bp->b_bcount; 11650 11651 } else { 11652 /* 11653 * This is a regular READ request (ie, not a RMW). Copy the 11654 * data from the shadow buf into the original buf.
The 11655 * copy_offset compensates for any "misalignment" between the 11656 * shadow buf (with its un->un_tgt_blocksize blocks) and the 11657 * original buf (with its un->un_sys_blocksize blocks). 11658 */ 11659 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 11660 copy_length); 11661 } 11662 11663 freebuf_done: 11664 11665 /* 11666 * At this point we still have both the shadow buf AND the original 11667 * buf to deal with, as well as the layer-private data area in each. 11668 * Local variables are as follows: 11669 * 11670 * bp -- points to shadow buf 11671 * xp -- points to xbuf of shadow buf 11672 * bsp -- points to layer-private data area of shadow buf 11673 * orig_bp -- points to original buf 11674 * 11675 * First free the shadow buf and its associated xbuf, then free the 11676 * layer-private data area from the shadow buf. There is no need to 11677 * restore xb_private in the shadow xbuf. 11678 */ 11679 sd_shadow_buf_free(bp); 11680 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11681 11682 /* 11683 * Now update the local variables to point to the original buf, xbuf, 11684 * and layer-private area. 11685 */ 11686 bp = orig_bp; 11687 xp = SD_GET_XBUF(bp); 11688 ASSERT(xp != NULL); 11689 ASSERT(xp == orig_xp); 11690 bsp = xp->xb_private; 11691 ASSERT(bsp != NULL); 11692 11693 done: 11694 /* 11695 * Restore xb_private to whatever it was set to by the next higher 11696 * layer in the chain, then free the layer-private data area. 11697 */ 11698 xp->xb_private = bsp->mbs_oprivate; 11699 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 11700 11701 exit: 11702 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 11703 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 11704 11705 SD_NEXT_IODONE(index, un, bp); 11706 } 11707 11708 11709 /* 11710 * Function: sd_checksum_iostart 11711 * 11712 * Description: A stub function for a layer that's currently not used. 11713 * For now just a placeholder. 11714 * 11715 * Context: Kernel thread context 11716 */ 11717 11718 static void 11719 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 11720 { 11721 ASSERT(un != NULL); 11722 ASSERT(bp != NULL); 11723 ASSERT(!mutex_owned(SD_MUTEX(un))); 11724 SD_NEXT_IOSTART(index, un, bp); 11725 } 11726 11727 11728 /* 11729 * Function: sd_checksum_iodone 11730 * 11731 * Description: A stub function for a layer that's currently not used. 11732 * For now just a placeholder. 11733 * 11734 * Context: May be called under interrupt context 11735 */ 11736 11737 static void 11738 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 11739 { 11740 ASSERT(un != NULL); 11741 ASSERT(bp != NULL); 11742 ASSERT(!mutex_owned(SD_MUTEX(un))); 11743 SD_NEXT_IODONE(index, un, bp); 11744 } 11745 11746 11747 /* 11748 * Function: sd_checksum_uscsi_iostart 11749 * 11750 * Description: A stub function for a layer that's currently not used. 11751 * For now just a placeholder. 11752 * 11753 * Context: Kernel thread context 11754 */ 11755 11756 static void 11757 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 11758 { 11759 ASSERT(un != NULL); 11760 ASSERT(bp != NULL); 11761 ASSERT(!mutex_owned(SD_MUTEX(un))); 11762 SD_NEXT_IOSTART(index, un, bp); 11763 } 11764 11765 11766 /* 11767 * Function: sd_checksum_uscsi_iodone 11768 * 11769 * Description: A stub function for a layer that's currently not used. 11770 * For now just a placeholder. 
11771 * 11772 * Context: May be called under interrupt context 11773 */ 11774 11775 static void 11776 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11777 { 11778 ASSERT(un != NULL); 11779 ASSERT(bp != NULL); 11780 ASSERT(!mutex_owned(SD_MUTEX(un))); 11781 SD_NEXT_IODONE(index, un, bp); 11782 } 11783 11784 11785 /* 11786 * Function: sd_pm_iostart 11787 * 11788 * Description: iostart-side routine for power management. 11789 * 11790 * Context: Kernel thread context 11791 */ 11792 11793 static void 11794 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 11795 { 11796 ASSERT(un != NULL); 11797 ASSERT(bp != NULL); 11798 ASSERT(!mutex_owned(SD_MUTEX(un))); 11799 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11800 11801 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 11802 11803 if (sd_pm_entry(un) != DDI_SUCCESS) { 11804 /* 11805 * Set up to return the failed buf back up the 'iodone' 11806 * side of the calling chain. 11807 */ 11808 bioerror(bp, EIO); 11809 bp->b_resid = bp->b_bcount; 11810 11811 SD_BEGIN_IODONE(index, un, bp); 11812 11813 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11814 return; 11815 } 11816 11817 SD_NEXT_IOSTART(index, un, bp); 11818 11819 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 11820 } 11821 11822 11823 /* 11824 * Function: sd_pm_iodone 11825 * 11826 * Description: iodone-side routine for power management. 11827 * 11828 * Context: may be called from interrupt context 11829 */ 11830 11831 static void 11832 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 11833 { 11834 ASSERT(un != NULL); 11835 ASSERT(bp != NULL); 11836 ASSERT(!mutex_owned(&un->un_pm_mutex)); 11837 11838 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 11839 11840 /* 11841 * After attach the following flag is only read, so don't 11842 * take the penalty of acquiring a mutex for it. 11843 */ 11844 if (un->un_f_pm_is_enabled == TRUE) { 11845 sd_pm_exit(un); 11846 } 11847 11848 SD_NEXT_IODONE(index, un, bp); 11849 11850 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 11851 } 11852 11853 11854 /* 11855 * Function: sd_core_iostart 11856 * 11857 * Description: Primary driver function for enqueuing buf(9S) structs from 11858 * the system and initiating IO to the target device 11859 * 11860 * Context: Kernel thread context. Can sleep. 11861 * 11862 * Assumptions: - The given xp->xb_blkno is absolute 11863 * (ie, relative to the start of the device). 11864 * - The IO is to be done using the native blocksize of 11865 * the device, as specified in un->un_tgt_blocksize. 11866 */ 11867 /* ARGSUSED */ 11868 static void 11869 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 11870 { 11871 struct sd_xbuf *xp; 11872 11873 ASSERT(un != NULL); 11874 ASSERT(bp != NULL); 11875 ASSERT(!mutex_owned(SD_MUTEX(un))); 11876 ASSERT(bp->b_resid == 0); 11877 11878 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 11879 11880 xp = SD_GET_XBUF(bp); 11881 ASSERT(xp != NULL); 11882 11883 mutex_enter(SD_MUTEX(un)); 11884 11885 /* 11886 * If we are currently in the failfast state, fail any new IO 11887 * that has B_FAILFAST set, then return. 11888 */ 11889 if ((bp->b_flags & B_FAILFAST) && 11890 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 11891 mutex_exit(SD_MUTEX(un)); 11892 bioerror(bp, EIO); 11893 bp->b_resid = bp->b_bcount; 11894 SD_BEGIN_IODONE(index, un, bp); 11895 return; 11896 } 11897 11898 if (SD_IS_DIRECT_PRIORITY(xp)) { 11899 /* 11900 * Priority command -- transport it immediately.
11901 * 11902 * Note: We may want to assert that USCSI_DIAGNOSE is set, 11903 * because all direct priority commands should be associated 11904 * with error recovery actions which we don't want to retry. 11905 */ 11906 sd_start_cmds(un, bp); 11907 } else { 11908 /* 11909 * Normal command -- add it to the wait queue, then start 11910 * transporting commands from the wait queue. 11911 */ 11912 sd_add_buf_to_waitq(un, bp); 11913 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 11914 sd_start_cmds(un, NULL); 11915 } 11916 11917 mutex_exit(SD_MUTEX(un)); 11918 11919 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 11920 } 11921 11922 11923 /* 11924 * Function: sd_init_cdb_limits 11925 * 11926 * Description: This is to handle scsi_pkt initialization differences 11927 * between the driver platforms. 11928 * 11929 * Legacy behaviors: 11930 * 11931 * If the block number or the sector count exceeds the 11932 * capabilities of a Group 0 command, shift over to a 11933 * Group 1 command. We don't blindly use Group 1 11934 * commands because a) some drives (CDC Wren IVs) get a 11935 * bit confused, and b) there is probably a fair amount 11936 * of speed difference for a target to receive and decode 11937 * a 10 byte command instead of a 6 byte command. 11938 * 11939 * The xfer time difference of 6 vs 10 byte CDBs is 11940 * still significant so this code is still worthwhile. 11941 * 10 byte CDBs are very inefficient with the fas HBA driver 11942 * and older disks. Each CDB byte took 1 usec with some 11943 * popular disks. 11944 * 11945 * Context: Must be called at attach time 11946 */ 11947 11948 static void 11949 sd_init_cdb_limits(struct sd_lun *un) 11950 { 11951 int hba_cdb_limit; 11952 11953 /* 11954 * Use CDB_GROUP1 commands for most devices except for 11955 * parallel SCSI fixed drives in which case we get better 11956 * performance using CDB_GROUP0 commands (where applicable). 11957 */ 11958 un->un_mincdb = SD_CDB_GROUP1; 11959 #if !defined(__fibre) 11960 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 11961 !un->un_f_has_removable_media) { 11962 un->un_mincdb = SD_CDB_GROUP0; 11963 } 11964 #endif 11965 11966 /* 11967 * Try to read the max-cdb-length supported by HBA. 11968 */ 11969 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 11970 if (0 >= un->un_max_hba_cdb) { 11971 un->un_max_hba_cdb = CDB_GROUP4; 11972 hba_cdb_limit = SD_CDB_GROUP4; 11973 } else if (0 < un->un_max_hba_cdb && 11974 un->un_max_hba_cdb < CDB_GROUP1) { 11975 hba_cdb_limit = SD_CDB_GROUP0; 11976 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 11977 un->un_max_hba_cdb < CDB_GROUP5) { 11978 hba_cdb_limit = SD_CDB_GROUP1; 11979 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 11980 un->un_max_hba_cdb < CDB_GROUP4) { 11981 hba_cdb_limit = SD_CDB_GROUP5; 11982 } else { 11983 hba_cdb_limit = SD_CDB_GROUP4; 11984 } 11985 11986 /* 11987 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 11988 * commands for fixed disks unless we are building for a 32 bit 11989 * kernel. 11990 */ 11991 #ifdef _LP64 11992 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11993 min(hba_cdb_limit, SD_CDB_GROUP4); 11994 #else 11995 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 11996 min(hba_cdb_limit, SD_CDB_GROUP1); 11997 #endif 11998 11999 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 12000 ? sizeof (struct scsi_arq_status) : 1); 12001 un->un_cmd_timeout = (ushort_t)sd_io_time; 12002 un->un_uscsi_timeout = ((ISCD(un)) ? 
2 : 1) * un->un_cmd_timeout; 12003 } 12004 12005 12006 /* 12007 * Function: sd_initpkt_for_buf 12008 * 12009 * Description: Allocate and initialize for transport a scsi_pkt struct, 12010 * based upon the info specified in the given buf struct. 12011 * 12012 * Assumes the xb_blkno in the request is absolute (ie, 12013 * relative to the start of the device, NOT the partition). 12014 * Also assumes that the request is using the native block 12015 * size of the device (as returned by the READ CAPACITY 12016 * command). 12017 * 12018 * Return Code: SD_PKT_ALLOC_SUCCESS 12019 * SD_PKT_ALLOC_FAILURE 12020 * SD_PKT_ALLOC_FAILURE_NO_DMA 12021 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12022 * 12023 * Context: Kernel thread and may be called from software interrupt context 12024 * as part of a sdrunout callback. This function may not block or 12025 * call routines that block 12026 */ 12027 12028 static int 12029 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 12030 { 12031 struct sd_xbuf *xp; 12032 struct scsi_pkt *pktp = NULL; 12033 struct sd_lun *un; 12034 size_t blockcount; 12035 daddr_t startblock; 12036 int rval; 12037 int cmd_flags; 12038 12039 ASSERT(bp != NULL); 12040 ASSERT(pktpp != NULL); 12041 xp = SD_GET_XBUF(bp); 12042 ASSERT(xp != NULL); 12043 un = SD_GET_UN(bp); 12044 ASSERT(un != NULL); 12045 ASSERT(mutex_owned(SD_MUTEX(un))); 12046 ASSERT(bp->b_resid == 0); 12047 12048 SD_TRACE(SD_LOG_IO_CORE, un, 12049 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 12050 12051 mutex_exit(SD_MUTEX(un)); 12052 12053 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12054 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 12055 /* 12056 * Already have a scsi_pkt -- just need DMA resources. 12057 * We must recompute the CDB in case the mapping returns 12058 * a nonzero pkt_resid. 12059 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 12060 * that is being retried, the unmap/remap of the DMA resources 12061 * will result in the entire transfer starting over again 12062 * from the very first block. 12063 */ 12064 ASSERT(xp->xb_pktp != NULL); 12065 pktp = xp->xb_pktp; 12066 } else { 12067 pktp = NULL; 12068 } 12069 #endif /* __i386 || __amd64 */ 12070 12071 startblock = xp->xb_blkno; /* Absolute block num. */ 12072 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 12073 12074 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 12075 12076 /* 12077 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 12078 * call scsi_init_pkt, and build the CDB. 12079 */ 12080 rval = sd_setup_rw_pkt(un, &pktp, bp, 12081 cmd_flags, sdrunout, (caddr_t)un, 12082 startblock, blockcount); 12083 12084 if (rval == 0) { 12085 /* 12086 * Success. 12087 * 12088 * If partial DMA is being used and required for this transfer, 12089 * set it up here.
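 *
 * For example (illustrative numbers only): if a 1 MB transfer binds only
 * 256 KB of DMA resources, scsi_init_pkt() returns with pkt_resid equal
 * to the unbound 768 KB; that value is saved in xb_dma_resid below and
 * pkt_resid is zeroed so this first 256 KB window can be transported.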
12090 */ 12091 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 12092 (pktp->pkt_resid != 0)) { 12093 12094 /* 12095 * Save the pkt_resid for the 12096 * next xfer 12097 */ 12098 xp->xb_dma_resid = pktp->pkt_resid; 12099 12100 /* rezero resid */ 12101 pktp->pkt_resid = 0; 12102 12103 } else { 12104 xp->xb_dma_resid = 0; 12105 } 12106 12107 pktp->pkt_flags = un->un_tagflags; 12108 pktp->pkt_time = un->un_cmd_timeout; 12109 pktp->pkt_comp = sdintr; 12110 12111 pktp->pkt_private = bp; 12112 *pktpp = pktp; 12113 12114 SD_TRACE(SD_LOG_IO_CORE, un, 12115 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 12116 12117 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12118 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 12119 #endif 12120 12121 mutex_enter(SD_MUTEX(un)); 12122 return (SD_PKT_ALLOC_SUCCESS); 12123 12124 } 12125 12126 /* 12127 * SD_PKT_ALLOC_FAILURE is the only expected failure code 12128 * from sd_setup_rw_pkt. 12129 */ 12130 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 12131 12132 if (rval == SD_PKT_ALLOC_FAILURE) { 12133 *pktpp = NULL; 12134 /* 12135 * Set the driver state to RWAIT to indicate the driver 12136 * is waiting on resource allocations. The driver will not 12137 * suspend, pm_suspend, or detach while the state is RWAIT. 12138 */ 12139 mutex_enter(SD_MUTEX(un)); 12140 New_state(un, SD_STATE_RWAIT); 12141 12142 SD_ERROR(SD_LOG_IO_CORE, un, 12143 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp); 12144 12145 if ((bp->b_flags & B_ERROR) != 0) { 12146 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 12147 } 12148 return (SD_PKT_ALLOC_FAILURE); 12149 } else { 12150 /* 12151 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12152 * 12153 * This should never happen. Maybe someone messed with the 12154 * kernel's minphys? 12155 */ 12156 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12157 "Request rejected: too large for CDB: " 12158 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 12159 SD_ERROR(SD_LOG_IO_CORE, un, 12160 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 12161 mutex_enter(SD_MUTEX(un)); 12162 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12163 12164 } 12165 } 12166 12167 12168 /* 12169 * Function: sd_destroypkt_for_buf 12170 * 12171 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 12172 * 12173 * Context: Kernel thread or interrupt context 12174 */ 12175 12176 static void 12177 sd_destroypkt_for_buf(struct buf *bp) 12178 { 12179 ASSERT(bp != NULL); 12180 ASSERT(SD_GET_UN(bp) != NULL); 12181 12182 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 12183 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 12184 12185 ASSERT(SD_GET_PKTP(bp) != NULL); 12186 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12187 12188 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 12189 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 12190 } 12191 12192 /* 12193 * Function: sd_setup_rw_pkt 12194 * 12195 * Description: Determines appropriate CDB group for the requested LBA 12196 * and transfer length, calls scsi_init_pkt, and builds 12197 * the CDB. Do not use for partial DMA transfers except 12198 * for the initial transfer since the CDB size must 12199 * remain constant. 12200 * 12201 * Context: Kernel thread and may be called from software interrupt 12202 * context as part of a sdrunout callback. This function may not 12203 * block or call routines that block 12204 */ 12205 12206
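/*
 * Illustrative call (a sketch of how sd_initpkt_for_buf() above uses this
 * routine; cmd_flags, startblock and blockcount as computed there):
 *
 *	rval = sd_setup_rw_pkt(un, &pktp, bp, cmd_flags,
 *	    sdrunout, (caddr_t)un, startblock, blockcount);
 *
 * A return of 0 means *pktpp holds a fully built READ/WRITE packet. As
 * general SCSI background: a Group 0 (6-byte) CDB encodes a 21-bit LBA
 * and up to 256 blocks, while a Group 1 (10-byte) CDB encodes a 32-bit
 * LBA and a 16-bit count; Groups 5 and 4 extend the count and LBA to
 * 32 and 64 bits respectively.
 */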
12207 int 12208 sd_setup_rw_pkt(struct sd_lun *un, 12209 struct scsi_pkt **pktpp, struct buf *bp, int flags, 12210 int (*callback)(caddr_t), caddr_t callback_arg, 12211 diskaddr_t lba, uint32_t blockcount) 12212 { 12213 struct scsi_pkt *return_pktp; 12214 union scsi_cdb *cdbp; 12215 struct sd_cdbinfo *cp = NULL; 12216 int i; 12217 12218 /* 12219 * See which size CDB to use, based upon the request. 12220 */ 12221 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 12222 12223 /* 12224 * Check lba and block count against sd_cdbtab limits. 12225 * In the partial DMA case, we have to use the same size 12226 * CDB for all the transfers. Check lba + blockcount 12227 * against the max LBA so we know that segment of the 12228 * transfer can use the CDB we select. 12229 */ 12230 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 12231 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 12232 12233 /* 12234 * The command will fit into the CDB type 12235 * specified by sd_cdbtab[i]. 12236 */ 12237 cp = sd_cdbtab + i; 12238 12239 /* 12240 * Call scsi_init_pkt so we can fill in the 12241 * CDB. 12242 */ 12243 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 12244 bp, cp->sc_grpcode, un->un_status_len, 0, 12245 flags, callback, callback_arg); 12246 12247 if (return_pktp != NULL) { 12248 12249 /* 12250 * Return new value of pkt 12251 */ 12252 *pktpp = return_pktp; 12253 12254 /* 12255 * To be safe, zero the CDB ensuring there is 12256 * no leftover data from a previous command. 12257 */ 12258 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 12259 12260 /* 12261 * Handle partial DMA mapping 12262 */ 12263 if (return_pktp->pkt_resid != 0) { 12264 12265 /* 12266 * Not going to xfer as many blocks as 12267 * originally expected 12268 */ 12269 blockcount -= 12270 SD_BYTES2TGTBLOCKS(un, 12271 return_pktp->pkt_resid); 12272 } 12273 12274 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 12275 12276 /* 12277 * Set command byte based on the CDB 12278 * type we matched. 12279 */ 12280 cdbp->scc_cmd = cp->sc_grpmask | 12281 ((bp->b_flags & B_READ) ? 12282 SCMD_READ : SCMD_WRITE); 12283 12284 SD_FILL_SCSI1_LUN(un, return_pktp); 12285 12286 /* 12287 * Fill in LBA and length 12288 */ 12289 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 12290 (cp->sc_grpcode == CDB_GROUP4) || 12291 (cp->sc_grpcode == CDB_GROUP0) || 12292 (cp->sc_grpcode == CDB_GROUP5)); 12293 12294 if (cp->sc_grpcode == CDB_GROUP1) { 12295 FORMG1ADDR(cdbp, lba); 12296 FORMG1COUNT(cdbp, blockcount); 12297 return (0); 12298 } else if (cp->sc_grpcode == CDB_GROUP4) { 12299 FORMG4LONGADDR(cdbp, lba); 12300 FORMG4COUNT(cdbp, blockcount); 12301 return (0); 12302 } else if (cp->sc_grpcode == CDB_GROUP0) { 12303 FORMG0ADDR(cdbp, lba); 12304 FORMG0COUNT(cdbp, blockcount); 12305 return (0); 12306 } else if (cp->sc_grpcode == CDB_GROUP5) { 12307 FORMG5ADDR(cdbp, lba); 12308 FORMG5COUNT(cdbp, blockcount); 12309 return (0); 12310 } 12311 12312 /* 12313 * It should be impossible to not match one 12314 * of the CDB types above, so we should never 12315 * reach this point. Set the CDB command byte 12316 * to test-unit-ready to avoid writing 12317 * to somewhere we don't intend. 12318 */ 12319 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 12320 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12321 } else { 12322 /* 12323 * Couldn't get scsi_pkt 12324 */ 12325 return (SD_PKT_ALLOC_FAILURE); 12326 } 12327 } 12328 } 12329 12330 /* 12331 * None of the available CDB types were suitable.
This really 12332 * should never happen: on a 64 bit system we support 12333 * READ16/WRITE16 which will hold an entire 64 bit disk address 12334 * and on a 32 bit system we will refuse to bind to a device 12335 * larger than 2TB so addresses will never be larger than 32 bits. 12336 */ 12337 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12338 } 12339 12340 /* 12341 * Function: sd_setup_next_rw_pkt 12342 * 12343 * Description: Setup packet for partial DMA transfers, except for the 12344 * initial transfer. sd_setup_rw_pkt should be used for 12345 * the initial transfer. 12346 * 12347 * Context: Kernel thread and may be called from interrupt context. 12348 */ 12349 12350 int 12351 sd_setup_next_rw_pkt(struct sd_lun *un, 12352 struct scsi_pkt *pktp, struct buf *bp, 12353 diskaddr_t lba, uint32_t blockcount) 12354 { 12355 uchar_t com; 12356 union scsi_cdb *cdbp; 12357 uchar_t cdb_group_id; 12358 12359 ASSERT(pktp != NULL); 12360 ASSERT(pktp->pkt_cdbp != NULL); 12361 12362 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 12363 com = cdbp->scc_cmd; 12364 cdb_group_id = CDB_GROUPID(com); 12365 12366 ASSERT((cdb_group_id == CDB_GROUPID_0) || 12367 (cdb_group_id == CDB_GROUPID_1) || 12368 (cdb_group_id == CDB_GROUPID_4) || 12369 (cdb_group_id == CDB_GROUPID_5)); 12370 12371 /* 12372 * Move pkt to the next portion of the xfer. 12373 * func is NULL_FUNC so we do not have to release 12374 * the disk mutex here. 12375 */ 12376 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 12377 NULL_FUNC, NULL) == pktp) { 12378 /* Success. Handle partial DMA */ 12379 if (pktp->pkt_resid != 0) { 12380 blockcount -= 12381 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 12382 } 12383 12384 cdbp->scc_cmd = com; 12385 SD_FILL_SCSI1_LUN(un, pktp); 12386 if (cdb_group_id == CDB_GROUPID_1) { 12387 FORMG1ADDR(cdbp, lba); 12388 FORMG1COUNT(cdbp, blockcount); 12389 return (0); 12390 } else if (cdb_group_id == CDB_GROUPID_4) { 12391 FORMG4LONGADDR(cdbp, lba); 12392 FORMG4COUNT(cdbp, blockcount); 12393 return (0); 12394 } else if (cdb_group_id == CDB_GROUPID_0) { 12395 FORMG0ADDR(cdbp, lba); 12396 FORMG0COUNT(cdbp, blockcount); 12397 return (0); 12398 } else if (cdb_group_id == CDB_GROUPID_5) { 12399 FORMG5ADDR(cdbp, lba); 12400 FORMG5COUNT(cdbp, blockcount); 12401 return (0); 12402 } 12403 12404 /* Unreachable */ 12405 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 12406 } 12407 12408 /* 12409 * Error setting up next portion of cmd transfer. 12410 * Something is definitely very wrong and this 12411 * should not happen. 12412 */ 12413 return (SD_PKT_ALLOC_FAILURE); 12414 } 12415 12416 /* 12417 * Function: sd_initpkt_for_uscsi 12418 * 12419 * Description: Allocate and initialize for transport a scsi_pkt struct, 12420 * based upon the info specified in the given uscsi_cmd struct. 12421 * 12422 * Return Code: SD_PKT_ALLOC_SUCCESS 12423 * SD_PKT_ALLOC_FAILURE 12424 * SD_PKT_ALLOC_FAILURE_NO_DMA 12425 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12426 * 12427 * Context: Kernel thread and may be called from software interrupt context 12428 * as part of a sdrunout callback. 
This function may not block or 12429 * call routines that block 12430 */ 12431 12432 static int 12433 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 12434 { 12435 struct uscsi_cmd *uscmd; 12436 struct sd_xbuf *xp; 12437 struct scsi_pkt *pktp; 12438 struct sd_lun *un; 12439 uint32_t flags = 0; 12440 12441 ASSERT(bp != NULL); 12442 ASSERT(pktpp != NULL); 12443 xp = SD_GET_XBUF(bp); 12444 ASSERT(xp != NULL); 12445 un = SD_GET_UN(bp); 12446 ASSERT(un != NULL); 12447 ASSERT(mutex_owned(SD_MUTEX(un))); 12448 12449 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12450 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12451 ASSERT(uscmd != NULL); 12452 12453 SD_TRACE(SD_LOG_IO_CORE, un, 12454 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 12455 12456 /* 12457 * Allocate the scsi_pkt for the command. 12458 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 12459 * during scsi_init_pkt time and will continue to use the 12460 * same path as long as the same scsi_pkt is used without 12461 * intervening scsi_dmafree(). Since the uscsi command path does 12462 * not call scsi_dmafree() before retrying a failed command, it 12463 * is necessary to make sure the PKT_DMA_PARTIAL flag is NOT 12464 * set, so that scsi_vhci can use another available path for the 12465 * retry. Besides, uscsi commands do not allow DMA breakup, 12466 * so there is no need to set the PKT_DMA_PARTIAL flag. 12467 */ 12468 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 12469 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12470 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12471 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status) 12472 - sizeof (struct scsi_extended_sense)), 0, 12473 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ, 12474 sdrunout, (caddr_t)un); 12475 } else { 12476 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 12477 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 12478 sizeof (struct scsi_arq_status), 0, 12479 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 12480 sdrunout, (caddr_t)un); 12481 } 12482 12483 if (pktp == NULL) { 12484 *pktpp = NULL; 12485 /* 12486 * Set the driver state to RWAIT to indicate the driver 12487 * is waiting on resource allocations. The driver will not 12488 * suspend, pm_suspend, or detach while the state is RWAIT. 12489 */ 12490 New_state(un, SD_STATE_RWAIT); 12491 12492 SD_ERROR(SD_LOG_IO_CORE, un, 12493 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 12494 12495 if ((bp->b_flags & B_ERROR) != 0) { 12496 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 12497 } 12498 return (SD_PKT_ALLOC_FAILURE); 12499 } 12500 12501 /* 12502 * We do not do DMA breakup for USCSI commands, so return failure 12503 * here if all the needed DMA resources were not allocated. 12504 */ 12505 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 12506 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 12507 scsi_destroy_pkt(pktp); 12508 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 12509 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 12510 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 12511 } 12512 12513 /* Init the cdb from the given uscsi struct */ 12514 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 12515 uscmd->uscsi_cdb[0], 0, 0, 0); 12516 12517 SD_FILL_SCSI1_LUN(un, pktp); 12518 12519 /* 12520 * Set up the optional USCSI flags. See the uscsi (7I) man page 12521 * for a listing of the supported flags.
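 *
 * For example (illustrative): a caller that sets uscsi_flags to
 * USCSI_SILENT | USCSI_DIAGNOSE ends up with FLAG_SILENT and
 * FLAG_DIAGNOSE set in pkt_flags by the tests below.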
12522 */ 12523 12524 if (uscmd->uscsi_flags & USCSI_SILENT) { 12525 flags |= FLAG_SILENT; 12526 } 12527 12528 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 12529 flags |= FLAG_DIAGNOSE; 12530 } 12531 12532 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 12533 flags |= FLAG_ISOLATE; 12534 } 12535 12536 if (un->un_f_is_fibre == FALSE) { 12537 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 12538 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 12539 } 12540 } 12541 12542 /* 12543 * Set the pkt flags here so we save time later. 12544 * Note: These flags are NOT in the uscsi man page!!! 12545 */ 12546 if (uscmd->uscsi_flags & USCSI_HEAD) { 12547 flags |= FLAG_HEAD; 12548 } 12549 12550 if (uscmd->uscsi_flags & USCSI_NOINTR) { 12551 flags |= FLAG_NOINTR; 12552 } 12553 12554 /* 12555 * For tagged queueing, things get a bit complicated. 12556 * Check first for head of queue and last for ordered queue. 12557 * If neither head nor ordered, use the default driver tag flags. 12558 */ 12559 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 12560 if (uscmd->uscsi_flags & USCSI_HTAG) { 12561 flags |= FLAG_HTAG; 12562 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 12563 flags |= FLAG_OTAG; 12564 } else { 12565 flags |= un->un_tagflags & FLAG_TAGMASK; 12566 } 12567 } 12568 12569 if (uscmd->uscsi_flags & USCSI_NODISCON) { 12570 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 12571 } 12572 12573 pktp->pkt_flags = flags; 12574 12575 /* Transfer uscsi information to scsi_pkt */ 12576 (void) scsi_uscsi_pktinit(uscmd, pktp); 12577 12578 /* Copy the caller's CDB into the pkt... */ 12579 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 12580 12581 if (uscmd->uscsi_timeout == 0) { 12582 pktp->pkt_time = un->un_uscsi_timeout; 12583 } else { 12584 pktp->pkt_time = uscmd->uscsi_timeout; 12585 } 12586 12587 /* need it later to identify USCSI request in sdintr */ 12588 xp->xb_pkt_flags |= SD_XB_USCSICMD; 12589 12590 xp->xb_sense_resid = uscmd->uscsi_rqresid; 12591 12592 pktp->pkt_private = bp; 12593 pktp->pkt_comp = sdintr; 12594 *pktpp = pktp; 12595 12596 SD_TRACE(SD_LOG_IO_CORE, un, 12597 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 12598 12599 return (SD_PKT_ALLOC_SUCCESS); 12600 } 12601 12602 12603 /* 12604 * Function: sd_destroypkt_for_uscsi 12605 * 12606 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 12607 * IOs. Also saves relevant info into the associated uscsi_cmd 12608 * struct.
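 * The saved status, residual, and (if requested) sense data let the
 * originator of the uscsi(7I) request examine the outcome of the command.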
12609 * 12610 * Context: May be called under interrupt context 12611 */ 12612 12613 static void 12614 sd_destroypkt_for_uscsi(struct buf *bp) 12615 { 12616 struct uscsi_cmd *uscmd; 12617 struct sd_xbuf *xp; 12618 struct scsi_pkt *pktp; 12619 struct sd_lun *un; 12620 12621 ASSERT(bp != NULL); 12622 xp = SD_GET_XBUF(bp); 12623 ASSERT(xp != NULL); 12624 un = SD_GET_UN(bp); 12625 ASSERT(un != NULL); 12626 ASSERT(!mutex_owned(SD_MUTEX(un))); 12627 pktp = SD_GET_PKTP(bp); 12628 ASSERT(pktp != NULL); 12629 12630 SD_TRACE(SD_LOG_IO_CORE, un, 12631 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 12632 12633 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 12634 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 12635 ASSERT(uscmd != NULL); 12636 12637 /* Save the status and the residual into the uscsi_cmd struct */ 12638 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 12639 uscmd->uscsi_resid = bp->b_resid; 12640 12641 /* Transfer scsi_pkt information to uscsi */ 12642 (void) scsi_uscsi_pktfini(pktp, uscmd); 12643 12644 /* 12645 * If enabled, copy any saved sense data into the area specified 12646 * by the uscsi command. 12647 */ 12648 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 12649 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 12650 /* 12651 * Note: uscmd->uscsi_rqbuf should always point to a buffer 12652 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 12653 */ 12654 uscmd->uscsi_rqstatus = xp->xb_sense_status; 12655 uscmd->uscsi_rqresid = xp->xb_sense_resid; 12656 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 12657 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 12658 MAX_SENSE_LENGTH); 12659 } else { 12660 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 12661 SENSE_LENGTH); 12662 } 12663 } 12664 12665 /* We are done with the scsi_pkt; free it now */ 12666 ASSERT(SD_GET_PKTP(bp) != NULL); 12667 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12668 12669 SD_TRACE(SD_LOG_IO_CORE, un, 12670 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 12671 } 12672 12673 12674 /* 12675 * Function: sd_bioclone_alloc 12676 * 12677 * Description: Allocate a buf(9S) and init it as per the given buf 12678 * and the various arguments. The associated sd_xbuf 12679 * struct is (nearly) duplicated. The struct buf *bp 12680 * argument is saved in new_xp->xb_private. 12681 * 12682 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 12683 * datalen - size of data area for the shadow bp 12684 * blkno - starting LBA 12685 * func - function pointer for b_iodone in the shadow buf. (May 12686 * be NULL if none.) 12687 * 12688 * Return Code: Pointer to the allocated buf(9S) struct 12689 * 12690 * Context: Can sleep. 12691 */ 12692 12693 static struct buf * 12694 sd_bioclone_alloc(struct buf *bp, size_t datalen, 12695 daddr_t blkno, int (*func)(struct buf *)) 12696 { 12697 struct sd_lun *un; 12698 struct sd_xbuf *xp; 12699 struct sd_xbuf *new_xp; 12700 struct buf *new_bp; 12701 12702 ASSERT(bp != NULL); 12703 xp = SD_GET_XBUF(bp); 12704 ASSERT(xp != NULL); 12705 un = SD_GET_UN(bp); 12706 ASSERT(un != NULL); 12707 ASSERT(!mutex_owned(SD_MUTEX(un))); 12708 12709 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 12710 NULL, KM_SLEEP); 12711 12712 new_bp->b_lblkno = blkno; 12713 12714 /* 12715 * Allocate an xbuf for the shadow bp and copy the contents of the 12716 * original xbuf into it.
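 * The copy is a flat bcopy(), so the shadow xbuf starts out sharing
 * every field value with the original; only xb_private is then
 * re-pointed at the original bp below.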
12717 */ 12718 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12719 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12720 12721 /* 12722 * The given bp is automatically saved in the xb_private member 12723 * of the new xbuf. Callers are allowed to depend on this. 12724 */ 12725 new_xp->xb_private = bp; 12726 12727 new_bp->b_private = new_xp; 12728 12729 return (new_bp); 12730 } 12731 12732 /* 12733 * Function: sd_shadow_buf_alloc 12734 * 12735 * Description: Allocate a buf(9S) and init it as per the given buf 12736 * and the various arguments. The associated sd_xbuf 12737 * struct is (nearly) duplicated. The struct buf *bp 12738 * argument is saved in new_xp->xb_private. 12739 * 12740 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 12741 * datalen - size of data area for the shadow bp 12742 * bflags - B_READ or B_WRITE (pseudo flag) 12743 * blkno - starting LBA 12744 * func - function pointer for b_iodone in the shadow buf. (May 12745 * be NULL if none.) 12746 * 12747 * Return Code: Pointer to the allocated buf(9S) struct 12748 * 12749 * Context: Can sleep. 12750 */ 12751 12752 static struct buf * 12753 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 12754 daddr_t blkno, int (*func)(struct buf *)) 12755 { 12756 struct sd_lun *un; 12757 struct sd_xbuf *xp; 12758 struct sd_xbuf *new_xp; 12759 struct buf *new_bp; 12760 12761 ASSERT(bp != NULL); 12762 xp = SD_GET_XBUF(bp); 12763 ASSERT(xp != NULL); 12764 un = SD_GET_UN(bp); 12765 ASSERT(un != NULL); 12766 ASSERT(!mutex_owned(SD_MUTEX(un))); 12767 12768 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 12769 bp_mapin(bp); 12770 } 12771 12772 bflags &= (B_READ | B_WRITE); 12773 #if defined(__i386) || defined(__amd64) 12774 new_bp = getrbuf(KM_SLEEP); 12775 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 12776 new_bp->b_bcount = datalen; 12777 new_bp->b_flags = bflags | 12778 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 12779 #else 12780 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 12781 datalen, bflags, SLEEP_FUNC, NULL); 12782 #endif 12783 new_bp->av_forw = NULL; 12784 new_bp->av_back = NULL; 12785 new_bp->b_dev = bp->b_dev; 12786 new_bp->b_blkno = blkno; 12787 new_bp->b_iodone = func; 12788 new_bp->b_edev = bp->b_edev; 12789 new_bp->b_resid = 0; 12790 12791 /* We need to preserve the B_FAILFAST flag */ 12792 if (bp->b_flags & B_FAILFAST) { 12793 new_bp->b_flags |= B_FAILFAST; 12794 } 12795 12796 /* 12797 * Allocate an xbuf for the shadow bp and copy the contents of the 12798 * original xbuf into it. 12799 */ 12800 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 12801 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 12802 12803 /* Need later to copy data between the shadow buf & original buf! */ 12804 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 12805 12806 /* 12807 * The given bp is automatically saved in the xb_private member 12808 * of the new xbuf. Callers are allowed to depend on this. 12809 */ 12810 new_xp->xb_private = bp; 12811 12812 new_bp->b_private = new_xp; 12813 12814 return (new_bp); 12815 } 12816 12817 /* 12818 * Function: sd_bioclone_free 12819 * 12820 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 12821 * in a larger-than-partition operation.
12822 * 12823 * Context: May be called under interrupt context 12824 */ 12825 12826 static void 12827 sd_bioclone_free(struct buf *bp) 12828 { 12829 struct sd_xbuf *xp; 12830 12831 ASSERT(bp != NULL); 12832 xp = SD_GET_XBUF(bp); 12833 ASSERT(xp != NULL); 12834 12835 /* 12836 * Call bp_mapout() before freeing the buf, in case a lower 12837 * layer or HBA had done a bp_mapin(). We must do this here 12838 * as we are the "originator" of the shadow buf. 12839 */ 12840 bp_mapout(bp); 12841 12842 /* 12843 * Null out b_iodone before freeing the bp, to ensure that the driver 12844 * never gets confused by a stale value in this field. (Just a little 12845 * extra defensiveness here.) 12846 */ 12847 bp->b_iodone = NULL; 12848 12849 freerbuf(bp); 12850 12851 kmem_free(xp, sizeof (struct sd_xbuf)); 12852 } 12853 12854 /* 12855 * Function: sd_shadow_buf_free 12856 * 12857 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 12858 * 12859 * Context: May be called under interrupt context 12860 */ 12861 12862 static void 12863 sd_shadow_buf_free(struct buf *bp) 12864 { 12865 struct sd_xbuf *xp; 12866 12867 ASSERT(bp != NULL); 12868 xp = SD_GET_XBUF(bp); 12869 ASSERT(xp != NULL); 12870 12871 #if defined(__sparc) 12872 /* 12873 * Call bp_mapout() before freeing the buf, in case a lower 12874 * layer or HBA had done a bp_mapin(). We must do this here 12875 * as we are the "originator" of the shadow buf. 12876 */ 12877 bp_mapout(bp); 12878 #endif 12879 12880 /* 12881 * Null out b_iodone before freeing the bp, to ensure that the driver 12882 * never gets confused by a stale value in this field. (Just a little 12883 * extra defensiveness here.) 12884 */ 12885 bp->b_iodone = NULL; 12886 12887 #if defined(__i386) || defined(__amd64) 12888 kmem_free(bp->b_un.b_addr, bp->b_bcount); 12889 freerbuf(bp); 12890 #else 12891 scsi_free_consistent_buf(bp); 12892 #endif 12893 12894 kmem_free(xp, sizeof (struct sd_xbuf)); 12895 } 12896 12897 12898 /* 12899 * Function: sd_print_transport_rejected_message 12900 * 12901 * Description: This implements the ludicrously complex rules for printing 12902 * a "transport rejected" message. This is to address the 12903 * specific problem of having a flood of this error message 12904 * produced when a failover occurs. 12905 * 12906 * Context: Any. 12907 */ 12908 12909 static void 12910 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 12911 int code) 12912 { 12913 ASSERT(un != NULL); 12914 ASSERT(mutex_owned(SD_MUTEX(un))); 12915 ASSERT(xp != NULL); 12916 12917 /* 12918 * Print the "transport rejected" message under the following 12919 * conditions: 12920 * 12921 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 12922 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 12923 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 12924 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 12925 * scsi_transport(9F) (which indicates that the target might have 12926 * gone off-line). This uses the un->un_tran_fatal_count 12927 * counter, which is incremented whenever a TRAN_FATAL_ERROR is 12928 * received, and reset to zero whenever a TRAN_ACCEPT is returned 12929 * from scsi_transport(). 12930 * 12931 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 12932 * the preceding cases in order for the message to be printed.
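 * For example (illustrative): if scsi_transport() returns
 * TRAN_FATAL_ERROR on three consecutive calls, only the first failure is
 * logged (un_tran_fatal_count == 1); the counter must be reset by an
 * intervening TRAN_ACCEPT before the message can be printed again.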
12933 */ 12934 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 12935 if ((sd_level_mask & SD_LOGMASK_DIAG) || 12936 (code != TRAN_FATAL_ERROR) || 12937 (un->un_tran_fatal_count == 1)) { 12938 switch (code) { 12939 case TRAN_BADPKT: 12940 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12941 "transport rejected bad packet\n"); 12942 break; 12943 case TRAN_FATAL_ERROR: 12944 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12945 "transport rejected fatal error\n"); 12946 break; 12947 default: 12948 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12949 "transport rejected (%d)\n", code); 12950 break; 12951 } 12952 } 12953 } 12954 } 12955 12956 12957 /* 12958 * Function: sd_add_buf_to_waitq 12959 * 12960 * Description: Add the given buf(9S) struct to the wait queue for the 12961 * instance. If sorting is enabled, then the buf is added 12962 * to the queue via an elevator sort algorithm (a la 12963 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 12964 * If sorting is not enabled, then the buf is just added 12965 * to the end of the wait queue. 12966 * 12967 * Return Code: void 12968 * 12969 * Context: Does not sleep/block, therefore technically can be called 12970 * from any context. However if sorting is enabled then the 12971 * execution time is indeterminate, and may take a long time if 12972 * the wait queue grows large. 12973 */ 12974 12975 static void 12976 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 12977 { 12978 struct buf *ap; 12979 12980 ASSERT(bp != NULL); 12981 ASSERT(un != NULL); 12982 ASSERT(mutex_owned(SD_MUTEX(un))); 12983 12984 /* If the queue is empty, add the buf as the only entry & return. */ 12985 if (un->un_waitq_headp == NULL) { 12986 ASSERT(un->un_waitq_tailp == NULL); 12987 un->un_waitq_headp = un->un_waitq_tailp = bp; 12988 bp->av_forw = NULL; 12989 return; 12990 } 12991 12992 ASSERT(un->un_waitq_tailp != NULL); 12993 12994 /* 12995 * If sorting is disabled, just add the buf to the tail end of 12996 * the wait queue and return. 12997 */ 12998 if (un->un_f_disksort_disabled) { 12999 un->un_waitq_tailp->av_forw = bp; 13000 un->un_waitq_tailp = bp; 13001 bp->av_forw = NULL; 13002 return; 13003 } 13004 13005 /* 13006 * Sort through the list of requests currently on the wait queue 13007 * and add the new buf request at the appropriate position. 13008 * 13009 * The un->un_waitq_headp is an activity chain pointer on which 13010 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 13011 * first queue holds those requests which are positioned after 13012 * the current SD_GET_BLKNO() (in the first request); the second holds 13013 * requests which came in after their SD_GET_BLKNO() number was passed. 13014 * Thus we implement a one way scan, retracting after reaching 13015 * the end of the drive to the first request on the second 13016 * queue, at which time it becomes the first queue. 13017 * A one-way scan is natural because of the way UNIX read-ahead 13018 * blocks are allocated. 13019 * 13020 * If we lie after the first request, then we must locate the 13021 * second request list and add ourselves to it. For example (illustrative): with a queue holding blocks 100, 180, 20, 60 (first list 100, 180; second list 20, 60), a new request for block 40 is inserted after 20, giving 100, 180, 20, 40, 60. 13022 */ 13023 ap = un->un_waitq_headp; 13024 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 13025 while (ap->av_forw != NULL) { 13026 /* 13027 * Look for an "inversion" in the (normally 13028 * ascending) block numbers. This indicates 13029 * the start of the second request list. 13030 */ 13031 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 13032 /* 13033 * Search the second request list for the 13034 * first request at a larger block number.
13035 * We go before that; however if there is 13036 * no such request, we go at the end. 13037 */ 13038 do { 13039 if (SD_GET_BLKNO(bp) < 13040 SD_GET_BLKNO(ap->av_forw)) { 13041 goto insert; 13042 } 13043 ap = ap->av_forw; 13044 } while (ap->av_forw != NULL); 13045 goto insert; /* after last */ 13046 } 13047 ap = ap->av_forw; 13048 } 13049 13050 /* 13051 * No inversions... we will go after the last, and 13052 * be the first request in the second request list. 13053 */ 13054 goto insert; 13055 } 13056 13057 /* 13058 * Request is at/after the current request... 13059 * sort in the first request list. 13060 */ 13061 while (ap->av_forw != NULL) { 13062 /* 13063 * We want to go after the current request (1) if 13064 * there is an inversion after it (i.e. it is the end 13065 * of the first request list), or (2) if the next 13066 * request is a larger block no. than our request. 13067 */ 13068 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 13069 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 13070 goto insert; 13071 } 13072 ap = ap->av_forw; 13073 } 13074 13075 /* 13076 * Neither a second list nor a larger request, therefore 13077 * we go at the end of the first list (which is the same 13078 * as the end of the whole shebang). 13079 */ 13080 insert: 13081 bp->av_forw = ap->av_forw; 13082 ap->av_forw = bp; 13083 13084 /* 13085 * If we inserted onto the tail end of the waitq, make sure the 13086 * tail pointer is updated. 13087 */ 13088 if (ap == un->un_waitq_tailp) { 13089 un->un_waitq_tailp = bp; 13090 } 13091 } 13092 13093 13094 /* 13095 * Function: sd_start_cmds 13096 * 13097 * Description: Remove and transport cmds from the driver queues. 13098 * 13099 * Arguments: un - pointer to the unit (soft state) struct for the target. 13100 * 13101 * immed_bp - ptr to a buf to be transported immediately. Only 13102 * the immed_bp is transported; bufs on the waitq are not 13103 * processed and the un_retry_bp is not checked. If immed_bp is 13104 * NULL, then normal queue processing is performed. 13105 * 13106 * Context: May be called from kernel thread context, interrupt context, 13107 * or runout callback context. This function may not block or 13108 * call routines that block. 13109 */ 13110 13111 static void 13112 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 13113 { 13114 struct sd_xbuf *xp; 13115 struct buf *bp; 13116 void (*statp)(kstat_io_t *); 13117 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13118 void (*saved_statp)(kstat_io_t *); 13119 #endif 13120 int rval; 13121 13122 ASSERT(un != NULL); 13123 ASSERT(mutex_owned(SD_MUTEX(un))); 13124 ASSERT(un->un_ncmds_in_transport >= 0); 13125 ASSERT(un->un_throttle >= 0); 13126 13127 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 13128 13129 do { 13130 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13131 saved_statp = NULL; 13132 #endif 13133 13134 /* 13135 * If we are syncing or dumping, fail the command to 13136 * avoid recursively calling back into scsi_transport(). 13137 * The dump I/O itself uses a separate code path so this 13138 * only prevents non-dump I/O from being sent while dumping. 13139 * File system sync takes place before dumping begins. 13140 * During panic, filesystem I/O is allowed provided 13141 * un_in_callback is <= 1. This is to prevent recursion 13142 * such as sd_start_cmds -> scsi_transport -> sdintr -> 13143 * sd_start_cmds and so on. See panic.c for more information 13144 * about the states the system can be in during panic.
13145 */ 13146 if ((un->un_state == SD_STATE_DUMPING) || 13147 (ddi_in_panic() && (un->un_in_callback > 1))) { 13148 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13149 "sd_start_cmds: panicking\n"); 13150 goto exit; 13151 } 13152 13153 if ((bp = immed_bp) != NULL) { 13154 /* 13155 * We have a bp that must be transported immediately. 13156 * It's OK to transport the immed_bp here without doing 13157 * the throttle limit check because the immed_bp is 13158 * always used in a retry/recovery case. This means 13159 * that we know we are not at the throttle limit by 13160 * virtue of the fact that to get here we must have 13161 * already gotten a command back via sdintr(). This also 13162 * relies on (1) the command on un_retry_bp preventing 13163 * further commands from the waitq from being issued; 13164 * and (2) the code in sd_retry_command checking the 13165 * throttle limit before issuing a delayed or immediate 13166 * retry. This holds even if the throttle limit is 13167 * currently ratcheted down from its maximum value. 13168 */ 13169 statp = kstat_runq_enter; 13170 if (bp == un->un_retry_bp) { 13171 ASSERT((un->un_retry_statp == NULL) || 13172 (un->un_retry_statp == kstat_waitq_enter) || 13173 (un->un_retry_statp == 13174 kstat_runq_back_to_waitq)); 13175 /* 13176 * If the waitq kstat was incremented when 13177 * sd_set_retry_bp() queued this bp for a retry, 13178 * then we must set up statp so that the waitq 13179 * count will get decremented correctly below. 13180 * Also we must clear un->un_retry_statp to 13181 * ensure that we do not act on a stale value 13182 * in this field. 13183 */ 13184 if ((un->un_retry_statp == kstat_waitq_enter) || 13185 (un->un_retry_statp == 13186 kstat_runq_back_to_waitq)) { 13187 statp = kstat_waitq_to_runq; 13188 } 13189 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13190 saved_statp = un->un_retry_statp; 13191 #endif 13192 un->un_retry_statp = NULL; 13193 13194 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13195 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 13196 "un_throttle:%d un_ncmds_in_transport:%d\n", 13197 un, un->un_retry_bp, un->un_throttle, 13198 un->un_ncmds_in_transport); 13199 } else { 13200 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 13201 "processing priority bp:0x%p\n", bp); 13202 } 13203 13204 } else if ((bp = un->un_waitq_headp) != NULL) { 13205 /* 13206 * A command on the waitq is ready to go, but do not 13207 * send it if: 13208 * 13209 * (1) the throttle limit has been reached, or 13210 * (2) a retry is pending, or 13211 * (3) a START_STOP_UNIT callback is pending, or 13212 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 13213 * command is pending. 13214 * 13215 * For all of these conditions, IO processing will 13216 * restart after the condition is cleared. 13217 */ 13218 if (un->un_ncmds_in_transport >= un->un_throttle) { 13219 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13220 "sd_start_cmds: exiting, " 13221 "throttle limit reached!\n"); 13222 goto exit; 13223 } 13224 if (un->un_retry_bp != NULL) { 13225 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13226 "sd_start_cmds: exiting, retry pending!\n"); 13227 goto exit; 13228 } 13229 if (un->un_startstop_timeid != NULL) { 13230 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13231 "sd_start_cmds: exiting, " 13232 "START_STOP pending!\n"); 13233 goto exit; 13234 } 13235 if (un->un_direct_priority_timeid != NULL) { 13236 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13237 "sd_start_cmds: exiting, " 13238 "SD_PATH_DIRECT_PRIORITY cmd. 
pending!\n"); 13239 goto exit; 13240 } 13241 13242 /* Dequeue the command */ 13243 un->un_waitq_headp = bp->av_forw; 13244 if (un->un_waitq_headp == NULL) { 13245 un->un_waitq_tailp = NULL; 13246 } 13247 bp->av_forw = NULL; 13248 statp = kstat_waitq_to_runq; 13249 SD_TRACE(SD_LOG_IO_CORE, un, 13250 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 13251 13252 } else { 13253 /* No work to do so bail out now */ 13254 SD_TRACE(SD_LOG_IO_CORE, un, 13255 "sd_start_cmds: no more work, exiting!\n"); 13256 goto exit; 13257 } 13258 13259 /* 13260 * Reset the state to normal. This is the mechanism by which 13261 * the state transitions from either SD_STATE_RWAIT or 13262 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 13263 * If state is SD_STATE_PM_CHANGING then this command is 13264 * part of the device power control and the state must 13265 * not be put back to normal. Doing so would 13266 * allow new commands to proceed when they shouldn't, 13267 * as the device may be going off. 13268 */ 13269 if ((un->un_state != SD_STATE_SUSPENDED) && 13270 (un->un_state != SD_STATE_PM_CHANGING)) { 13271 New_state(un, SD_STATE_NORMAL); 13272 } 13273 13274 xp = SD_GET_XBUF(bp); 13275 ASSERT(xp != NULL); 13276 13277 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13278 /* 13279 * Allocate the scsi_pkt if we need one, or attach DMA 13280 * resources if we have a scsi_pkt that needs them. The 13281 * latter should only occur for commands that are being 13282 * retried. 13283 */ 13284 if ((xp->xb_pktp == NULL) || 13285 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 13286 #else 13287 if (xp->xb_pktp == NULL) { 13288 #endif 13289 /* 13290 * There is no scsi_pkt allocated for this buf. Call 13291 * the initpkt function to allocate & init one. 13292 * 13293 * The scsi_init_pkt runout callback functionality is 13294 * implemented as follows: 13295 * 13296 * 1) The initpkt function always calls 13297 * scsi_init_pkt(9F) with sdrunout specified as the 13298 * callback routine. 13299 * 2) On a successful allocation, the packet is initialized 13300 * and the I/O is transported. 13301 * 3) The I/O associated with an allocation resource 13302 * failure is left on its queue to be retried via 13303 * runout or the next I/O. 13304 * 4) The I/O associated with a DMA error is removed 13305 * from the queue and failed with EIO. Processing of 13306 * the transport queues is also halted to be 13307 * restarted via runout or the next I/O. 13308 * 5) The I/O associated with a CDB size or packet 13309 * size error is removed from the queue and failed 13310 * with EIO. Processing of the transport queues is 13311 * continued. 13312 * 13313 * Note: there is no interface for canceling a runout 13314 * callback. To prevent the driver from detaching or 13315 * suspending while a runout is pending, the driver 13316 * state is set to SD_STATE_RWAIT. 13317 * 13318 * Note: using the scsi_init_pkt callback facility can 13319 * result in an I/O request persisting at the head of 13320 * the list which cannot be satisfied even after 13321 * multiple retries. In the future the driver may 13322 * implement some kind of maximum runout count before 13323 * failing an I/O. 13324 * 13325 * Note: the use of funcp below may seem superfluous, 13326 * but it helps warlock figure out the correct 13327 * initpkt function calls (see [s]sd.wlcmd).
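 * As a sketch of the dispatch (illustrative): for a USCSI request,
 * funcp resolves to sd_initpkt_for_uscsi() via sd_initpkt_map[], and
 * its SD_PKT_ALLOC_* return value selects one of the cases in the
 * switch below.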
13328 */ 13329 struct scsi_pkt *pktp; 13330 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 13331 13332 ASSERT(bp != un->un_rqs_bp); 13333 13334 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 13335 switch ((*funcp)(bp, &pktp)) { 13336 case SD_PKT_ALLOC_SUCCESS: 13337 xp->xb_pktp = pktp; 13338 SD_TRACE(SD_LOG_IO_CORE, un, 13339 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 13340 pktp); 13341 goto got_pkt; 13342 13343 case SD_PKT_ALLOC_FAILURE: 13344 /* 13345 * Temporary (hopefully) resource depletion. 13346 * Since retries and RQS commands always have a 13347 * scsi_pkt allocated, these cases should never 13348 * get here. So the only cases this needs to 13349 * handle are a bp from the waitq (which we put 13350 * back onto the waitq for sdrunout), or a bp 13351 * sent as an immed_bp (which we just fail). 13352 */ 13353 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13354 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 13355 13356 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13357 13358 if (bp == immed_bp) { 13359 /* 13360 * If SD_XB_DMA_FREED is clear, then 13361 * this is a failure to allocate a 13362 * scsi_pkt, and we must fail the 13363 * command. 13364 */ 13365 if ((xp->xb_pkt_flags & 13366 SD_XB_DMA_FREED) == 0) { 13367 break; 13368 } 13369 13370 /* 13371 * If this immediate command is NOT our 13372 * un_retry_bp, then we must fail it. 13373 */ 13374 if (bp != un->un_retry_bp) { 13375 break; 13376 } 13377 13378 /* 13379 * We get here if this cmd is our 13380 * un_retry_bp that was DMAFREED, but 13381 * scsi_init_pkt() failed to reallocate 13382 * DMA resources when we attempted to 13383 * retry it. This can happen when an 13384 * mpxio failover is in progress, but 13385 * we don't want to just fail the 13386 * command in this case. 13387 * 13388 * Use timeout(9F) to restart it after 13389 * a 100ms delay. We don't want to 13390 * let sdrunout() restart it, because 13391 * sdrunout() is just supposed to start 13392 * commands that are sitting on the 13393 * wait queue. The un_retry_bp stays 13394 * set until the command completes, but 13395 * sdrunout can be called many times 13396 * before that happens. Since sdrunout 13397 * cannot tell if the un_retry_bp is 13398 * already in the transport, it could 13399 * end up calling scsi_transport() for 13400 * the un_retry_bp multiple times. 13401 * 13402 * Also: don't schedule the callback 13403 * if some other callback is already 13404 * pending. 13405 */ 13406 if (un->un_retry_statp == NULL) { 13407 /* 13408 * restore the kstat pointer to 13409 * keep kstat counts coherent 13410 * when we do retry the command. 13411 */ 13412 un->un_retry_statp = 13413 saved_statp; 13414 } 13415 13416 if ((un->un_startstop_timeid == NULL) && 13417 (un->un_retry_timeid == NULL) && 13418 (un->un_direct_priority_timeid == 13419 NULL)) { 13420 13421 un->un_retry_timeid = 13422 timeout( 13423 sd_start_retry_command, 13424 un, SD_RESTART_TIMEOUT); 13425 } 13426 goto exit; 13427 } 13428 13429 #else 13430 if (bp == immed_bp) { 13431 break; /* Just fail the command */ 13432 } 13433 #endif 13434 13435 /* Add the buf back to the head of the waitq */ 13436 bp->av_forw = un->un_waitq_headp; 13437 un->un_waitq_headp = bp; 13438 if (un->un_waitq_tailp == NULL) { 13439 un->un_waitq_tailp = bp; 13440 } 13441 goto exit; 13442 13443 case SD_PKT_ALLOC_FAILURE_NO_DMA: 13444 /* 13445 * HBA DMA resource failure. Fail the command 13446 * and continue processing of the queues.
13447 */ 13448 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13449 "sd_start_cmds: " 13450 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 13451 break; 13452 13453 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 13454 /* 13455 * Note:x86: Partial DMA mapping not supported 13456 * for USCSI commands, and all the needed DMA 13457 * resources were not allocated. 13458 */ 13459 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13460 "sd_start_cmds: " 13461 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 13462 break; 13463 13464 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 13465 /* 13466 * Note:x86: Request cannot fit into CDB based 13467 * on lba and len. 13468 */ 13469 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13470 "sd_start_cmds: " 13471 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 13472 break; 13473 13474 default: 13475 /* Should NEVER get here! */ 13476 panic("scsi_initpkt error"); 13477 /*NOTREACHED*/ 13478 } 13479 13480 /* 13481 * Fatal error in allocating a scsi_pkt for this buf. 13482 * Update kstats & return the buf with an error code. 13483 * We must use sd_return_failed_command_no_restart() to 13484 * avoid a recursive call back into sd_start_cmds(). 13485 * However this also means that we must keep processing 13486 * the waitq here in order to avoid stalling. 13487 */ 13488 if (statp == kstat_waitq_to_runq) { 13489 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 13490 } 13491 sd_return_failed_command_no_restart(un, bp, EIO); 13492 if (bp == immed_bp) { 13493 /* immed_bp is gone by now, so clear this */ 13494 immed_bp = NULL; 13495 } 13496 continue; 13497 } 13498 got_pkt: 13499 if (bp == immed_bp) { 13500 /* goto the head of the class.... */ 13501 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 13502 } 13503 13504 un->un_ncmds_in_transport++; 13505 SD_UPDATE_KSTATS(un, statp, bp); 13506 13507 /* 13508 * Call scsi_transport() to send the command to the target. 13509 * According to SCSA architecture, we must drop the mutex here 13510 * before calling scsi_transport() in order to avoid deadlock. 13511 * Note that the scsi_pkt's completion routine can be executed 13512 * (from interrupt context) even before the call to 13513 * scsi_transport() returns. 13514 */ 13515 SD_TRACE(SD_LOG_IO_CORE, un, 13516 "sd_start_cmds: calling scsi_transport()\n"); 13517 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 13518 13519 mutex_exit(SD_MUTEX(un)); 13520 rval = scsi_transport(xp->xb_pktp); 13521 mutex_enter(SD_MUTEX(un)); 13522 13523 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13524 "sd_start_cmds: scsi_transport() returned %d\n", rval); 13525 13526 switch (rval) { 13527 case TRAN_ACCEPT: 13528 /* Clear this with every pkt accepted by the HBA */ 13529 un->un_tran_fatal_count = 0; 13530 break; /* Success; try the next cmd (if any) */ 13531 13532 case TRAN_BUSY: 13533 un->un_ncmds_in_transport--; 13534 ASSERT(un->un_ncmds_in_transport >= 0); 13535 13536 /* 13537 * Don't retry request sense, the sense data 13538 * is lost when another request is sent. 13539 * Free up the rqs buf and retry 13540 * the original failed cmd. Update kstat. 13541 */ 13542 if (bp == un->un_rqs_bp) { 13543 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13544 bp = sd_mark_rqs_idle(un, xp); 13545 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 13546 NULL, NULL, EIO, un->un_busy_timeout / 500, 13547 kstat_waitq_enter); 13548 goto exit; 13549 } 13550 13551 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13552 /* 13553 * Free the DMA resources for the scsi_pkt. 
This will 13554 * allow mpxio to select another path the next time 13555 * we call scsi_transport() with this scsi_pkt. 13556 * See sdintr() for the rationalization behind this. 13557 */ 13558 if ((un->un_f_is_fibre == TRUE) && 13559 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 13560 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 13561 scsi_dmafree(xp->xb_pktp); 13562 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 13563 } 13564 #endif 13565 13566 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 13567 /* 13568 * Commands that are SD_PATH_DIRECT_PRIORITY 13569 * are for error recovery situations. These do 13570 * not use the normal command waitq, so if they 13571 * get a TRAN_BUSY we cannot put them back onto 13572 * the waitq for later retry. One possible 13573 * problem is that there could already be some 13574 * other command on un_retry_bp that is waiting 13575 * for this one to complete, so we would be 13576 * deadlocked if we put this command back onto 13577 * the waitq for later retry (since un_retry_bp 13578 * must complete before the driver gets back to 13579 * commands on the waitq). 13580 * 13581 * To avoid deadlock we must schedule a callback 13582 * that will restart this command after a set 13583 * interval. This should keep retrying for as 13584 * long as the underlying transport keeps 13585 * returning TRAN_BUSY (just like for other 13586 * commands). Use the same timeout interval as 13587 * for the ordinary TRAN_BUSY retry. 13588 */ 13589 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13590 "sd_start_cmds: scsi_transport() returned " 13591 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 13592 13593 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13594 un->un_direct_priority_timeid = 13595 timeout(sd_start_direct_priority_command, 13596 bp, un->un_busy_timeout / 500); 13597 13598 goto exit; 13599 } 13600 13601 /* 13602 * For TRAN_BUSY, we want to reduce the throttle value, 13603 * unless we are retrying a command. 13604 */ 13605 if (bp != un->un_retry_bp) { 13606 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 13607 } 13608 13609 /* 13610 * Set up the bp to be tried again 10 ms later. 13611 * Note:x86: Is there a timeout value in the sd_lun 13612 * for this condition? 13613 */ 13614 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 13615 kstat_runq_back_to_waitq); 13616 goto exit; 13617 13618 case TRAN_FATAL_ERROR: 13619 un->un_tran_fatal_count++; 13620 /* FALLTHRU */ 13621 13622 case TRAN_BADPKT: 13623 default: 13624 un->un_ncmds_in_transport--; 13625 ASSERT(un->un_ncmds_in_transport >= 0); 13626 13627 /* 13628 * If this is our REQUEST SENSE command with a 13629 * transport error, we must get back the pointers 13630 * to the original buf, and mark the REQUEST 13631 * SENSE command as "available". 13632 */ 13633 if (bp == un->un_rqs_bp) { 13634 bp = sd_mark_rqs_idle(un, xp); 13635 xp = SD_GET_XBUF(bp); 13636 } else { 13637 /* 13638 * Legacy behavior: do not update transport 13639 * error count for request sense commands. 13640 */ 13641 SD_UPDATE_ERRSTATS(un, sd_transerrs); 13642 } 13643 13644 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 13645 sd_print_transport_rejected_message(un, xp, rval); 13646 13647 /* 13648 * We must use sd_return_failed_command_no_restart() to 13649 * avoid a recursive call back into sd_start_cmds(). 13650 * However this also means that we must keep processing 13651 * the waitq here in order to avoid stalling. 
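 * (Illustration: sd_return_failed_command() ends up in
 * sd_return_command(), which restarts the waitq via sd_start_cmds();
 * calling it from here would recurse.)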
13652 */ 13653 sd_return_failed_command_no_restart(un, bp, EIO); 13654 13655 /* 13656 * Notify any threads waiting in sd_ddi_suspend() that 13657 * a command completion has occurred. 13658 */ 13659 if (un->un_state == SD_STATE_SUSPENDED) { 13660 cv_broadcast(&un->un_disk_busy_cv); 13661 } 13662 13663 if (bp == immed_bp) { 13664 /* immed_bp is gone by now, so clear this */ 13665 immed_bp = NULL; 13666 } 13667 break; 13668 } 13669 13670 } while (immed_bp == NULL); 13671 13672 exit: 13673 ASSERT(mutex_owned(SD_MUTEX(un))); 13674 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 13675 } 13676 13677 13678 /* 13679 * Function: sd_return_command 13680 * 13681 * Description: Returns a command to its originator (with or without an 13682 * error). Also starts commands waiting to be transported 13683 * to the target. 13684 * 13685 * Context: May be called from interrupt, kernel, or timeout context 13686 */ 13687 13688 static void 13689 sd_return_command(struct sd_lun *un, struct buf *bp) 13690 { 13691 struct sd_xbuf *xp; 13692 struct scsi_pkt *pktp; 13693 13694 ASSERT(bp != NULL); 13695 ASSERT(un != NULL); 13696 ASSERT(mutex_owned(SD_MUTEX(un))); 13697 ASSERT(bp != un->un_rqs_bp); 13698 xp = SD_GET_XBUF(bp); 13699 ASSERT(xp != NULL); 13700 13701 pktp = SD_GET_PKTP(bp); 13702 13703 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 13704 13705 /* 13706 * Note: check for the "sdrestart failed" case. 13707 */ 13708 if ((un->un_partial_dma_supported == 1) && 13709 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 13710 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 13711 (xp->xb_pktp->pkt_resid == 0)) { 13712 13713 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 13714 /* 13715 * Successfully set up next portion of cmd 13716 * transfer, try sending it 13717 */ 13718 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 13719 NULL, NULL, 0, (clock_t)0, NULL); 13720 sd_start_cmds(un, NULL); 13721 return; /* Note:x86: need a return here? */ 13722 } 13723 } 13724 13725 /* 13726 * If this is the failfast bp, clear it from un_failfast_bp. This 13727 * can happen if upon being re-tried the failfast bp either 13728 * succeeded or encountered another error (possibly even a different 13729 * error than the one that precipitated the failfast state, but in 13730 * that case it would have had to exhaust retries as well). Regardless, 13731 * this should not occur whenever the instance is in the active 13732 * failfast state. 13733 */ 13734 if (bp == un->un_failfast_bp) { 13735 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13736 un->un_failfast_bp = NULL; 13737 } 13738 13739 /* 13740 * Clear the failfast state upon successful completion of ANY cmd. 13741 */ 13742 if (bp->b_error == 0) { 13743 un->un_failfast_state = SD_FAILFAST_INACTIVE; 13744 } 13745 13746 /* 13747 * This is used if the command was retried one or more times. Show that 13748 * we are done with it, and allow processing of the waitq to resume. 13749 */ 13750 if (bp == un->un_retry_bp) { 13751 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13752 "sd_return_command: un:0x%p: " 13753 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13754 un->un_retry_bp = NULL; 13755 un->un_retry_statp = NULL; 13756 } 13757 13758 SD_UPDATE_RDWR_STATS(un, bp); 13759 SD_UPDATE_PARTITION_STATS(un, bp); 13760 13761 switch (un->un_state) { 13762 case SD_STATE_SUSPENDED: 13763 /* 13764 * Notify any threads waiting in sd_ddi_suspend() that 13765 * a command completion has occurred. 
13766 */ 13767 cv_broadcast(&un->un_disk_busy_cv); 13768 break; 13769 default: 13770 sd_start_cmds(un, NULL); 13771 break; 13772 } 13773 13774 /* Return this command up the iodone chain to its originator. */ 13775 mutex_exit(SD_MUTEX(un)); 13776 13777 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13778 xp->xb_pktp = NULL; 13779 13780 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13781 13782 ASSERT(!mutex_owned(SD_MUTEX(un))); 13783 mutex_enter(SD_MUTEX(un)); 13784 13785 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 13786 } 13787 13788 13789 /* 13790 * Function: sd_return_failed_command 13791 * 13792 * Description: Command completion when an error occurred. 13793 * 13794 * Context: May be called from interrupt context 13795 */ 13796 13797 static void 13798 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 13799 { 13800 ASSERT(bp != NULL); 13801 ASSERT(un != NULL); 13802 ASSERT(mutex_owned(SD_MUTEX(un))); 13803 13804 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13805 "sd_return_failed_command: entry\n"); 13806 13807 /* 13808 * b_resid could already be nonzero due to a partial data 13809 * transfer, so do not change it here. 13810 */ 13811 SD_BIOERROR(bp, errcode); 13812 13813 sd_return_command(un, bp); 13814 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13815 "sd_return_failed_command: exit\n"); 13816 } 13817 13818 13819 /* 13820 * Function: sd_return_failed_command_no_restart 13821 * 13822 * Description: Same as sd_return_failed_command, but ensures that no 13823 * call back into sd_start_cmds will be issued. 13824 * 13825 * Context: May be called from interrupt context 13826 */ 13827 13828 static void 13829 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 13830 int errcode) 13831 { 13832 struct sd_xbuf *xp; 13833 13834 ASSERT(bp != NULL); 13835 ASSERT(un != NULL); 13836 ASSERT(mutex_owned(SD_MUTEX(un))); 13837 xp = SD_GET_XBUF(bp); 13838 ASSERT(xp != NULL); 13839 ASSERT(errcode != 0); 13840 13841 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13842 "sd_return_failed_command_no_restart: entry\n"); 13843 13844 /* 13845 * b_resid could already be nonzero due to a partial data 13846 * transfer, so do not change it here. 13847 */ 13848 SD_BIOERROR(bp, errcode); 13849 13850 /* 13851 * If this is the failfast bp, clear it. This can happen if the 13852 * failfast bp encountered a fatal error when we attempted to 13853 * re-try it (such as a scsi_transport(9F) failure). However 13854 * we should NOT be in an active failfast state if the failfast 13855 * bp is not NULL. 13856 */ 13857 if (bp == un->un_failfast_bp) { 13858 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 13859 un->un_failfast_bp = NULL; 13860 } 13861 13862 if (bp == un->un_retry_bp) { 13863 /* 13864 * This command was retried one or more times. Show that we are 13865 * done with it, and allow processing of the waitq to resume.
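 * (Unlike sd_return_command(), no call into sd_start_cmds() follows
 * here; the caller is responsible for keeping the queues moving, per
 * the function description above.)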
13866 */ 13867 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13868 "sd_return_failed_command_no_restart: " 13869 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 13870 un->un_retry_bp = NULL; 13871 un->un_retry_statp = NULL; 13872 } 13873 13874 SD_UPDATE_RDWR_STATS(un, bp); 13875 SD_UPDATE_PARTITION_STATS(un, bp); 13876 13877 mutex_exit(SD_MUTEX(un)); 13878 13879 if (xp->xb_pktp != NULL) { 13880 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 13881 xp->xb_pktp = NULL; 13882 } 13883 13884 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 13885 13886 mutex_enter(SD_MUTEX(un)); 13887 13888 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13889 "sd_return_failed_command_no_restart: exit\n"); 13890 } 13891 13892 13893 /* 13894 * Function: sd_retry_command 13895 * 13896 * Description: Queue up a command for retry, or (optionally) fail it 13897 * if retry counts are exhausted. 13898 * 13899 * Arguments: un - Pointer to the sd_lun struct for the target. 13900 * 13901 * bp - Pointer to the buf for the command to be retried. 13902 * 13903 * retry_check_flag - Flag to see which (if any) of the retry 13904 * counts should be decremented/checked. If the indicated 13905 * retry count is exhausted, then the command will not be 13906 * retried; it will be failed instead. This should use a 13907 * value equal to one of the following: 13908 * 13909 * SD_RETRIES_NOCHECK 13910 * SD_RETRIES_STANDARD 13911 * SD_RETRIES_VICTIM 13912 * 13913 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 13914 * if the check should be made to see if FLAG_ISOLATE is set 13915 * in the pkt. If FLAG_ISOLATE is set, then the command is 13916 * not retried, it is simply failed. 13917 * 13918 * user_funcp - Ptr to function to call before dispatching the 13919 * command. May be NULL if no action needs to be performed. 13920 * (Primarily intended for printing messages.) 13921 * 13922 * user_arg - Optional argument to be passed along to 13923 * the user_funcp call. 13924 * 13925 * failure_code - errno return code to set in the bp if the 13926 * command is going to be failed. 13927 * 13928 * retry_delay - Retry delay interval in (clock_t) units. May 13929 * be zero which indicates that the command should be retried 13930 * immediately (ie, without an intervening delay). 13931 * 13932 * statp - Ptr to kstat function to be updated if the command 13933 * is queued for a delayed retry. May be NULL if no kstat 13934 * update is desired. 13935 * 13936 * Context: May be called from interrupt context. 13937 */ 13938 13939 static void 13940 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 13941 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 13942 code), void *user_arg, int failure_code, clock_t retry_delay, 13943 void (*statp)(kstat_io_t *)) 13944 { 13945 struct sd_xbuf *xp; 13946 struct scsi_pkt *pktp; 13947 13948 ASSERT(un != NULL); 13949 ASSERT(mutex_owned(SD_MUTEX(un))); 13950 ASSERT(bp != NULL); 13951 xp = SD_GET_XBUF(bp); 13952 ASSERT(xp != NULL); 13953 pktp = SD_GET_PKTP(bp); 13954 ASSERT(pktp != NULL); 13955 13956 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13957 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 13958 13959 /* 13960 * If we are syncing or dumping, fail the command to avoid 13961 * recursively calling back into scsi_transport(). 13962 */ 13963 if (ddi_in_panic()) { 13964 goto fail_command_no_log; 13965 } 13966 13967 /* 13968 * We should never be retrying a command with FLAG_DIAGNOSE set, so 13969 * log an error and fail the command.
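 * (FLAG_DIAGNOSE is set from USCSI_DIAGNOSE, which asks that no retry
 * or recovery be attempted, so reaching a retry with it set indicates
 * a logic error.)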
13970 */ 13971 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 13972 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 13973 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 13974 sd_dump_memory(un, SD_LOG_IO, "CDB", 13975 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 13976 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 13977 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 13978 goto fail_command; 13979 } 13980 13981 /* 13982 * If we are suspended, then put the command onto the head of the 13983 * wait queue since we don't want to start more commands, and 13984 * clear the un_retry_bp. When we are resumed, we will 13985 * handle the command in the wait queue. 13986 */ 13987 switch (un->un_state) { 13988 case SD_STATE_SUSPENDED: 13989 case SD_STATE_DUMPING: 13990 bp->av_forw = un->un_waitq_headp; 13991 un->un_waitq_headp = bp; 13992 if (un->un_waitq_tailp == NULL) { 13993 un->un_waitq_tailp = bp; 13994 } 13995 if (bp == un->un_retry_bp) { 13996 un->un_retry_bp = NULL; 13997 un->un_retry_statp = NULL; 13998 } 13999 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 14000 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 14001 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 14002 return; 14003 default: 14004 break; 14005 } 14006 14007 /* 14008 * If the caller wants us to check FLAG_ISOLATE, then see if that 14009 * is set; if it is then we do not want to retry the command. 14010 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 14011 */ 14012 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 14013 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 14014 goto fail_command; 14015 } 14016 } 14017 14018 14019 /* 14020 * If SD_RETRIES_FAILFAST is set, it indicates that either a 14021 * command timeout or a selection timeout has occurred. This means 14022 * that we were unable to establish any kind of communication with 14023 * the target, and subsequent retries and/or commands are likely 14024 * to encounter similar results and take a long time to complete. 14025 * 14026 * If this is a failfast error condition, we need to update the 14027 * failfast state, even if this bp does not have B_FAILFAST set. 14028 */ 14029 if (retry_check_flag & SD_RETRIES_FAILFAST) { 14030 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 14031 ASSERT(un->un_failfast_bp == NULL); 14032 /* 14033 * If we are already in the active failfast state, and 14034 * another failfast error condition has been detected, 14035 * then fail this command if it has B_FAILFAST set. 14036 * If B_FAILFAST is clear, then maintain the legacy 14037 * behavior of retrying heroically, even though this will 14038 * take a lot more time to fail the command. 14039 */ 14040 if (bp->b_flags & B_FAILFAST) { 14041 goto fail_command; 14042 } 14043 } else { 14044 /* 14045 * We're not in the active failfast state, but we 14046 * have a failfast error condition, so we must begin 14047 * transition to the next state. We do this regardless 14048 * of whether or not this bp has B_FAILFAST set. 14049 */ 14050 if (un->un_failfast_bp == NULL) { 14051 /* 14052 * This is the first bp to meet a failfast 14053 * condition so save it on un_failfast_bp & 14054 * do normal retry processing. Do not enter 14055 * active failfast state yet. This marks 14056 * entry into the "failfast pending" state.
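 * (To summarize the implicit state machine: INACTIVE with
 * un_failfast_bp == NULL -> "pending" when the first failfast error
 * saves its bp here -> ACTIVE, with the queues flushed, if that same
 * bp hits a second failfast error; a successful completion or a
 * non-failfast retry returns the state to INACTIVE.)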
14057 */ 14058 un->un_failfast_bp = bp; 14059 14060 } else if (un->un_failfast_bp == bp) { 14061 /* 14062 * This is the second time *this* bp has 14063 * encountered a failfast error condition, 14064 * so enter active failfast state & flush 14065 * queues as appropriate. 14066 */ 14067 un->un_failfast_state = SD_FAILFAST_ACTIVE; 14068 un->un_failfast_bp = NULL; 14069 sd_failfast_flushq(un); 14070 14071 /* 14072 * Fail this bp now if B_FAILFAST set; 14073 * otherwise continue with retries. (It would 14074 * be pretty ironic if this bp succeeded on a 14075 * subsequent retry after we just flushed all 14076 * the queues). 14077 */ 14078 if (bp->b_flags & B_FAILFAST) { 14079 goto fail_command; 14080 } 14081 14082 #if !defined(lint) && !defined(__lint) 14083 } else { 14084 /* 14085 * If neither of the preceding conditionals 14086 * was true, it means that there is some 14087 * *other* bp that has met an initial failfast 14088 * condition and is currently either being 14089 * retried or is waiting to be retried. In 14090 * that case we should perform normal retry 14091 * processing on *this* bp, since there is a 14092 * chance that the current failfast condition 14093 * is transient and recoverable. If that does 14094 * not turn out to be the case, then retries 14095 * will be cleared when the wait queue is 14096 * flushed anyway. 14097 */ 14098 #endif 14099 } 14100 } 14101 } else { 14102 /* 14103 * SD_RETRIES_FAILFAST is clear, which indicates that we 14104 * likely were able to at least establish some level of 14105 * communication with the target and subsequent commands 14106 * and/or retries are likely to get through to the target. 14107 * In this case we want to be aggressive about clearing 14108 * the failfast state. Note that this does not affect 14109 * the "failfast pending" condition. 14110 */ 14111 un->un_failfast_state = SD_FAILFAST_INACTIVE; 14112 } 14113 14114 14115 /* 14116 * Check the specified retry count to see if we can still do 14117 * any retries with this pkt before we should fail it. 14118 */ 14119 switch (retry_check_flag & SD_RETRIES_MASK) { 14120 case SD_RETRIES_VICTIM: 14121 /* 14122 * Check the victim retry count. If exhausted, then fall 14123 * through & check against the standard retry count. 14124 */ 14125 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 14126 /* Increment count & proceed with the retry */ 14127 xp->xb_victim_retry_count++; 14128 break; 14129 } 14130 /* Victim retries exhausted, fall back to std. retries... */ 14131 /* FALLTHRU */ 14132 14133 case SD_RETRIES_STANDARD: 14134 if (xp->xb_retry_count >= un->un_retry_count) { 14135 /* Retries exhausted, fail the command */ 14136 SD_TRACE(SD_LOG_IO_CORE, un, 14137 "sd_retry_command: retries exhausted!\n"); 14138 /* 14139 * update b_resid for failed SCMD_READ & SCMD_WRITE 14140 * commands with nonzero pkt_resid.
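 * (Illustrative example: a read that moved only part of its data
 * before exhausting retries completes with b_resid reflecting the
 * bytes not transferred, so the caller can detect the short transfer.)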
14141 */ 14142 if ((pktp->pkt_reason == CMD_CMPLT) && 14143 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 14144 (pktp->pkt_resid != 0)) { 14145 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 14146 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 14147 SD_UPDATE_B_RESID(bp, pktp); 14148 } 14149 } 14150 goto fail_command; 14151 } 14152 xp->xb_retry_count++; 14153 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14154 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 14155 break; 14156 14157 case SD_RETRIES_UA: 14158 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 14159 /* Retries exhausted, fail the command */ 14160 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14161 "Unit Attention retries exhausted. " 14162 "Check the target.\n"); 14163 goto fail_command; 14164 } 14165 xp->xb_ua_retry_count++; 14166 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14167 "sd_retry_command: retry count:%d\n", 14168 xp->xb_ua_retry_count); 14169 break; 14170 14171 case SD_RETRIES_BUSY: 14172 if (xp->xb_retry_count >= un->un_busy_retry_count) { 14173 /* Retries exhausted, fail the command */ 14174 SD_TRACE(SD_LOG_IO_CORE, un, 14175 "sd_retry_command: retries exhausted!\n"); 14176 goto fail_command; 14177 } 14178 xp->xb_retry_count++; 14179 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14180 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 14181 break; 14182 14183 case SD_RETRIES_NOCHECK: 14184 default: 14185 /* No retry count to check. Just proceed with the retry */ 14186 break; 14187 } 14188 14189 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14190 14191 /* 14192 * If we were given a zero timeout, we must attempt to retry the 14193 * command immediately (ie, without a delay). 14194 */ 14195 if (retry_delay == 0) { 14196 /* 14197 * Check some limiting conditions to see if we can actually 14198 * do the immediate retry. If we cannot, then we must 14199 * fall back to queueing up a delayed retry. 14200 */ 14201 if (un->un_ncmds_in_transport >= un->un_throttle) { 14202 /* 14203 * We are at the throttle limit for the target, 14204 * fall back to delayed retry. 14205 */ 14206 retry_delay = un->un_busy_timeout; 14207 statp = kstat_waitq_enter; 14208 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14209 "sd_retry_command: immed. retry hit " 14210 "throttle!\n"); 14211 } else { 14212 /* 14213 * We're clear to proceed with the immediate retry. 14214 * First call the user-provided function (if any) 14215 */ 14216 if (user_funcp != NULL) { 14217 (*user_funcp)(un, bp, user_arg, 14218 SD_IMMEDIATE_RETRY_ISSUED); 14219 #ifdef __lock_lint 14220 sd_print_incomplete_msg(un, bp, user_arg, 14221 SD_IMMEDIATE_RETRY_ISSUED); 14222 sd_print_cmd_incomplete_msg(un, bp, user_arg, 14223 SD_IMMEDIATE_RETRY_ISSUED); 14224 sd_print_sense_failed_msg(un, bp, user_arg, 14225 SD_IMMEDIATE_RETRY_ISSUED); 14226 #endif 14227 } 14228 14229 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14230 "sd_retry_command: issuing immediate retry\n"); 14231 14232 /* 14233 * Call sd_start_cmds() to transport the command to 14234 * the target. 14235 */ 14236 sd_start_cmds(un, bp); 14237 14238 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14239 "sd_retry_command exit\n"); 14240 return; 14241 } 14242 } 14243 14244 /* 14245 * Set up to retry the command after a delay. 
14246 * First call the user-provided function (if any) 14247 */ 14248 if (user_funcp != NULL) { 14249 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 14250 } 14251 14252 sd_set_retry_bp(un, bp, retry_delay, statp); 14253 14254 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14255 return; 14256 14257 fail_command: 14258 14259 if (user_funcp != NULL) { 14260 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 14261 } 14262 14263 fail_command_no_log: 14264 14265 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14266 "sd_retry_command: returning failed command\n"); 14267 14268 sd_return_failed_command(un, bp, failure_code); 14269 14270 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14271 } 14272 14273 14274 /* 14275 * Function: sd_set_retry_bp 14276 * 14277 * Description: Set up the given bp for retry. 14278 * 14279 * Arguments: un - ptr to associated softstate 14280 * bp - ptr to buf(9S) for the command 14281 * retry_delay - time interval before issuing retry (may be 0) 14282 * statp - optional pointer to kstat function 14283 * 14284 * Context: May be called under interrupt context 14285 */ 14286 14287 static void 14288 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 14289 void (*statp)(kstat_io_t *)) 14290 { 14291 ASSERT(un != NULL); 14292 ASSERT(mutex_owned(SD_MUTEX(un))); 14293 ASSERT(bp != NULL); 14294 14295 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14296 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 14297 14298 /* 14299 * Indicate that the command is being retried. This will not allow any 14300 * other commands on the wait queue to be transported to the target 14301 * until this command has been completed (success or failure). The 14302 * "retry command" is not transported to the target until the given 14303 * time delay expires, unless the user specified a 0 retry_delay. 14304 * 14305 * Note: the timeout(9F) callback routine is what actually calls 14306 * sd_start_cmds() to transport the command, with the exception of a 14307 * zero retry_delay. The only current implementor of a zero retry delay 14308 * is the case where a START_STOP_UNIT is sent to spin-up a device. 14309 */ 14310 if (un->un_retry_bp == NULL) { 14311 ASSERT(un->un_retry_statp == NULL); 14312 un->un_retry_bp = bp; 14313 14314 /* 14315 * If the user has not specified a delay the command should 14316 * be queued and no timeout should be scheduled. 14317 */ 14318 if (retry_delay == 0) { 14319 /* 14320 * Save the kstat pointer that will be used in the 14321 * call to SD_UPDATE_KSTATS() below, so that 14322 * sd_start_cmds() can correctly decrement the waitq 14323 * count when it is time to transport this command. 14324 */ 14325 un->un_retry_statp = statp; 14326 goto done; 14327 } 14328 } 14329 14330 if (un->un_retry_bp == bp) { 14331 /* 14332 * Save the kstat pointer that will be used in the call to 14333 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 14334 * correctly decrement the waitq count when it is time to 14335 * transport this command. 14336 */ 14337 un->un_retry_statp = statp; 14338 14339 /* 14340 * Schedule a timeout if: 14341 * 1) The user has specified a delay. 14342 * 2) There is not a START_STOP_UNIT callback pending. 14343 * 14344 * If no delay has been specified, then it is up to the caller 14345 * to ensure that IO processing continues without stalling. 14346 * Effectively, this means that the caller will issue the 14347 * required call to sd_start_cmds(). 
The START_STOP_UNIT 14348 * callback does this after the START STOP UNIT command has 14349 * completed. In either of these cases we should not schedule 14350 * a timeout callback here. Also don't schedule the timeout if 14351 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 14352 */ 14353 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 14354 (un->un_direct_priority_timeid == NULL)) { 14355 un->un_retry_timeid = 14356 timeout(sd_start_retry_command, un, retry_delay); 14357 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14358 "sd_set_retry_bp: setting timeout: un: 0x%p" 14359 " bp:0x%p un_retry_timeid:0x%p\n", 14360 un, bp, un->un_retry_timeid); 14361 } 14362 } else { 14363 /* 14364 * We only get in here if there is already another command 14365 * waiting to be retried. In this case, we just put the 14366 * given command onto the wait queue, so it can be transported 14367 * after the current retry command has completed. 14368 * 14369 * Also we have to make sure that if the command at the head 14370 * of the wait queue is the un_failfast_bp, we do not 14371 * put ahead of it any other commands that are to be retried. 14372 */ 14373 if ((un->un_failfast_bp != NULL) && 14374 (un->un_failfast_bp == un->un_waitq_headp)) { 14375 /* 14376 * Enqueue this command AFTER the first command on 14377 * the wait queue (which is also un_failfast_bp). 14378 */ 14379 bp->av_forw = un->un_waitq_headp->av_forw; 14380 un->un_waitq_headp->av_forw = bp; 14381 if (un->un_waitq_headp == un->un_waitq_tailp) { 14382 un->un_waitq_tailp = bp; 14383 } 14384 } else { 14385 /* Enqueue this command at the head of the waitq. */ 14386 bp->av_forw = un->un_waitq_headp; 14387 un->un_waitq_headp = bp; 14388 if (un->un_waitq_tailp == NULL) { 14389 un->un_waitq_tailp = bp; 14390 } 14391 } 14392 14393 if (statp == NULL) { 14394 statp = kstat_waitq_enter; 14395 } 14396 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14397 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 14398 } 14399 14400 done: 14401 if (statp != NULL) { 14402 SD_UPDATE_KSTATS(un, statp, bp); 14403 } 14404 14405 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14406 "sd_set_retry_bp: exit un:0x%p\n", un); 14407 } 14408 14409 14410 /* 14411 * Function: sd_start_retry_command 14412 * 14413 * Description: Start the command that has been waiting on the target's 14414 * retry queue. Called from timeout(9F) context after the 14415 * retry delay interval has expired. 14416 * 14417 * Arguments: arg - pointer to associated softstate for the device. 14418 * 14419 * Context: timeout(9F) thread context. May not sleep.
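 *
 *		Illustrative sketch (not driver code): the arm/cancel pair
 *		around this callback uses plain timeout(9F)/untimeout(9F)
 *		semantics, roughly
 *
 *		    un->un_retry_timeid =
 *			timeout(sd_start_retry_command, un, retry_delay);
 *		    ...
 *		    (void) untimeout(un->un_retry_timeid);
 *
 *		Since timeout(9F) fires without SD_MUTEX held, the callback
 *		must acquire the mutex itself before it may examine
 *		un_retry_bp.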
14420 */ 14421 14422 static void 14423 sd_start_retry_command(void *arg) 14424 { 14425 struct sd_lun *un = arg; 14426 14427 ASSERT(un != NULL); 14428 ASSERT(!mutex_owned(SD_MUTEX(un))); 14429 14430 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14431 "sd_start_retry_command: entry\n"); 14432 14433 mutex_enter(SD_MUTEX(un)); 14434 14435 un->un_retry_timeid = NULL; 14436 14437 if (un->un_retry_bp != NULL) { 14438 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14439 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 14440 un, un->un_retry_bp); 14441 sd_start_cmds(un, un->un_retry_bp); 14442 } 14443 14444 mutex_exit(SD_MUTEX(un)); 14445 14446 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14447 "sd_start_retry_command: exit\n"); 14448 } 14449 14450 14451 /* 14452 * Function: sd_start_direct_priority_command 14453 * 14454 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 14455 * received TRAN_BUSY when we called scsi_transport() to send it 14456 * to the underlying HBA. This function is called from timeout(9F) 14457 * context after the delay interval has expired. 14458 * 14459 * Arguments: arg - pointer to associated buf(9S) to be restarted. 14460 * 14461 * Context: timeout(9F) thread context. May not sleep. 14462 */ 14463 14464 static void 14465 sd_start_direct_priority_command(void *arg) 14466 { 14467 struct buf *priority_bp = arg; 14468 struct sd_lun *un; 14469 14470 ASSERT(priority_bp != NULL); 14471 un = SD_GET_UN(priority_bp); 14472 ASSERT(un != NULL); 14473 ASSERT(!mutex_owned(SD_MUTEX(un))); 14474 14475 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14476 "sd_start_direct_priority_command: entry\n"); 14477 14478 mutex_enter(SD_MUTEX(un)); 14479 un->un_direct_priority_timeid = NULL; 14480 sd_start_cmds(un, priority_bp); 14481 mutex_exit(SD_MUTEX(un)); 14482 14483 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14484 "sd_start_direct_priority_command: exit\n"); 14485 } 14486 14487 14488 /* 14489 * Function: sd_send_request_sense_command 14490 * 14491 * Description: Sends a REQUEST SENSE command to the target 14492 * 14493 * Context: May be called from interrupt context. 14494 */ 14495 14496 static void 14497 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 14498 struct scsi_pkt *pktp) 14499 { 14500 ASSERT(bp != NULL); 14501 ASSERT(un != NULL); 14502 ASSERT(mutex_owned(SD_MUTEX(un))); 14503 14504 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 14505 "entry: buf:0x%p\n", bp); 14506 14507 /* 14508 * If we are syncing or dumping, then fail the command to avoid a 14509 * recursive callback into scsi_transport(). Also fail the command 14510 * if we are suspended (legacy behavior). 14511 */ 14512 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 14513 (un->un_state == SD_STATE_DUMPING)) { 14514 sd_return_failed_command(un, bp, EIO); 14515 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14516 "sd_send_request_sense_command: syncing/dumping, exit\n"); 14517 return; 14518 } 14519 14520 /* 14521 * Retry the failed command and don't issue the request sense if: 14522 * 1) the sense buf is busy 14523 * 2) we have 1 or more outstanding commands on the target 14524 * (the sense data will be cleared or invalidated anyway) 14525 * 14526 * Note: There could be an issue with not checking a retry limit here; 14527 * the problem is determining which retry limit to check.
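 *
 * Illustrative restatement (sketch only) of the guard below:
 *
 *	defer = (un->un_sense_isbusy != 0) ||
 *	    (un->un_ncmds_in_transport > 0);
 *
 * where "defer" means the original command is retried and the
 * REQUEST SENSE is not issued on this pass.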
14528 */ 14529 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 14530 /* Don't retry if the command is flagged as non-retryable */ 14531 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 14532 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14533 NULL, NULL, 0, un->un_busy_timeout, 14534 kstat_waitq_enter); 14535 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14536 "sd_send_request_sense_command: " 14537 "at full throttle, retrying exit\n"); 14538 } else { 14539 sd_return_failed_command(un, bp, EIO); 14540 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14541 "sd_send_request_sense_command: " 14542 "at full throttle, non-retryable exit\n"); 14543 } 14544 return; 14545 } 14546 14547 sd_mark_rqs_busy(un, bp); 14548 sd_start_cmds(un, un->un_rqs_bp); 14549 14550 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14551 "sd_send_request_sense_command: exit\n"); 14552 } 14553 14554 14555 /* 14556 * Function: sd_mark_rqs_busy 14557 * 14558 * Description: Indicate that the request sense bp for this instance is 14559 * in use. 14560 * 14561 * Context: May be called under interrupt context 14562 */ 14563 14564 static void 14565 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 14566 { 14567 struct sd_xbuf *sense_xp; 14568 14569 ASSERT(un != NULL); 14570 ASSERT(bp != NULL); 14571 ASSERT(mutex_owned(SD_MUTEX(un))); 14572 ASSERT(un->un_sense_isbusy == 0); 14573 14574 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 14575 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 14576 14577 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 14578 ASSERT(sense_xp != NULL); 14579 14580 SD_INFO(SD_LOG_IO, un, 14581 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 14582 14583 ASSERT(sense_xp->xb_pktp != NULL); 14584 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 14585 == (FLAG_SENSING | FLAG_HEAD)); 14586 14587 un->un_sense_isbusy = 1; 14588 un->un_rqs_bp->b_resid = 0; 14589 sense_xp->xb_pktp->pkt_resid = 0; 14590 sense_xp->xb_pktp->pkt_reason = 0; 14591 14592 /* So we can get back the bp at interrupt time! */ 14593 sense_xp->xb_sense_bp = bp; 14594 14595 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 14596 14597 /* 14598 * Mark this buf as awaiting sense data. (This is already set in 14599 * the pkt_flags for the RQS packet.) 14600 */ 14601 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 14602 14603 sense_xp->xb_retry_count = 0; 14604 sense_xp->xb_victim_retry_count = 0; 14605 sense_xp->xb_ua_retry_count = 0; 14606 sense_xp->xb_nr_retry_count = 0; 14607 sense_xp->xb_dma_resid = 0; 14608 14609 /* Clean up the fields for auto-request sense */ 14610 sense_xp->xb_sense_status = 0; 14611 sense_xp->xb_sense_state = 0; 14612 sense_xp->xb_sense_resid = 0; 14613 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 14614 14615 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 14616 } 14617 14618 14619 /* 14620 * Function: sd_mark_rqs_idle 14621 * 14622 * Description: SD_MUTEX must be held continuously through this routine 14623 * to prevent reuse of the rqs struct before the caller can 14624 * complete its processing.
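 *
 *		A typical caller pattern (illustrative sketch; SD_MUTEX
 *		must already be held, as asserted in the routine):
 *
 *		    cmd_bp = sd_mark_rqs_idle(un, sense_xp);
 *		    ... dispatch cmd_bp based upon the sense data ...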
14625 * 14626 * Return Code: Pointer to the RQS buf 14627 * 14628 * Context: May be called under interrupt context 14629 */ 14630 14631 static struct buf * 14632 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 14633 { 14634 struct buf *bp; 14635 ASSERT(un != NULL); 14636 ASSERT(sense_xp != NULL); 14637 ASSERT(mutex_owned(SD_MUTEX(un))); 14638 ASSERT(un->un_sense_isbusy != 0); 14639 14640 un->un_sense_isbusy = 0; 14641 bp = sense_xp->xb_sense_bp; 14642 sense_xp->xb_sense_bp = NULL; 14643 14644 /* This pkt is no longer interested in getting sense data */ 14645 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 14646 14647 return (bp); 14648 } 14649 14650 14651 14652 /* 14653 * Function: sd_alloc_rqs 14654 * 14655 * Description: Set up the unit to receive auto request sense data 14656 * 14657 * Return Code: DDI_SUCCESS or DDI_FAILURE 14658 * 14659 * Context: Called under attach(9E) context 14660 */ 14661 14662 static int 14663 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 14664 { 14665 struct sd_xbuf *xp; 14666 14667 ASSERT(un != NULL); 14668 ASSERT(!mutex_owned(SD_MUTEX(un))); 14669 ASSERT(un->un_rqs_bp == NULL); 14670 ASSERT(un->un_rqs_pktp == NULL); 14671 14672 /* 14673 * First allocate the required buf and scsi_pkt structs, then set up 14674 * the CDB in the scsi_pkt for a REQUEST SENSE command. 14675 */ 14676 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 14677 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 14678 if (un->un_rqs_bp == NULL) { 14679 return (DDI_FAILURE); 14680 } 14681 14682 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 14683 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 14684 14685 if (un->un_rqs_pktp == NULL) { 14686 sd_free_rqs(un); 14687 return (DDI_FAILURE); 14688 } 14689 14690 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 14691 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 14692 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 14693 14694 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 14695 14696 /* Set up the other needed members in the ARQ scsi_pkt. */ 14697 un->un_rqs_pktp->pkt_comp = sdintr; 14698 un->un_rqs_pktp->pkt_time = sd_io_time; 14699 un->un_rqs_pktp->pkt_flags |= 14700 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 14701 14702 /* 14703 * Allocate & init the sd_xbuf struct for the RQS command. Do not 14704 * provide any intpkt, destroypkt routines as we take care of 14705 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 14706 */ 14707 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14708 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 14709 xp->xb_pktp = un->un_rqs_pktp; 14710 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14711 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 14712 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 14713 14714 /* 14715 * Save the pointer to the request sense private bp so it can 14716 * be retrieved in sdintr. 14717 */ 14718 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 14719 ASSERT(un->un_rqs_bp->b_private == xp); 14720 14721 /* 14722 * See if the HBA supports auto-request sense for the specified 14723 * target/lun. If it does, then try to enable it (if not already 14724 * enabled). 14725 * 14726 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 14727 * failure, while for other HBAs (pln) scsi_ifsetcap will always 14728 * return success. However, in both of these cases ARQ is always 14729 * enabled and scsi_ifgetcap will always return true. 
The best approach 14730 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 14731 * 14732 * The third case is an HBA (adp) that always returns enabled on 14733 * scsi_ifgetcap even when ARQ is not enabled; the best approach 14734 * is to issue a scsi_ifsetcap and then a scsi_ifgetcap. 14735 * Note: this case is to circumvent the Adaptec bug. (x86 only) 14736 */ 14737 14738 if (un->un_f_is_fibre == TRUE) { 14739 un->un_f_arq_enabled = TRUE; 14740 } else { 14741 #if defined(__i386) || defined(__amd64) 14742 /* 14743 * Circumvent the Adaptec bug, remove this code when 14744 * the bug is fixed 14745 */ 14746 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 14747 #endif 14748 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 14749 case 0: 14750 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14751 "sd_alloc_rqs: HBA supports ARQ\n"); 14752 /* 14753 * ARQ is supported by this HBA but currently is not 14754 * enabled. Attempt to enable it and if successful then 14755 * mark this instance as ARQ enabled. 14756 */ 14757 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 14758 == 1) { 14759 /* Successfully enabled ARQ in the HBA */ 14760 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14761 "sd_alloc_rqs: ARQ enabled\n"); 14762 un->un_f_arq_enabled = TRUE; 14763 } else { 14764 /* Could not enable ARQ in the HBA */ 14765 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14766 "sd_alloc_rqs: failed ARQ enable\n"); 14767 un->un_f_arq_enabled = FALSE; 14768 } 14769 break; 14770 case 1: 14771 /* 14772 * ARQ is supported by this HBA and is already enabled. 14773 * Just mark ARQ as enabled for this instance. 14774 */ 14775 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14776 "sd_alloc_rqs: ARQ already enabled\n"); 14777 un->un_f_arq_enabled = TRUE; 14778 break; 14779 default: 14780 /* 14781 * ARQ is not supported by this HBA; disable it for this 14782 * instance. 14783 */ 14784 SD_INFO(SD_LOG_ATTACH_DETACH, un, 14785 "sd_alloc_rqs: HBA does not support ARQ\n"); 14786 un->un_f_arq_enabled = FALSE; 14787 break; 14788 } 14789 } 14790 14791 return (DDI_SUCCESS); 14792 } 14793 14794 14795 /* 14796 * Function: sd_free_rqs 14797 * 14798 * Description: Cleanup for the per-instance RQS command. 14799 * 14800 * Context: Kernel thread context 14801 */ 14802 14803 static void 14804 sd_free_rqs(struct sd_lun *un) 14805 { 14806 ASSERT(un != NULL); 14807 14808 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 14809 14810 /* 14811 * If consistent memory is bound to a scsi_pkt, the pkt 14812 * has to be destroyed *before* freeing the consistent memory. 14813 * Don't change the sequence of these operations: 14814 * scsi_destroy_pkt() might otherwise access memory after it has 14815 * been freed by scsi_free_consistent_buf(), which is not allowed. 14816 */ 14817 if (un->un_rqs_pktp != NULL) { 14818 scsi_destroy_pkt(un->un_rqs_pktp); 14819 un->un_rqs_pktp = NULL; 14820 } 14821 14822 if (un->un_rqs_bp != NULL) { 14823 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 14824 if (xp != NULL) { 14825 kmem_free(xp, sizeof (struct sd_xbuf)); 14826 } 14827 scsi_free_consistent_buf(un->un_rqs_bp); 14828 un->un_rqs_bp = NULL; 14829 } 14830 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 14831 } 14832 14833 14834 14835 /* 14836 * Function: sd_reduce_throttle 14837 * 14838 * Description: Reduces the maximum # of outstanding commands on a 14839 * target to the current number of outstanding commands. 14840 * Queues a timeout(9F) callback to restore the limit 14841 * after a specified interval has elapsed.
14842 * Typically used when we get a TRAN_BUSY return code 14843 * back from scsi_transport(). 14844 * 14845 * Arguments: un - ptr to the sd_lun softstate struct 14846 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 14847 * 14848 * Context: May be called from interrupt context 14849 */ 14850 14851 static void 14852 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 14853 { 14854 ASSERT(un != NULL); 14855 ASSERT(mutex_owned(SD_MUTEX(un))); 14856 ASSERT(un->un_ncmds_in_transport >= 0); 14857 14858 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14859 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 14860 un, un->un_throttle, un->un_ncmds_in_transport); 14861 14862 if (un->un_throttle > 1) { 14863 if (un->un_f_use_adaptive_throttle == TRUE) { 14864 switch (throttle_type) { 14865 case SD_THROTTLE_TRAN_BUSY: 14866 if (un->un_busy_throttle == 0) { 14867 un->un_busy_throttle = un->un_throttle; 14868 } 14869 break; 14870 case SD_THROTTLE_QFULL: 14871 un->un_busy_throttle = 0; 14872 break; 14873 default: 14874 ASSERT(FALSE); 14875 } 14876 14877 if (un->un_ncmds_in_transport > 0) { 14878 un->un_throttle = un->un_ncmds_in_transport; 14879 } 14880 14881 } else { 14882 if (un->un_ncmds_in_transport == 0) { 14883 un->un_throttle = 1; 14884 } else { 14885 un->un_throttle = un->un_ncmds_in_transport; 14886 } 14887 } 14888 } 14889 14890 /* Reschedule the timeout if none is currently active */ 14891 if (un->un_reset_throttle_timeid == NULL) { 14892 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 14893 un, SD_THROTTLE_RESET_INTERVAL); 14894 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14895 "sd_reduce_throttle: timeout scheduled!\n"); 14896 } 14897 14898 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 14899 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14900 } 14901 14902 14903 14904 /* 14905 * Function: sd_restore_throttle 14906 * 14907 * Description: Callback function for timeout(9F). Resets the current 14908 * value of un->un_throttle to its default. 14909 * 14910 * Arguments: arg - pointer to associated softstate for the device. 14911 * 14912 * Context: May be called from interrupt context 14913 */ 14914 14915 static void 14916 sd_restore_throttle(void *arg) 14917 { 14918 struct sd_lun *un = arg; 14919 14920 ASSERT(un != NULL); 14921 ASSERT(!mutex_owned(SD_MUTEX(un))); 14922 14923 mutex_enter(SD_MUTEX(un)); 14924 14925 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14926 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 14927 14928 un->un_reset_throttle_timeid = NULL; 14929 14930 if (un->un_f_use_adaptive_throttle == TRUE) { 14931 /* 14932 * If un_busy_throttle is nonzero, then it contains the 14933 * value that un_throttle was when we got a TRAN_BUSY back 14934 * from scsi_transport(). We want to revert to this 14935 * value. 14936 * 14937 * In the QFULL case, the throttle limit will incrementally 14938 * increase until it reaches max throttle. 14939 */ 14940 if (un->un_busy_throttle > 0) { 14941 un->un_throttle = un->un_busy_throttle; 14942 un->un_busy_throttle = 0; 14943 } else { 14944 /* 14945 * Increase the throttle by 10% to open the gate 14946 * slowly; schedule another restore if the saved 14947 * throttle has not been reached. 14948 */ 14949 short throttle; 14950 if (sd_qfull_throttle_enable) { 14951 throttle = un->un_throttle + 14952 max((un->un_throttle / 10), 1); 14953 un->un_throttle = 14954 (throttle < un->un_saved_throttle) ?
14955 throttle : un->un_saved_throttle; 14956 if (un->un_throttle < un->un_saved_throttle) { 14957 un->un_reset_throttle_timeid = 14958 timeout(sd_restore_throttle, 14959 un, 14960 SD_QFULL_THROTTLE_RESET_INTERVAL); 14961 } 14962 } 14963 } 14964 14965 /* 14966 * If un_throttle has fallen below the low-water mark, we 14967 * restore the maximum value here (and allow it to ratchet 14968 * down again if necessary). 14969 */ 14970 if (un->un_throttle < un->un_min_throttle) { 14971 un->un_throttle = un->un_saved_throttle; 14972 } 14973 } else { 14974 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 14975 "restoring limit from 0x%x to 0x%x\n", 14976 un->un_throttle, un->un_saved_throttle); 14977 un->un_throttle = un->un_saved_throttle; 14978 } 14979 14980 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14981 "sd_restore_throttle: calling sd_start_cmds!\n"); 14982 14983 sd_start_cmds(un, NULL); 14984 14985 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14986 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 14987 un, un->un_throttle); 14988 14989 mutex_exit(SD_MUTEX(un)); 14990 14991 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 14992 } 14993 14994 /* 14995 * Function: sdrunout 14996 * 14997 * Description: Callback routine for scsi_init_pkt when a resource allocation 14998 * fails. 14999 * 15000 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 15001 * soft state instance. 15002 * 15003 * Return Code: The scsi_init_pkt routine allows for the callback function to 15004 * return a 0 indicating the callback should be rescheduled or a 1 15005 * indicating not to reschedule. This routine always returns 1 15006 * because the driver always provides a callback function to 15007 * scsi_init_pkt. This results in a callback always being scheduled 15008 * (via the scsi_init_pkt callback implementation) if a resource 15009 * failure occurs. 15010 * 15011 * Context: This callback function may not block or call routines that block 15012 * 15013 * Note: Using the scsi_init_pkt callback facility can result in an I/O 15014 * request persisting at the head of the list which cannot be 15015 * satisfied even after multiple retries. In the future the driver 15016 * may implement some type of maximum runout count before failing 15017 * an I/O. 15018 */ 15019 15020 static int 15021 sdrunout(caddr_t arg) 15022 { 15023 struct sd_lun *un = (struct sd_lun *)arg; 15024 15025 ASSERT(un != NULL); 15026 ASSERT(!mutex_owned(SD_MUTEX(un))); 15027 15028 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 15029 15030 mutex_enter(SD_MUTEX(un)); 15031 sd_start_cmds(un, NULL); 15032 mutex_exit(SD_MUTEX(un)); 15033 /* 15034 * This callback routine always returns 1 (i.e. do not reschedule) 15035 * because we always specify sdrunout as the callback handler for 15036 * scsi_init_pkt inside the call to sd_start_cmds. 15037 */ 15038 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 15039 return (1); 15040 } 15041 15042 15043 /* 15044 * Function: sdintr 15045 * 15046 * Description: Completion callback routine for scsi_pkt(9S) structs 15047 * sent to the HBA driver via scsi_transport(9F).
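 *
 *		How control reaches this routine (sketch): the driver
 *		installs the completion callback when a packet is
 *		constructed, e.g.
 *
 *		    pktp->pkt_comp = sdintr;
 *		    pktp->pkt_private = bp;
 *
 *		so upon completion the HBA framework calls sdintr(pktp)
 *		and the original buf(9S) is recovered from pkt_private.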
15048 * 15049 * Context: Interrupt context 15050 */ 15051 15052 static void 15053 sdintr(struct scsi_pkt *pktp) 15054 { 15055 struct buf *bp; 15056 struct sd_xbuf *xp; 15057 struct sd_lun *un; 15058 size_t actual_len; 15059 15060 ASSERT(pktp != NULL); 15061 bp = (struct buf *)pktp->pkt_private; 15062 ASSERT(bp != NULL); 15063 xp = SD_GET_XBUF(bp); 15064 ASSERT(xp != NULL); 15065 ASSERT(xp->xb_pktp != NULL); 15066 un = SD_GET_UN(bp); 15067 ASSERT(un != NULL); 15068 ASSERT(!mutex_owned(SD_MUTEX(un))); 15069 15070 #ifdef SD_FAULT_INJECTION 15071 15072 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 15073 /* SD FaultInjection */ 15074 sd_faultinjection(pktp); 15075 15076 #endif /* SD_FAULT_INJECTION */ 15077 15078 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 15079 " xp:0x%p, un:0x%p\n", bp, xp, un); 15080 15081 mutex_enter(SD_MUTEX(un)); 15082 15083 /* Reduce the count of the #commands currently in transport */ 15084 un->un_ncmds_in_transport--; 15085 ASSERT(un->un_ncmds_in_transport >= 0); 15086 15087 /* Increment counter to indicate that the callback routine is active */ 15088 un->un_in_callback++; 15089 15090 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15091 15092 #ifdef SDDEBUG 15093 if (bp == un->un_retry_bp) { 15094 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 15095 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 15096 un, un->un_retry_bp, un->un_ncmds_in_transport); 15097 } 15098 #endif 15099 15100 /* 15101 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 15102 * state if needed. 15103 */ 15104 if (pktp->pkt_reason == CMD_DEV_GONE) { 15105 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15106 "Command failed to complete...Device is gone\n"); 15107 if (un->un_mediastate != DKIO_DEV_GONE) { 15108 un->un_mediastate = DKIO_DEV_GONE; 15109 cv_broadcast(&un->un_state_cv); 15110 } 15111 sd_return_failed_command(un, bp, EIO); 15112 goto exit; 15113 } 15114 15115 if (pktp->pkt_state & STATE_XARQ_DONE) { 15116 SD_TRACE(SD_LOG_COMMON, un, 15117 "sdintr: extra sense data received. pkt=%p\n", pktp); 15118 } 15119 15120 /* 15121 * First see if the pkt has auto-request sense data with it.... 15122 * Look at the packet state first so we don't take a performance 15123 * hit looking at the arq enabled flag unless absolutely necessary. 15124 */ 15125 if ((pktp->pkt_state & STATE_ARQ_DONE) && 15126 (un->un_f_arq_enabled == TRUE)) { 15127 /* 15128 * The HBA did an auto request sense for this command so check 15129 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15130 * driver command that should not be retried. 15131 */ 15132 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15133 /* 15134 * Save the relevant sense info into the xp for the 15135 * original cmd. 
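 *
 * Layout sketch for the auto-sense path: when the HBA performs ARQ,
 * pkt_scbp points at a struct scsi_arq_status, so the embedded
 * request-sense status and data are reached as
 *
 *	asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
 *	bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH);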
15136 */ 15137 struct scsi_arq_status *asp; 15138 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15139 xp->xb_sense_status = 15140 *((uchar_t *)(&(asp->sts_rqpkt_status))); 15141 xp->xb_sense_state = asp->sts_rqpkt_state; 15142 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15143 if (pktp->pkt_state & STATE_XARQ_DONE) { 15144 actual_len = MAX_SENSE_LENGTH - 15145 xp->xb_sense_resid; 15146 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15147 MAX_SENSE_LENGTH); 15148 } else { 15149 if (xp->xb_sense_resid > SENSE_LENGTH) { 15150 actual_len = MAX_SENSE_LENGTH - 15151 xp->xb_sense_resid; 15152 } else { 15153 actual_len = SENSE_LENGTH - 15154 xp->xb_sense_resid; 15155 } 15156 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15157 if ((((struct uscsi_cmd *) 15158 (xp->xb_pktinfo))->uscsi_rqlen) > 15159 actual_len) { 15160 xp->xb_sense_resid = 15161 (((struct uscsi_cmd *) 15162 (xp->xb_pktinfo))-> 15163 uscsi_rqlen) - actual_len; 15164 } else { 15165 xp->xb_sense_resid = 0; 15166 } 15167 } 15168 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15169 SENSE_LENGTH); 15170 } 15171 15172 /* fail the command */ 15173 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15174 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 15175 sd_return_failed_command(un, bp, EIO); 15176 goto exit; 15177 } 15178 15179 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15180 /* 15181 * We want to either retry or fail this command, so free 15182 * the DMA resources here. If we retry the command then 15183 * the DMA resources will be reallocated in sd_start_cmds(). 15184 * Note that when PKT_DMA_PARTIAL is used, this reallocation 15185 * causes the *entire* transfer to start over again from the 15186 * beginning of the request, even for PARTIAL chunks that 15187 * have already transferred successfully. 15188 */ 15189 if ((un->un_f_is_fibre == TRUE) && 15190 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15191 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15192 scsi_dmafree(pktp); 15193 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15194 } 15195 #endif 15196 15197 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15198 "sdintr: arq done, sd_handle_auto_request_sense\n"); 15199 15200 sd_handle_auto_request_sense(un, bp, xp, pktp); 15201 goto exit; 15202 } 15203 15204 /* Next see if this is the REQUEST SENSE pkt for the instance */ 15205 if (pktp->pkt_flags & FLAG_SENSING) { 15206 /* This pktp is from the unit's REQUEST_SENSE command */ 15207 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15208 "sdintr: sd_handle_request_sense\n"); 15209 sd_handle_request_sense(un, bp, xp, pktp); 15210 goto exit; 15211 } 15212 15213 /* 15214 * Check to see if the command successfully completed as requested; 15215 * this is the most common case (and also the hot performance path). 15216 * 15217 * Requirements for successful completion are: 15218 * pkt_reason is CMD_CMPLT and packet status is status good. 15219 * In addition: 15220 * - A residual of zero indicates successful completion no matter what 15221 * the command is. 15222 * - If the residual is not zero and the command is not a read or 15223 * write, then it's still defined as successful completion. In other 15224 * words, if the command is a read or write the residual must be 15225 * zero for successful completion. 15226 * - If the residual is not zero and the command is a read or 15227 * write, and it's a USCSICMD, then it's still defined as 15228 * successful completion. 
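 *
 * Expressed as a single predicate (sketch, mirroring the tests below):
 *
 *	ok = (pkt_reason == CMD_CMPLT) && (status == STATUS_GOOD) &&
 *	    ((pkt_resid == 0) ||
 *	    (opcode != SCMD_READ && opcode != SCMD_WRITE) ||
 *	    (xp->xb_pkt_flags & SD_XB_USCSICMD));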
15229 */ 15230 if ((pktp->pkt_reason == CMD_CMPLT) && 15231 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 15232 15233 /* 15234 * Since this command is returned with a good status, we 15235 * can reset the count for Sonoma failover. 15236 */ 15237 un->un_sonoma_failure_count = 0; 15238 15239 /* 15240 * Return all USCSI commands on good status 15241 */ 15242 if (pktp->pkt_resid == 0) { 15243 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15244 "sdintr: returning command for resid == 0\n"); 15245 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 15246 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 15247 SD_UPDATE_B_RESID(bp, pktp); 15248 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15249 "sdintr: returning command for resid != 0\n"); 15250 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15251 SD_UPDATE_B_RESID(bp, pktp); 15252 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15253 "sdintr: returning uscsi command\n"); 15254 } else { 15255 goto not_successful; 15256 } 15257 sd_return_command(un, bp); 15258 15259 /* 15260 * Decrement counter to indicate that the callback routine 15261 * is done. 15262 */ 15263 un->un_in_callback--; 15264 ASSERT(un->un_in_callback >= 0); 15265 mutex_exit(SD_MUTEX(un)); 15266 15267 return; 15268 } 15269 15270 not_successful: 15271 15272 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15273 /* 15274 * The following is based upon knowledge of the underlying transport 15275 * and its use of DMA resources. This code should be removed when 15276 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 15277 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 15278 * and sd_start_cmds(). 15279 * 15280 * Free any DMA resources associated with this command if there 15281 * is a chance it could be retried or enqueued for later retry. 15282 * If we keep the DMA binding then mpxio cannot reissue the 15283 * command on another path whenever a path failure occurs. 15284 * 15285 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 15286 * causes the *entire* transfer to start over again from the 15287 * beginning of the request, even for PARTIAL chunks that 15288 * have already transferred successfully. 15289 * 15290 * This is only done for non-uscsi commands (and also skipped for the 15291 * driver's internal RQS command). Also just do this for Fibre Channel 15292 * devices as these are the only ones that support mpxio. 15293 */ 15294 if ((un->un_f_is_fibre == TRUE) && 15295 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15296 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15297 scsi_dmafree(pktp); 15298 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15299 } 15300 #endif 15301 15302 /* 15303 * The command did not successfully complete as requested so check 15304 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15305 * driver command that should not be retried so just return. If 15306 * FLAG_DIAGNOSE is not set the error will be processed below. 15307 */ 15308 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15309 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15310 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 15311 /* 15312 * Issue a request sense if a check condition caused the error 15313 * (we handle the auto request sense case above), otherwise 15314 * just fail the command. 
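 *
 * Sketch: FLAG_DIAGNOSE is set by the issuer of such commands, e.g.
 *
 *	pktp->pkt_flags |= FLAG_DIAGNOSE;
 *
 * which tells this path to hand the error straight back (after at
 * most one REQUEST SENSE) instead of applying the retry logic below.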
15315 */ 15316 if ((pktp->pkt_reason == CMD_CMPLT) && 15317 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 15318 sd_send_request_sense_command(un, bp, pktp); 15319 } else { 15320 sd_return_failed_command(un, bp, EIO); 15321 } 15322 goto exit; 15323 } 15324 15325 /* 15326 * The command did not successfully complete as requested so process 15327 * the error, retry, and/or attempt recovery. 15328 */ 15329 switch (pktp->pkt_reason) { 15330 case CMD_CMPLT: 15331 switch (SD_GET_PKT_STATUS(pktp)) { 15332 case STATUS_GOOD: 15333 /* 15334 * The command completed successfully with a non-zero 15335 * residual 15336 */ 15337 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15338 "sdintr: STATUS_GOOD \n"); 15339 sd_pkt_status_good(un, bp, xp, pktp); 15340 break; 15341 15342 case STATUS_CHECK: 15343 case STATUS_TERMINATED: 15344 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15345 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 15346 sd_pkt_status_check_condition(un, bp, xp, pktp); 15347 break; 15348 15349 case STATUS_BUSY: 15350 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15351 "sdintr: STATUS_BUSY\n"); 15352 sd_pkt_status_busy(un, bp, xp, pktp); 15353 break; 15354 15355 case STATUS_RESERVATION_CONFLICT: 15356 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15357 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 15358 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15359 break; 15360 15361 case STATUS_QFULL: 15362 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15363 "sdintr: STATUS_QFULL\n"); 15364 sd_pkt_status_qfull(un, bp, xp, pktp); 15365 break; 15366 15367 case STATUS_MET: 15368 case STATUS_INTERMEDIATE: 15369 case STATUS_SCSI2: 15370 case STATUS_INTERMEDIATE_MET: 15371 case STATUS_ACA_ACTIVE: 15372 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15373 "Unexpected SCSI status received: 0x%x\n", 15374 SD_GET_PKT_STATUS(pktp)); 15375 sd_return_failed_command(un, bp, EIO); 15376 break; 15377 15378 default: 15379 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15380 "Invalid SCSI status received: 0x%x\n", 15381 SD_GET_PKT_STATUS(pktp)); 15382 sd_return_failed_command(un, bp, EIO); 15383 break; 15384 15385 } 15386 break; 15387 15388 case CMD_INCOMPLETE: 15389 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15390 "sdintr: CMD_INCOMPLETE\n"); 15391 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 15392 break; 15393 case CMD_TRAN_ERR: 15394 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15395 "sdintr: CMD_TRAN_ERR\n"); 15396 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 15397 break; 15398 case CMD_RESET: 15399 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15400 "sdintr: CMD_RESET \n"); 15401 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 15402 break; 15403 case CMD_ABORTED: 15404 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15405 "sdintr: CMD_ABORTED \n"); 15406 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 15407 break; 15408 case CMD_TIMEOUT: 15409 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15410 "sdintr: CMD_TIMEOUT\n"); 15411 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 15412 break; 15413 case CMD_UNX_BUS_FREE: 15414 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15415 "sdintr: CMD_UNX_BUS_FREE \n"); 15416 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 15417 break; 15418 case CMD_TAG_REJECT: 15419 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15420 "sdintr: CMD_TAG_REJECT\n"); 15421 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 15422 break; 15423 default: 15424 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15425 "sdintr: default\n"); 15426 sd_pkt_reason_default(un, bp, xp, pktp); 15427 break; 15428 } 15429 15430 exit: 15431 
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 15432 15433 /* Decrement counter to indicate that the callback routine is done. */ 15434 un->un_in_callback--; 15435 ASSERT(un->un_in_callback >= 0); 15436 15437 /* 15438 * At this point, the pkt has been dispatched, ie, it is either 15439 * being re-tried or has been returned to its caller and should 15440 * not be referenced. 15441 */ 15442 15443 mutex_exit(SD_MUTEX(un)); 15444 } 15445 15446 15447 /* 15448 * Function: sd_print_incomplete_msg 15449 * 15450 * Description: Prints the error message for a CMD_INCOMPLETE error. 15451 * 15452 * Arguments: un - ptr to associated softstate for the device. 15453 * bp - ptr to the buf(9S) for the command. 15454 * arg - message string ptr 15455 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 15456 * or SD_NO_RETRY_ISSUED. 15457 * 15458 * Context: May be called under interrupt context 15459 */ 15460 15461 static void 15462 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 15463 { 15464 struct scsi_pkt *pktp; 15465 char *msgp; 15466 char *cmdp = arg; 15467 15468 ASSERT(un != NULL); 15469 ASSERT(mutex_owned(SD_MUTEX(un))); 15470 ASSERT(bp != NULL); 15471 ASSERT(arg != NULL); 15472 pktp = SD_GET_PKTP(bp); 15473 ASSERT(pktp != NULL); 15474 15475 switch (code) { 15476 case SD_DELAYED_RETRY_ISSUED: 15477 case SD_IMMEDIATE_RETRY_ISSUED: 15478 msgp = "retrying"; 15479 break; 15480 case SD_NO_RETRY_ISSUED: 15481 default: 15482 msgp = "giving up"; 15483 break; 15484 } 15485 15486 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15487 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15488 "incomplete %s- %s\n", cmdp, msgp); 15489 } 15490 } 15491 15492 15493 15494 /* 15495 * Function: sd_pkt_status_good 15496 * 15497 * Description: Processing for a STATUS_GOOD code in pkt_status. 15498 * 15499 * Context: May be called under interrupt context 15500 */ 15501 15502 static void 15503 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 15504 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15505 { 15506 char *cmdp; 15507 15508 ASSERT(un != NULL); 15509 ASSERT(mutex_owned(SD_MUTEX(un))); 15510 ASSERT(bp != NULL); 15511 ASSERT(xp != NULL); 15512 ASSERT(pktp != NULL); 15513 ASSERT(pktp->pkt_reason == CMD_CMPLT); 15514 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 15515 ASSERT(pktp->pkt_resid != 0); 15516 15517 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 15518 15519 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15520 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 15521 case SCMD_READ: 15522 cmdp = "read"; 15523 break; 15524 case SCMD_WRITE: 15525 cmdp = "write"; 15526 break; 15527 default: 15528 SD_UPDATE_B_RESID(bp, pktp); 15529 sd_return_command(un, bp); 15530 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15531 return; 15532 } 15533 15534 /* 15535 * See if we can retry the read/write, preferably immediately. 15536 * If retries are exhausted, then sd_retry_command() will update 15537 * the b_resid count. 15538 */ 15539 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 15540 cmdp, EIO, (clock_t)0, NULL); 15541 15542 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 15543 } 15544 15545 15546 15547 15548 15549 /* 15550 * Function: sd_handle_request_sense 15551 * 15552 * Description: Processing for non-auto Request Sense command.
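 *
 *		Linkage sketch: while sensing is in progress the RQS xbuf
 *		remembers the original command (set up in
 *		sd_mark_rqs_busy()) and it is recovered here:
 *
 *		    sense_xp->xb_sense_bp = cmd_bp;
 *		    ...
 *		    cmd_bp = sd_mark_rqs_idle(un, sense_xp);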
15553 * 15554 * Arguments: un - ptr to associated softstate 15555 * sense_bp - ptr to buf(9S) for the RQS command 15556 * sense_xp - ptr to the sd_xbuf for the RQS command 15557 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 15558 * 15559 * Context: May be called under interrupt context 15560 */ 15561 15562 static void 15563 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 15564 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 15565 { 15566 struct buf *cmd_bp; /* buf for the original command */ 15567 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 15568 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 15569 size_t actual_len; /* actual sense data length */ 15570 15571 ASSERT(un != NULL); 15572 ASSERT(mutex_owned(SD_MUTEX(un))); 15573 ASSERT(sense_bp != NULL); 15574 ASSERT(sense_xp != NULL); 15575 ASSERT(sense_pktp != NULL); 15576 15577 /* 15578 * Note the sense_bp, sense_xp, and sense_pktp here are for the 15579 * RQS command and not the original command. 15580 */ 15581 ASSERT(sense_pktp == un->un_rqs_pktp); 15582 ASSERT(sense_bp == un->un_rqs_bp); 15583 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 15584 (FLAG_SENSING | FLAG_HEAD)); 15585 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 15586 FLAG_SENSING) == FLAG_SENSING); 15587 15588 /* These are the bp, xp, and pktp for the original command */ 15589 cmd_bp = sense_xp->xb_sense_bp; 15590 cmd_xp = SD_GET_XBUF(cmd_bp); 15591 cmd_pktp = SD_GET_PKTP(cmd_bp); 15592 15593 if (sense_pktp->pkt_reason != CMD_CMPLT) { 15594 /* 15595 * The REQUEST SENSE command failed. Release the REQUEST 15596 * SENSE command for re-use, get back the bp for the original 15597 * command, and attempt to re-try the original command if 15598 * FLAG_DIAGNOSE is not set in the original packet. 15599 */ 15600 SD_UPDATE_ERRSTATS(un, sd_harderrs); 15601 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15602 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 15603 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 15604 NULL, NULL, EIO, (clock_t)0, NULL); 15605 return; 15606 } 15607 } 15608 15609 /* 15610 * Save the relevant sense info into the xp for the original cmd. 15611 * 15612 * Note: if the request sense failed the state info will be zero 15613 * as set in sd_mark_rqs_busy() 15614 */ 15615 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 15616 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 15617 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 15618 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 15619 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 15620 SENSE_LENGTH)) { 15621 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15622 MAX_SENSE_LENGTH); 15623 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 15624 } else { 15625 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 15626 SENSE_LENGTH); 15627 if (actual_len < SENSE_LENGTH) { 15628 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 15629 } else { 15630 cmd_xp->xb_sense_resid = 0; 15631 } 15632 } 15633 15634 /* 15635 * Free up the RQS command.... 15636 * NOTE: 15637 * Must do this BEFORE calling sd_validate_sense_data! 15638 * sd_validate_sense_data may return the original command in 15639 * which case the pkt will be freed and the flags can no 15640 * longer be touched. 15641 * SD_MUTEX is held through this process until the command 15642 * is dispatched based upon the sense data, so there are 15643 * no race conditions. 
15644 */ 15645 (void) sd_mark_rqs_idle(un, sense_xp); 15646 15647 /* 15648 * For a retryable command see if we have valid sense data, if so then 15649 * turn it over to sd_decode_sense() to figure out the right course of 15650 * action. Just fail a non-retryable command. 15651 */ 15652 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15653 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 15654 SD_SENSE_DATA_IS_VALID) { 15655 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 15656 } 15657 } else { 15658 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 15659 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15660 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 15661 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15662 sd_return_failed_command(un, cmd_bp, EIO); 15663 } 15664 } 15665 15666 15667 15668 15669 /* 15670 * Function: sd_handle_auto_request_sense 15671 * 15672 * Description: Processing for auto-request sense information. 15673 * 15674 * Arguments: un - ptr to associated softstate 15675 * bp - ptr to buf(9S) for the command 15676 * xp - ptr to the sd_xbuf for the command 15677 * pktp - ptr to the scsi_pkt(9S) for the command 15678 * 15679 * Context: May be called under interrupt context 15680 */ 15681 15682 static void 15683 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 15684 struct sd_xbuf *xp, struct scsi_pkt *pktp) 15685 { 15686 struct scsi_arq_status *asp; 15687 size_t actual_len; 15688 15689 ASSERT(un != NULL); 15690 ASSERT(mutex_owned(SD_MUTEX(un))); 15691 ASSERT(bp != NULL); 15692 ASSERT(xp != NULL); 15693 ASSERT(pktp != NULL); 15694 ASSERT(pktp != un->un_rqs_pktp); 15695 ASSERT(bp != un->un_rqs_bp); 15696 15697 /* 15698 * For auto-request sense, we get a scsi_arq_status back from 15699 * the HBA, with the sense data in the sts_sensedata member. 15700 * The pkt_scbp of the packet points to this scsi_arq_status. 15701 */ 15702 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15703 15704 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 15705 /* 15706 * The auto REQUEST SENSE failed; see if we can re-try 15707 * the original command. 15708 */ 15709 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15710 "auto request sense failed (reason=%s)\n", 15711 scsi_rname(asp->sts_rqpkt_reason)); 15712 15713 sd_reset_target(un, pktp); 15714 15715 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15716 NULL, NULL, EIO, (clock_t)0, NULL); 15717 return; 15718 } 15719 15720 /* Save the relevant sense info into the xp for the original cmd. 
*/ 15721 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 15722 xp->xb_sense_state = asp->sts_rqpkt_state; 15723 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15724 if (xp->xb_sense_state & STATE_XARQ_DONE) { 15725 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15726 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15727 MAX_SENSE_LENGTH); 15728 } else { 15729 if (xp->xb_sense_resid > SENSE_LENGTH) { 15730 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 15731 } else { 15732 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 15733 } 15734 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15735 if ((((struct uscsi_cmd *) 15736 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 15737 xp->xb_sense_resid = (((struct uscsi_cmd *) 15738 (xp->xb_pktinfo))->uscsi_rqlen) - 15739 actual_len; 15740 } else { 15741 xp->xb_sense_resid = 0; 15742 } 15743 } 15744 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 15745 } 15746 15747 /* 15748 * See if we have valid sense data, if so then turn it over to 15749 * sd_decode_sense() to figure out the right course of action. 15750 */ 15751 if (sd_validate_sense_data(un, bp, xp, actual_len) == 15752 SD_SENSE_DATA_IS_VALID) { 15753 sd_decode_sense(un, bp, xp, pktp); 15754 } 15755 } 15756 15757 15758 /* 15759 * Function: sd_print_sense_failed_msg 15760 * 15761 * Description: Print log message when RQS has failed. 15762 * 15763 * Arguments: un - ptr to associated softstate 15764 * bp - ptr to buf(9S) for the command 15765 * arg - generic message string ptr 15766 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 15767 * or SD_NO_RETRY_ISSUED 15768 * 15769 * Context: May be called from interrupt context 15770 */ 15771 15772 static void 15773 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 15774 int code) 15775 { 15776 char *msgp = arg; 15777 15778 ASSERT(un != NULL); 15779 ASSERT(mutex_owned(SD_MUTEX(un))); 15780 ASSERT(bp != NULL); 15781 15782 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 15783 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 15784 } 15785 } 15786 15787 15788 /* 15789 * Function: sd_validate_sense_data 15790 * 15791 * Description: Check the given sense data for validity. 15792 * If the sense data is not valid, the command will 15793 * be either failed or retried! 15794 * 15795 * Return Code: SD_SENSE_DATA_IS_INVALID 15796 * SD_SENSE_DATA_IS_VALID 15797 * 15798 * Context: May be called from interrupt context 15799 */ 15800 15801 static int 15802 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15803 size_t actual_len) 15804 { 15805 struct scsi_extended_sense *esp; 15806 struct scsi_pkt *pktp; 15807 char *msgp = NULL; 15808 15809 ASSERT(un != NULL); 15810 ASSERT(mutex_owned(SD_MUTEX(un))); 15811 ASSERT(bp != NULL); 15812 ASSERT(bp != un->un_rqs_bp); 15813 ASSERT(xp != NULL); 15814 15815 pktp = SD_GET_PKTP(bp); 15816 ASSERT(pktp != NULL); 15817 15818 /* 15819 * Check the status of the RQS command (auto or manual). 
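 *
 * Note (sketch): xb_sense_status holds the SCSI status byte of the
 * REQUEST SENSE command itself, so the switch below,
 *
 *	switch (xp->xb_sense_status & STATUS_MASK)
 *
 * classifies how the sense data was delivered, not how the original
 * command completed.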
15820 */ 15821 switch (xp->xb_sense_status & STATUS_MASK) { 15822 case STATUS_GOOD: 15823 break; 15824 15825 case STATUS_RESERVATION_CONFLICT: 15826 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15827 return (SD_SENSE_DATA_IS_INVALID); 15828 15829 case STATUS_BUSY: 15830 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15831 "Busy Status on REQUEST SENSE\n"); 15832 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 15833 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 15834 return (SD_SENSE_DATA_IS_INVALID); 15835 15836 case STATUS_QFULL: 15837 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15838 "QFULL Status on REQUEST SENSE\n"); 15839 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 15840 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 15841 return (SD_SENSE_DATA_IS_INVALID); 15842 15843 case STATUS_CHECK: 15844 case STATUS_TERMINATED: 15845 msgp = "Check Condition on REQUEST SENSE\n"; 15846 goto sense_failed; 15847 15848 default: 15849 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 15850 goto sense_failed; 15851 } 15852 15853 /* 15854 * See if we got the minimum required amount of sense data. 15855 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 15856 * or less. 15857 */ 15858 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 15859 (actual_len == 0)) { 15860 msgp = "Request Sense couldn't get sense data\n"; 15861 goto sense_failed; 15862 } 15863 15864 if (actual_len < SUN_MIN_SENSE_LENGTH) { 15865 msgp = "Not enough sense information\n"; 15866 goto sense_failed; 15867 } 15868 15869 /* 15870 * We require the extended sense data 15871 */ 15872 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 15873 if (esp->es_class != CLASS_EXTENDED_SENSE) { 15874 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 15875 static char tmp[8]; 15876 static char buf[148]; 15877 char *p = (char *)(xp->xb_sense_data); 15878 int i; 15879 15880 mutex_enter(&sd_sense_mutex); 15881 (void) strcpy(buf, "undecodable sense information:"); 15882 for (i = 0; i < actual_len; i++) { 15883 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 15884 (void) strcpy(&buf[strlen(buf)], tmp); 15885 } 15886 i = strlen(buf); 15887 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 15888 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 15889 mutex_exit(&sd_sense_mutex); 15890 } 15891 /* Note: Legacy behavior, fail the command with no retry */ 15892 sd_return_failed_command(un, bp, EIO); 15893 return (SD_SENSE_DATA_IS_INVALID); 15894 } 15895 15896 /* 15897 * Check that es_code is valid (es_class concatenated with es_code 15898 * makes up the "response code" field). es_class will always be 7, so 15899 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 15900 * format. 15901 */ 15902 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 15903 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 15904 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 15905 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 15906 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 15907 goto sense_failed; 15908 } 15909 15910 return (SD_SENSE_DATA_IS_VALID); 15911 15912 sense_failed: 15913 /* 15914 * If the request sense failed (for whatever reason), attempt 15915 * to retry the original command. 15916 */ 15917 #if defined(__i386) || defined(__amd64) 15918 /* 15919 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 15920 * sddef.h for the SPARC platform, while x86 uses one binary 15921 * for both SCSI and FC.
The SD_RETRY_DELAY value used here needs to be adjusted 15923 * whenever SD_RETRY_DELAY changes in sddef.h 15924 */ 15925 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15926 sd_print_sense_failed_msg, msgp, EIO, 15927 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 15928 #else 15929 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15930 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 15931 #endif 15932 15933 return (SD_SENSE_DATA_IS_INVALID); 15934 } 15935 15936 15937 15938 /* 15939 * Function: sd_decode_sense 15940 * 15941 * Description: Take recovery action(s) when SCSI Sense Data is received. 15942 * 15943 * Context: Interrupt context. 15944 */ 15945 15946 static void 15947 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 15948 struct scsi_pkt *pktp) 15949 { 15950 uint8_t sense_key; 15951 15952 ASSERT(un != NULL); 15953 ASSERT(mutex_owned(SD_MUTEX(un))); 15954 ASSERT(bp != NULL); 15955 ASSERT(bp != un->un_rqs_bp); 15956 ASSERT(xp != NULL); 15957 ASSERT(pktp != NULL); 15958 15959 sense_key = scsi_sense_key(xp->xb_sense_data); 15960 15961 switch (sense_key) { 15962 case KEY_NO_SENSE: 15963 sd_sense_key_no_sense(un, bp, xp, pktp); 15964 break; 15965 case KEY_RECOVERABLE_ERROR: 15966 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 15967 bp, xp, pktp); 15968 break; 15969 case KEY_NOT_READY: 15970 sd_sense_key_not_ready(un, xp->xb_sense_data, 15971 bp, xp, pktp); 15972 break; 15973 case KEY_MEDIUM_ERROR: 15974 case KEY_HARDWARE_ERROR: 15975 sd_sense_key_medium_or_hardware_error(un, 15976 xp->xb_sense_data, bp, xp, pktp); 15977 break; 15978 case KEY_ILLEGAL_REQUEST: 15979 sd_sense_key_illegal_request(un, bp, xp, pktp); 15980 break; 15981 case KEY_UNIT_ATTENTION: 15982 sd_sense_key_unit_attention(un, xp->xb_sense_data, 15983 bp, xp, pktp); 15984 break; 15985 case KEY_WRITE_PROTECT: 15986 case KEY_VOLUME_OVERFLOW: 15987 case KEY_MISCOMPARE: 15988 sd_sense_key_fail_command(un, bp, xp, pktp); 15989 break; 15990 case KEY_BLANK_CHECK: 15991 sd_sense_key_blank_check(un, bp, xp, pktp); 15992 break; 15993 case KEY_ABORTED_COMMAND: 15994 sd_sense_key_aborted_command(un, bp, xp, pktp); 15995 break; 15996 case KEY_VENDOR_UNIQUE: 15997 case KEY_COPY_ABORTED: 15998 case KEY_EQUAL: 15999 case KEY_RESERVED: 16000 default: 16001 sd_sense_key_default(un, xp->xb_sense_data, 16002 bp, xp, pktp); 16003 break; 16004 } 16005 } 16006 16007 16008 /* 16009 * Function: sd_dump_memory 16010 * 16011 * Description: Debug logging routine to print the contents of a user-provided 16012 * buffer. The output of the buffer is broken up into 256-byte 16013 * segments due to a size constraint of the scsi_log 16014 * implementation.
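 *
 *		Usage sketch (arguments as used elsewhere in this file):
 *
 *		    sd_dump_memory(un, SD_LOG_IO, "Sense Data",
 *			(uchar_t *)xp->xb_sense_data, SENSE_LENGTH,
 *			SD_LOG_HEX);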
16015 * 16016 * Arguments: un - ptr to softstate 16017 * comp - component mask 16018 * title - "title" string to precede data when printed 16019 * data - ptr to data block to be printed 16020 * len - size of data block to be printed 16021 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 16022 * 16023 * Context: May be called from interrupt context 16024 */ 16025 16026 #define SD_DUMP_MEMORY_BUF_SIZE 256 16027 16028 static char *sd_dump_format_string[] = { 16029 " 0x%02x", 16030 " %c" 16031 }; 16032 16033 static void 16034 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 16035 int len, int fmt) 16036 { 16037 int i, j; 16038 int avail_count; 16039 int start_offset; 16040 int end_offset; 16041 size_t entry_len; 16042 char *bufp; 16043 char *local_buf; 16044 char *format_string; 16045 16046 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 16047 16048 /* 16049 * In the debug version of the driver, this function is called from a 16050 * number of places which are NOPs in the release driver. 16051 * The debug driver therefore has additional methods of filtering 16052 * debug output. 16053 */ 16054 #ifdef SDDEBUG 16055 /* 16056 * In the debug version of the driver we can reduce the amount of debug 16057 * messages by setting sd_error_level to something other than 16058 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 16059 * sd_component_mask. 16060 */ 16061 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 16062 (sd_error_level != SCSI_ERR_ALL)) { 16063 return; 16064 } 16065 if (((sd_component_mask & comp) == 0) || 16066 (sd_error_level != SCSI_ERR_ALL)) { 16067 return; 16068 } 16069 #else 16070 if (sd_error_level != SCSI_ERR_ALL) { 16071 return; 16072 } 16073 #endif 16074 16075 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 16076 bufp = local_buf; 16077 /* 16078 * Available length is the length of local_buf[], minus the 16079 * length of the title string, minus one for the ":", minus 16080 * one for the newline, minus one for the NULL terminator. 16081 * This gives the #bytes available for holding the printed 16082 * values from the given data buffer. 16083 */ 16084 if (fmt == SD_LOG_HEX) { 16085 format_string = sd_dump_format_string[0]; 16086 } else /* SD_LOG_CHAR */ { 16087 format_string = sd_dump_format_string[1]; 16088 } 16089 /* 16090 * Available count is the number of elements from the given 16091 * data buffer that we can fit into the available length. 16092 * This is based upon the size of the format string used. 16093 * Make one entry and find its size. 16094 */ 16095 (void) sprintf(bufp, format_string, data[0]); 16096 entry_len = strlen(bufp); 16097 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 16098 16099 j = 0; 16100 while (j < len) { 16101 bufp = local_buf; 16102 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 16103 start_offset = j; 16104 16105 end_offset = start_offset + avail_count; 16106 16107 (void) sprintf(bufp, "%s:", title); 16108 bufp += strlen(bufp); 16109 for (i = start_offset; ((i < end_offset) && (j < len)); 16110 i++, j++) { 16111 (void) sprintf(bufp, format_string, data[i]); 16112 bufp += entry_len; 16113 } 16114 (void) sprintf(bufp, "\n"); 16115 16116 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 16117 } 16118 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 16119 } 16120 16121 /* 16122 * Function: sd_print_sense_msg 16123 * 16124 * Description: Log a message based upon the given sense data.
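 *
 *		Caller sketch (as the sense-key handlers below do):
 *
 *		    struct sd_sense_info si;
 *
 *		    si.ssi_severity = SCSI_ERR_FATAL;
 *		    si.ssi_pfa_flag = FALSE;
 *		    sd_retry_command(un, bp, SD_RETRIES_STANDARD,
 *			sd_print_sense_msg, &si, EIO, (clock_t)0, NULL);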
16125 * 16126 * Arguments: un - ptr to associated softstate 16127 * bp - ptr to buf(9S) for the command 16128 * arg - ptr to associated sd_sense_info struct 16129 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16130 * or SD_NO_RETRY_ISSUED 16131 * 16132 * Context: May be called from interrupt context 16133 */ 16134 16135 static void 16136 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 16137 { 16138 struct sd_xbuf *xp; 16139 struct scsi_pkt *pktp; 16140 uint8_t *sensep; 16141 daddr_t request_blkno; 16142 diskaddr_t err_blkno; 16143 int severity; 16144 int pfa_flag; 16145 extern struct scsi_key_strings scsi_cmds[]; 16146 16147 ASSERT(un != NULL); 16148 ASSERT(mutex_owned(SD_MUTEX(un))); 16149 ASSERT(bp != NULL); 16150 xp = SD_GET_XBUF(bp); 16151 ASSERT(xp != NULL); 16152 pktp = SD_GET_PKTP(bp); 16153 ASSERT(pktp != NULL); 16154 ASSERT(arg != NULL); 16155 16156 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 16157 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 16158 16159 if ((code == SD_DELAYED_RETRY_ISSUED) || 16160 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 16161 severity = SCSI_ERR_RETRYABLE; 16162 } 16163 16164 /* Use absolute block number for the request block number */ 16165 request_blkno = xp->xb_blkno; 16166 16167 /* 16168 * Now try to get the error block number from the sense data 16169 */ 16170 sensep = xp->xb_sense_data; 16171 16172 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 16173 (uint64_t *)&err_blkno)) { 16174 /* 16175 * We retrieved the error block number from the information 16176 * portion of the sense data. 16177 * 16178 * For USCSI commands we are better off using the error 16179 * block no. as the requested block no. (This is the best 16180 * we can estimate.) 16181 */ 16182 if ((SD_IS_BUFIO(xp) == FALSE) && 16183 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 16184 request_blkno = err_blkno; 16185 } 16186 } else { 16187 /* 16188 * Without the es_valid bit set (for fixed format) or an 16189 * information descriptor (for descriptor format) we cannot 16190 * be certain of the error blkno, so just use the 16191 * request_blkno. 16192 */ 16193 err_blkno = (diskaddr_t)request_blkno; 16194 } 16195 16196 /* 16197 * The following will log the buffer contents for the release driver 16198 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 16199 * level is set to verbose.
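 * * As an illustration (hypothetical values), a failed 10-byte READ would appear as two CE_NOTE lines roughly like: * Failed CDB: 0x28 0x00 0x00 0x12 0xd6 0x87 0x00 0x00 0x10 0x00 * Sense Data: 0x70 0x00 0x03 0x00 0x00 0x00 0x00 0x0a ...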
16200 */ 16201 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 16202 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16203 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 16204 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 16205 16206 if (pfa_flag == FALSE) { 16207 /* This is normally only set for USCSI */ 16208 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 16209 return; 16210 } 16211 16212 if ((SD_IS_BUFIO(xp) == TRUE) && 16213 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 16214 (severity < sd_error_level))) { 16215 return; 16216 } 16217 } 16218 16219 /* 16220 * Check for Sonoma Failover and keep a count of how many failed I/O's 16221 */ 16222 if ((SD_IS_LSI(un)) && 16223 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 16224 (scsi_sense_asc(sensep) == 0x94) && 16225 (scsi_sense_ascq(sensep) == 0x01)) { 16226 un->un_sonoma_failure_count++; 16227 if (un->un_sonoma_failure_count > 1) { 16228 return; 16229 } 16230 } 16231 16232 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 16233 request_blkno, err_blkno, scsi_cmds, 16234 (struct scsi_extended_sense *)sensep, 16235 un->un_additional_codes, NULL); 16236 } 16237 16238 /* 16239 * Function: sd_sense_key_no_sense 16240 * 16241 * Description: Recovery action when sense data was not received. 16242 * 16243 * Context: May be called from interrupt context 16244 */ 16245 16246 static void 16247 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 16248 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16249 { 16250 struct sd_sense_info si; 16251 16252 ASSERT(un != NULL); 16253 ASSERT(mutex_owned(SD_MUTEX(un))); 16254 ASSERT(bp != NULL); 16255 ASSERT(xp != NULL); 16256 ASSERT(pktp != NULL); 16257 16258 si.ssi_severity = SCSI_ERR_FATAL; 16259 si.ssi_pfa_flag = FALSE; 16260 16261 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16262 16263 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16264 &si, EIO, (clock_t)0, NULL); 16265 } 16266 16267 16268 /* 16269 * Function: sd_sense_key_recoverable_error 16270 * 16271 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 16272 * 16273 * Context: May be called from interrupt context 16274 */ 16275 16276 static void 16277 sd_sense_key_recoverable_error(struct sd_lun *un, 16278 uint8_t *sense_datap, 16279 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16280 { 16281 struct sd_sense_info si; 16282 uint8_t asc = scsi_sense_asc(sense_datap); 16283 16284 ASSERT(un != NULL); 16285 ASSERT(mutex_owned(SD_MUTEX(un))); 16286 ASSERT(bp != NULL); 16287 ASSERT(xp != NULL); 16288 ASSERT(pktp != NULL); 16289 16290 /* 16291 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 16292 */ 16293 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 16294 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16295 si.ssi_severity = SCSI_ERR_INFO; 16296 si.ssi_pfa_flag = TRUE; 16297 } else { 16298 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16299 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 16300 si.ssi_severity = SCSI_ERR_RECOVERED; 16301 si.ssi_pfa_flag = FALSE; 16302 } 16303 16304 if (pktp->pkt_resid == 0) { 16305 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16306 sd_return_command(un, bp); 16307 return; 16308 } 16309 16310 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16311 &si, EIO, (clock_t)0, NULL); 16312 } 16313 16314 16315 16316 16317 /* 16318 * Function: sd_sense_key_not_ready 16319 * 16320 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
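 * The ASC/ASCQ values in the sense data select the specific recovery action; the switch statement in the function body is the authoritative list. Broadly: ASC 0x04 (LUN not ready) retries and may schedule a START STOP UNIT, ASC 0x05 (no response to selection) and unrecognized codes log a message and retry, and ASC 0x3A (medium not present) fails the command immediately.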
16321 * 16322 * Context: May be called from interrupt context 16323 */ 16324 16325 static void 16326 sd_sense_key_not_ready(struct sd_lun *un, 16327 uint8_t *sense_datap, 16328 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16329 { 16330 struct sd_sense_info si; 16331 uint8_t asc = scsi_sense_asc(sense_datap); 16332 uint8_t ascq = scsi_sense_ascq(sense_datap); 16333 16334 ASSERT(un != NULL); 16335 ASSERT(mutex_owned(SD_MUTEX(un))); 16336 ASSERT(bp != NULL); 16337 ASSERT(xp != NULL); 16338 ASSERT(pktp != NULL); 16339 16340 si.ssi_severity = SCSI_ERR_FATAL; 16341 si.ssi_pfa_flag = FALSE; 16342 16343 /* 16344 * Update error stats after first NOT READY error. Disks may have 16345 * been powered down and may need to be restarted. For CDROMs, 16346 * report NOT READY errors only if media is present. 16347 */ 16348 if ((ISCD(un) && (asc == 0x3A)) || 16349 (xp->xb_nr_retry_count > 0)) { 16350 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16351 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 16352 } 16353 16354 /* 16355 * Just fail if the "not ready" retry limit has been reached. 16356 */ 16357 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 16358 /* Special check for error message printing for removables. */ 16359 if (un->un_f_has_removable_media && (asc == 0x04) && 16360 (ascq >= 0x04)) { 16361 si.ssi_severity = SCSI_ERR_ALL; 16362 } 16363 goto fail_command; 16364 } 16365 16366 /* 16367 * Check the ASC and ASCQ in the sense data as needed, to determine 16368 * what to do. 16369 */ 16370 switch (asc) { 16371 case 0x04: /* LOGICAL UNIT NOT READY */ 16372 /* 16373 * Disk drives that don't spin up result in a very long delay 16374 * in format without warning messages. We will log a message 16375 * if the error level is set to verbose. 16376 */ 16377 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16378 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16379 "logical unit not ready, resetting disk\n"); 16380 } 16381 16382 /* 16383 * There are different requirements for CDROMs and disks for 16384 * the number of retries. If a CD-ROM is reporting this, it is 16385 * probably reading TOC and is in the process of getting 16386 * ready, so we should keep on trying for a long time to make 16387 * sure that all types of media are taken into account (for 16388 * some media the drive takes a long time to read TOC). For 16389 * disks we do not want to retry this too many times as this 16390 * can cause a long hang in format when the drive refuses to 16391 * spin up (a very common failure). 16392 */ 16393 switch (ascq) { 16394 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 16395 /* 16396 * Disk drives frequently refuse to spin up which 16397 * results in a very long hang in format without 16398 * warning messages. 16399 * 16400 * Note: This code preserves the legacy behavior of 16401 * comparing xb_nr_retry_count against zero for fibre 16402 * channel targets instead of comparing against the 16403 * un_reset_retry_count value. The reason for this 16404 * discrepancy has been so utterly lost beneath the 16405 * Sands of Time that even Indiana Jones could not 16406 * find it.
16407 */ 16408 if (un->un_f_is_fibre == TRUE) { 16409 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16410 (xp->xb_nr_retry_count > 0)) && 16411 (un->un_startstop_timeid == NULL)) { 16412 scsi_log(SD_DEVINFO(un), sd_label, 16413 CE_WARN, "logical unit not ready, " 16414 "resetting disk\n"); 16415 sd_reset_target(un, pktp); 16416 } 16417 } else { 16418 if (((sd_level_mask & SD_LOGMASK_DIAG) || 16419 (xp->xb_nr_retry_count > 16420 un->un_reset_retry_count)) && 16421 (un->un_startstop_timeid == NULL)) { 16422 scsi_log(SD_DEVINFO(un), sd_label, 16423 CE_WARN, "logical unit not ready, " 16424 "resetting disk\n"); 16425 sd_reset_target(un, pktp); 16426 } 16427 } 16428 break; 16429 16430 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 16431 /* 16432 * If the target is in the process of becoming 16433 * ready, just proceed with the retry. This can 16434 * happen with CD-ROMs that take a long time to 16435 * read TOC after a power cycle or reset. 16436 */ 16437 goto do_retry; 16438 16439 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 16440 break; 16441 16442 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 16443 /* 16444 * Retries cannot help here so just fail right away. 16445 */ 16446 goto fail_command; 16447 16448 case 0x88: 16449 /* 16450 * Vendor-unique code for T3/T4: it indicates a 16451 * path problem in a multipathed config, but as far as 16452 * the target driver is concerned it equates to a fatal 16453 * error, so we should just fail the command right away 16454 * (without printing anything to the console). If this 16455 * is not a T3/T4, fall thru to the default recovery 16456 * action. 16457 * T3/T4 is FC only, don't need to check is_fibre 16458 */ 16459 if (SD_IS_T3(un) || SD_IS_T4(un)) { 16460 sd_return_failed_command(un, bp, EIO); 16461 return; 16462 } 16463 /* FALLTHRU */ 16464 16465 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 16466 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 16467 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 16468 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 16469 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 16470 default: /* Possible future codes in SCSI spec? */ 16471 /* 16472 * For removable-media devices, do not retry if 16473 * ASCQ > 2 as these result mostly from USCSI commands 16474 * on MMC devices issued to check status of an 16475 * operation initiated in immediate mode. Also for 16476 * ASCQ >= 4 do not print console messages as these 16477 * mainly represent a user-initiated operation 16478 * instead of a system failure. 16479 */ 16480 if (un->un_f_has_removable_media) { 16481 si.ssi_severity = SCSI_ERR_ALL; 16482 goto fail_command; 16483 } 16484 break; 16485 } 16486 16487 /* 16488 * As part of our recovery attempt for the NOT READY 16489 * condition, we issue a START STOP UNIT command. However 16490 * we want to wait for a short delay before attempting this 16491 * as there may still be more commands coming back from the 16492 * target with the check condition. To do this we use 16493 * timeout(9F) to call sd_start_stop_unit_callback() after 16494 * the delay interval expires. (sd_start_stop_unit_callback() 16495 * dispatches sd_start_stop_unit_task(), which will issue 16496 * the actual START STOP UNIT command.) The delay interval 16497 * is one-half of the delay that we will use to retry the 16498 * command that generated the NOT READY condition.
16499 * 16500 * Note that we could just dispatch sd_start_stop_unit_task() 16501 * from here and allow it to sleep for the delay interval, 16502 * but then we would be tying up the taskq thread 16503 * unnecessarily for the duration of the delay. 16504 * 16505 * Do not issue the START STOP UNIT if the current command 16506 * is already a START STOP UNIT. 16507 */ 16508 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 16509 break; 16510 } 16511 16512 /* 16513 * Do not schedule the timeout if one is already pending. 16514 */ 16515 if (un->un_startstop_timeid != NULL) { 16516 SD_INFO(SD_LOG_ERROR, un, 16517 "sd_sense_key_not_ready: restart already issued to" 16518 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 16519 ddi_get_instance(SD_DEVINFO(un))); 16520 break; 16521 } 16522 16523 /* 16524 * Schedule the START STOP UNIT command, then queue the command 16525 * for a retry. 16526 * 16527 * Note: A timeout is not scheduled for this retry because we 16528 * want the retry to be serial with the START_STOP_UNIT. The 16529 * retry will be started when the START_STOP_UNIT is completed 16530 * in sd_start_stop_unit_task. 16531 */ 16532 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 16533 un, un->un_busy_timeout / 2); 16534 xp->xb_nr_retry_count++; 16535 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 16536 return; 16537 16538 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 16539 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16540 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16541 "unit does not respond to selection\n"); 16542 } 16543 break; 16544 16545 case 0x3A: /* MEDIUM NOT PRESENT */ 16546 if (sd_error_level >= SCSI_ERR_FATAL) { 16547 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16548 "Caddy not inserted in drive\n"); 16549 } 16550 16551 sr_ejected(un); 16552 un->un_mediastate = DKIO_EJECTED; 16553 /* The state has changed, inform the media watch routines */ 16554 cv_broadcast(&un->un_state_cv); 16555 /* Just fail if no media is present in the drive. */ 16556 goto fail_command; 16557 16558 default: 16559 if (sd_error_level < SCSI_ERR_RETRYABLE) { 16560 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 16561 "Unit not Ready. Additional sense code 0x%x\n", 16562 asc); 16563 } 16564 break; 16565 } 16566 16567 do_retry: 16568 16569 /* 16570 * Retry the command, as some targets may report NOT READY for 16571 * several seconds after being reset. 16572 */ 16573 xp->xb_nr_retry_count++; 16574 si.ssi_severity = SCSI_ERR_RETRYABLE; 16575 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 16576 &si, EIO, un->un_busy_timeout, NULL); 16577 16578 return; 16579 16580 fail_command: 16581 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16582 sd_return_failed_command(un, bp, EIO); 16583 } 16584 16585 16586 16587 /* 16588 * Function: sd_sense_key_medium_or_hardware_error 16589 * 16590 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 16591 * sense key.
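 * When the retry count reaches un_reset_retry_count, a reset is attempted, escalating from RESET_LUN to RESET_TARGET; as the code below shows, the reset is skipped for LSI arrays reporting ASC 0x84 (LUN Dead), since resetting the target would victimize its other LUNs.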
16592 * 16593 * Context: May be called from interrupt context 16594 */ 16595 16596 static void 16597 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 16598 uint8_t *sense_datap, 16599 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16600 { 16601 struct sd_sense_info si; 16602 uint8_t sense_key = scsi_sense_key(sense_datap); 16603 uint8_t asc = scsi_sense_asc(sense_datap); 16604 16605 ASSERT(un != NULL); 16606 ASSERT(mutex_owned(SD_MUTEX(un))); 16607 ASSERT(bp != NULL); 16608 ASSERT(xp != NULL); 16609 ASSERT(pktp != NULL); 16610 16611 si.ssi_severity = SCSI_ERR_FATAL; 16612 si.ssi_pfa_flag = FALSE; 16613 16614 if (sense_key == KEY_MEDIUM_ERROR) { 16615 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 16616 } 16617 16618 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16619 16620 if ((un->un_reset_retry_count != 0) && 16621 (xp->xb_retry_count == un->un_reset_retry_count)) { 16622 mutex_exit(SD_MUTEX(un)); 16623 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 16624 if (un->un_f_allow_bus_device_reset == TRUE) { 16625 16626 boolean_t try_resetting_target = B_TRUE; 16627 16628 /* 16629 * We need to be able to handle a specific ASC when we are 16630 * handling a KEY_HARDWARE_ERROR. In particular 16631 * taking the default action of resetting the target may 16632 * not be the appropriate way to attempt recovery. 16633 * Resetting a target because of a single LUN failure 16634 * victimizes all LUNs on that target. 16635 * 16636 * This is true for the LSI arrays: if an LSI 16637 * array controller returns an ASC of 0x84 (LUN Dead) we 16638 * should trust it. 16639 */ 16640 16641 if (sense_key == KEY_HARDWARE_ERROR) { 16642 switch (asc) { 16643 case 0x84: 16644 if (SD_IS_LSI(un)) { 16645 try_resetting_target = B_FALSE; 16646 } 16647 break; 16648 default: 16649 break; 16650 } 16651 } 16652 16653 if (try_resetting_target == B_TRUE) { 16654 int reset_retval = 0; 16655 if (un->un_f_lun_reset_enabled == TRUE) { 16656 SD_TRACE(SD_LOG_IO_CORE, un, 16657 "sd_sense_key_medium_or_hardware_" 16658 "error: issuing RESET_LUN\n"); 16659 reset_retval = 16660 scsi_reset(SD_ADDRESS(un), 16661 RESET_LUN); 16662 } 16663 if (reset_retval == 0) { 16664 SD_TRACE(SD_LOG_IO_CORE, un, 16665 "sd_sense_key_medium_or_hardware_" 16666 "error: issuing RESET_TARGET\n"); 16667 (void) scsi_reset(SD_ADDRESS(un), 16668 RESET_TARGET); 16669 } 16670 } 16671 } 16672 mutex_enter(SD_MUTEX(un)); 16673 } 16674 16675 /* 16676 * This really ought to be a fatal error, but we will retry anyway 16677 * as some drives report this as a spurious error. 16678 */ 16679 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16680 &si, EIO, (clock_t)0, NULL); 16681 } 16682 16683 16684 16685 /* 16686 * Function: sd_sense_key_illegal_request 16687 * 16688 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
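 * No retry is attempted: the target has rejected the command itself, so reissuing it unchanged cannot succeed, and the command is failed back to the caller with EIO.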
16689 * 16690 * Context: May be called from interrupt context 16691 */ 16692 16693 static void 16694 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 16695 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16696 { 16697 struct sd_sense_info si; 16698 16699 ASSERT(un != NULL); 16700 ASSERT(mutex_owned(SD_MUTEX(un))); 16701 ASSERT(bp != NULL); 16702 ASSERT(xp != NULL); 16703 ASSERT(pktp != NULL); 16704 16705 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 16706 16707 si.ssi_severity = SCSI_ERR_INFO; 16708 si.ssi_pfa_flag = FALSE; 16709 16710 /* Pointless to retry if the target thinks it's an illegal request */ 16711 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16712 sd_return_failed_command(un, bp, EIO); 16713 } 16714 16715 16716 16717 16718 /* 16719 * Function: sd_sense_key_unit_attention 16720 * 16721 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 16722 * 16723 * Context: May be called from interrupt context 16724 */ 16725 16726 static void 16727 sd_sense_key_unit_attention(struct sd_lun *un, 16728 uint8_t *sense_datap, 16729 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16730 { 16731 /* 16732 * For UNIT ATTENTION we allow retries for one minute. Devices 16733 * like Sonoma can return UNIT ATTENTION for close to a minute 16734 * under certain conditions. 16735 */ 16736 int retry_check_flag = SD_RETRIES_UA; 16737 boolean_t kstat_updated = B_FALSE; 16738 struct sd_sense_info si; 16739 uint8_t asc = scsi_sense_asc(sense_datap); 16740 uint8_t ascq = scsi_sense_ascq(sense_datap); 16741 16742 ASSERT(un != NULL); 16743 ASSERT(mutex_owned(SD_MUTEX(un))); 16744 ASSERT(bp != NULL); 16745 ASSERT(xp != NULL); 16746 ASSERT(pktp != NULL); 16747 16748 si.ssi_severity = SCSI_ERR_INFO; 16749 si.ssi_pfa_flag = FALSE; 16750 16751 16752 switch (asc) { 16753 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 16754 if (sd_report_pfa != 0) { 16755 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 16756 si.ssi_pfa_flag = TRUE; 16757 retry_check_flag = SD_RETRIES_STANDARD; 16758 goto do_retry; 16759 } 16760 16761 break; 16762 16763 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 16764 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 16765 un->un_resvd_status |= 16766 (SD_LOST_RESERVE | SD_WANT_RESERVE); 16767 } 16768 #ifdef _LP64 16769 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 16770 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 16771 un, KM_NOSLEEP) == 0) { 16772 /* 16773 * If we can't dispatch the task we'll just 16774 * live without descriptor sense. We can 16775 * try again on the next "unit attention". 16776 */ 16777 SD_ERROR(SD_LOG_ERROR, un, 16778 "sd_sense_key_unit_attention: " 16779 "Could not dispatch " 16780 "sd_reenable_dsense_task\n"); 16781 } 16782 } 16783 #endif /* _LP64 */ 16784 /* FALLTHRU */ 16785 16786 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 16787 if (!un->un_f_has_removable_media) { 16788 break; 16789 } 16790 16791 /* 16792 * When we get a unit attention from a removable-media device, 16793 * it may be in a state that will take a long time to recover 16794 * (e.g., from a reset). Since we are executing in interrupt 16795 * context here, we cannot wait around for the device to come 16796 * back. So hand this command off to sd_media_change_task() 16797 * for deferred processing under taskq thread context. (Note 16798 * that the command still may be failed if a problem is 16799 * encountered at a later time.)
16800 */ 16801 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 16802 KM_NOSLEEP) == 0) { 16803 /* 16804 * Cannot dispatch the request so fail the command. 16805 */ 16806 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16807 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16808 si.ssi_severity = SCSI_ERR_FATAL; 16809 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16810 sd_return_failed_command(un, bp, EIO); 16811 } 16812 16813 /* 16814 * If we failed to dispatch sd_media_change_task(), the kstat 16815 * was already updated above. If the dispatch succeeded, the 16816 * kstat will be updated later if the task encounters an error. 16817 * Either way, set the kstat_updated flag here. 16818 */ 16819 kstat_updated = B_TRUE; 16820 16821 /* 16822 * Either the command has been successfully dispatched to a 16823 * task Q for retrying, or the dispatch failed. In either case 16824 * do NOT retry again by calling sd_retry_command. This sets up 16825 * two retries of the same command and when one completes and 16826 * frees the resources the other will access freed memory, 16827 * a bad thing. 16828 */ 16829 return; 16830 16831 default: 16832 break; 16833 } 16834 16835 /* 16836 * ASC ASCQ 16837 * 2A 09 Capacity data has changed 16838 * 2A 01 Mode parameters changed 16839 * 3F 0E Reported luns data has changed 16840 * Arrays that support logical unit expansion should report 16841 * capacity changes (2Ah/09). Mode parameters changed and 16842 * reported luns data has changed are treated as approximations. 16843 */ 16844 if (((asc == 0x2a) && (ascq == 0x09)) || 16845 ((asc == 0x2a) && (ascq == 0x01)) || 16846 ((asc == 0x3f) && (ascq == 0x0e))) { 16847 if (taskq_dispatch(sd_tq, sd_target_change_task, un, 16848 KM_NOSLEEP) == 0) { 16849 SD_ERROR(SD_LOG_ERROR, un, 16850 "sd_sense_key_unit_attention: " 16851 "Could not dispatch sd_target_change_task\n"); 16852 } 16853 } 16854 16855 /* 16856 * Update kstat if we haven't done so already. 16857 */ 16858 if (!kstat_updated) { 16859 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16860 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 16861 } 16862 16863 do_retry: 16864 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 16865 EIO, SD_UA_RETRY_DELAY, NULL); 16866 } 16867 16868 16869 16870 /* 16871 * Function: sd_sense_key_fail_command 16872 * 16873 * Description: Used to fail a command when we don't like the sense key that 16874 * was returned. 16875 * 16876 * Context: May be called from interrupt context 16877 */ 16878 16879 static void 16880 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 16881 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16882 { 16883 struct sd_sense_info si; 16884 16885 ASSERT(un != NULL); 16886 ASSERT(mutex_owned(SD_MUTEX(un))); 16887 ASSERT(bp != NULL); 16888 ASSERT(xp != NULL); 16889 ASSERT(pktp != NULL); 16890 16891 si.ssi_severity = SCSI_ERR_FATAL; 16892 si.ssi_pfa_flag = FALSE; 16893 16894 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16895 sd_return_failed_command(un, bp, EIO); 16896 } 16897 16898 16899 16900 /* 16901 * Function: sd_sense_key_blank_check 16902 * 16903 * Description: Recovery actions for a SCSI "Blank Check" sense key. 16904 * Has no monetary connotation.
16905 * 16906 * Context: May be called from interrupt context 16907 */ 16908 16909 static void 16910 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 16911 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16912 { 16913 struct sd_sense_info si; 16914 16915 ASSERT(un != NULL); 16916 ASSERT(mutex_owned(SD_MUTEX(un))); 16917 ASSERT(bp != NULL); 16918 ASSERT(xp != NULL); 16919 ASSERT(pktp != NULL); 16920 16921 /* 16922 * Blank check is not fatal for removable devices, therefore 16923 * it does not require a console message. 16924 */ 16925 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 16926 SCSI_ERR_FATAL; 16927 si.ssi_pfa_flag = FALSE; 16928 16929 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 16930 sd_return_failed_command(un, bp, EIO); 16931 } 16932 16933 16934 16935 16936 /* 16937 * Function: sd_sense_key_aborted_command 16938 * 16939 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 16940 * 16941 * Context: May be called from interrupt context 16942 */ 16943 16944 static void 16945 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 16946 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16947 { 16948 struct sd_sense_info si; 16949 16950 ASSERT(un != NULL); 16951 ASSERT(mutex_owned(SD_MUTEX(un))); 16952 ASSERT(bp != NULL); 16953 ASSERT(xp != NULL); 16954 ASSERT(pktp != NULL); 16955 16956 si.ssi_severity = SCSI_ERR_FATAL; 16957 si.ssi_pfa_flag = FALSE; 16958 16959 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16960 16961 /* 16962 * This really ought to be a fatal error, but we will retry anyway 16963 * as some drives report this as a spurious error. 16964 */ 16965 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16966 &si, EIO, drv_usectohz(100000), NULL); 16967 } 16968 16969 16970 16971 /* 16972 * Function: sd_sense_key_default 16973 * 16974 * Description: Default recovery action for several SCSI sense keys (basically 16975 * attempts a retry). 16976 * 16977 * Context: May be called from interrupt context 16978 */ 16979 16980 static void 16981 sd_sense_key_default(struct sd_lun *un, 16982 uint8_t *sense_datap, 16983 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 16984 { 16985 struct sd_sense_info si; 16986 uint8_t sense_key = scsi_sense_key(sense_datap); 16987 16988 ASSERT(un != NULL); 16989 ASSERT(mutex_owned(SD_MUTEX(un))); 16990 ASSERT(bp != NULL); 16991 ASSERT(xp != NULL); 16992 ASSERT(pktp != NULL); 16993 16994 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16995 16996 /* 16997 * Undecoded sense key. Attempt retries and hope that will fix 16998 * the problem. Otherwise, we're dead. 16999 */ 17000 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17001 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17002 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 17003 } 17004 17005 si.ssi_severity = SCSI_ERR_FATAL; 17006 si.ssi_pfa_flag = FALSE; 17007 17008 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17009 &si, EIO, (clock_t)0, NULL); 17010 } 17011 17012 17013 17014 /* 17015 * Function: sd_print_retry_msg 17016 * 17017 * Description: Print a message indicating the retry action being taken. 17018 * 17019 * Arguments: un - ptr to associated softstate 17020 * bp - ptr to buf(9S) for the command 17021 * arg - not used. 
17022 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17023 * or SD_NO_RETRY_ISSUED 17024 * 17025 * Context: May be called from interrupt context 17026 */ 17027 /* ARGSUSED */ 17028 static void 17029 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 17030 { 17031 struct sd_xbuf *xp; 17032 struct scsi_pkt *pktp; 17033 char *reasonp; 17034 char *msgp; 17035 17036 ASSERT(un != NULL); 17037 ASSERT(mutex_owned(SD_MUTEX(un))); 17038 ASSERT(bp != NULL); 17039 pktp = SD_GET_PKTP(bp); 17040 ASSERT(pktp != NULL); 17041 xp = SD_GET_XBUF(bp); 17042 ASSERT(xp != NULL); 17043 17044 ASSERT(!mutex_owned(&un->un_pm_mutex)); 17045 mutex_enter(&un->un_pm_mutex); 17046 if ((un->un_state == SD_STATE_SUSPENDED) || 17047 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 17048 (pktp->pkt_flags & FLAG_SILENT)) { 17049 mutex_exit(&un->un_pm_mutex); 17050 goto update_pkt_reason; 17051 } 17052 mutex_exit(&un->un_pm_mutex); 17053 17054 /* 17055 * Suppress messages if they are all the same pkt_reason; with 17056 * TQ, many (up to 256) are returned with the same pkt_reason. 17057 * If we are in panic, then suppress the retry messages. 17058 */ 17059 switch (flag) { 17060 case SD_NO_RETRY_ISSUED: 17061 msgp = "giving up"; 17062 break; 17063 case SD_IMMEDIATE_RETRY_ISSUED: 17064 case SD_DELAYED_RETRY_ISSUED: 17065 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 17066 ((pktp->pkt_reason == un->un_last_pkt_reason) && 17067 (sd_error_level != SCSI_ERR_ALL))) { 17068 return; 17069 } 17070 msgp = "retrying command"; 17071 break; 17072 default: 17073 goto update_pkt_reason; 17074 } 17075 17076 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 17077 scsi_rname(pktp->pkt_reason)); 17078 17079 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17080 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 17081 17082 update_pkt_reason: 17083 /* 17084 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 17085 * This is to prevent multiple console messages for the same failure 17086 * condition. Note that un->un_last_pkt_reason is NOT restored if & 17087 * when the command is retried successfully because there still may be 17088 * more commands coming back with the same value of pktp->pkt_reason. 17089 */ 17090 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 17091 un->un_last_pkt_reason = pktp->pkt_reason; 17092 } 17093 } 17094 17095 17096 /* 17097 * Function: sd_print_cmd_incomplete_msg 17098 * 17099 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 17100 * 17101 * Arguments: un - ptr to associated softstate 17102 * bp - ptr to buf(9S) for the command 17103 * arg - passed to sd_print_retry_msg() 17104 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17105 * or SD_NO_RETRY_ISSUED 17106 * 17107 * Context: May be called from interrupt context 17108 */ 17109 17110 static void 17111 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 17112 int code) 17113 { 17114 dev_info_t *dip; 17115 17116 ASSERT(un != NULL); 17117 ASSERT(mutex_owned(SD_MUTEX(un))); 17118 ASSERT(bp != NULL); 17119 17120 switch (code) { 17121 case SD_NO_RETRY_ISSUED: 17122 /* Command was failed. Someone turned off this target? 
*/ 17123 if (un->un_state != SD_STATE_OFFLINE) { 17124 /* 17125 * Suppress message if we are detaching and 17126 * device has been disconnected 17127 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 17128 * private interface and not part of the DDI 17129 */ 17130 dip = un->un_sd->sd_dev; 17131 if (!(DEVI_IS_DETACHING(dip) && 17132 DEVI_IS_DEVICE_REMOVED(dip))) { 17133 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17134 "disk not responding to selection\n"); 17135 } 17136 New_state(un, SD_STATE_OFFLINE); 17137 } 17138 break; 17139 17140 case SD_DELAYED_RETRY_ISSUED: 17141 case SD_IMMEDIATE_RETRY_ISSUED: 17142 default: 17143 /* Command was successfully queued for retry */ 17144 sd_print_retry_msg(un, bp, arg, code); 17145 break; 17146 } 17147 } 17148 17149 17150 /* 17151 * Function: sd_pkt_reason_cmd_incomplete 17152 * 17153 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 17154 * 17155 * Context: May be called from interrupt context 17156 */ 17157 17158 static void 17159 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 17160 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17161 { 17162 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 17163 17164 ASSERT(un != NULL); 17165 ASSERT(mutex_owned(SD_MUTEX(un))); 17166 ASSERT(bp != NULL); 17167 ASSERT(xp != NULL); 17168 ASSERT(pktp != NULL); 17169 17170 /* Do not do a reset if selection did not complete */ 17171 /* Note: Should this not just check the bit? */ 17172 if (pktp->pkt_state != STATE_GOT_BUS) { 17173 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17174 sd_reset_target(un, pktp); 17175 } 17176 17177 /* 17178 * If the target was not successfully selected, then set 17179 * SD_RETRIES_FAILFAST to indicate that we lost communication 17180 * with the target, and further retries and/or commands are 17181 * likely to take a long time. 17182 */ 17183 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 17184 flag |= SD_RETRIES_FAILFAST; 17185 } 17186 17187 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17188 17189 sd_retry_command(un, bp, flag, 17190 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17191 } 17192 17193 17194 17195 /* 17196 * Function: sd_pkt_reason_cmd_tran_err 17197 * 17198 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 17199 * 17200 * Context: May be called from interrupt context 17201 */ 17202 17203 static void 17204 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 17205 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17206 { 17207 ASSERT(un != NULL); 17208 ASSERT(mutex_owned(SD_MUTEX(un))); 17209 ASSERT(bp != NULL); 17210 ASSERT(xp != NULL); 17211 ASSERT(pktp != NULL); 17212 17213 /* 17214 * Do not reset if we got a parity error, or if 17215 * selection did not complete. 17216 */ 17217 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17218 /* Note: Should this not just check the bit for pkt_state? */ 17219 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 17220 (pktp->pkt_state != STATE_GOT_BUS)) { 17221 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17222 sd_reset_target(un, pktp); 17223 } 17224 17225 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17226 17227 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17228 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17229 } 17230 17231 17232 17233 /* 17234 * Function: sd_pkt_reason_cmd_reset 17235 * 17236 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
17237 * 17238 * Context: May be called from interrupt context 17239 */ 17240 17241 static void 17242 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 17243 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17244 { 17245 ASSERT(un != NULL); 17246 ASSERT(mutex_owned(SD_MUTEX(un))); 17247 ASSERT(bp != NULL); 17248 ASSERT(xp != NULL); 17249 ASSERT(pktp != NULL); 17250 17251 /* The target may still be running the command, so try to reset. */ 17252 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17253 sd_reset_target(un, pktp); 17254 17255 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17256 17257 /* 17258 * If pkt_reason is CMD_RESET chances are that this pkt got 17259 * reset because another target on this bus caused it. The target 17260 * that caused it should get CMD_TIMEOUT with pkt_statistics 17261 * of STAT_TIMEOUT/STAT_DEV_RESET. 17262 */ 17263 17264 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17265 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17266 } 17267 17268 17269 17270 17271 /* 17272 * Function: sd_pkt_reason_cmd_aborted 17273 * 17274 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 17275 * 17276 * Context: May be called from interrupt context 17277 */ 17278 17279 static void 17280 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 17281 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17282 { 17283 ASSERT(un != NULL); 17284 ASSERT(mutex_owned(SD_MUTEX(un))); 17285 ASSERT(bp != NULL); 17286 ASSERT(xp != NULL); 17287 ASSERT(pktp != NULL); 17288 17289 /* The target may still be running the command, so try to reset. */ 17290 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17291 sd_reset_target(un, pktp); 17292 17293 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17294 17295 /* 17296 * If pkt_reason is CMD_ABORTED chances are that this pkt got 17297 * aborted because another target on this bus caused it. The target 17298 * that caused it should get CMD_TIMEOUT with pkt_statistics 17299 * of STAT_TIMEOUT/STAT_DEV_RESET. 17300 */ 17301 17302 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17303 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17304 } 17305 17306 17307 17308 /* 17309 * Function: sd_pkt_reason_cmd_timeout 17310 * 17311 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 17312 * 17313 * Context: May be called from interrupt context 17314 */ 17315 17316 static void 17317 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 17318 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17319 { 17320 ASSERT(un != NULL); 17321 ASSERT(mutex_owned(SD_MUTEX(un))); 17322 ASSERT(bp != NULL); 17323 ASSERT(xp != NULL); 17324 ASSERT(pktp != NULL); 17325 17326 17327 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17328 sd_reset_target(un, pktp); 17329 17330 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17331 17332 /* 17333 * A command timeout indicates that we could not establish 17334 * communication with the target, so set SD_RETRIES_FAILFAST 17335 * as further retries/commands are likely to take a long time. 17336 */ 17337 sd_retry_command(un, bp, 17338 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 17339 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17340 } 17341 17342 17343 17344 /* 17345 * Function: sd_pkt_reason_cmd_unx_bus_free 17346 * 17347 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
17348 * 17349 * Context: May be called from interrupt context 17350 */ 17351 17352 static void 17353 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 17354 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17355 { 17356 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 17357 17358 ASSERT(un != NULL); 17359 ASSERT(mutex_owned(SD_MUTEX(un))); 17360 ASSERT(bp != NULL); 17361 ASSERT(xp != NULL); 17362 ASSERT(pktp != NULL); 17363 17364 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17365 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17366 17367 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 17368 sd_print_retry_msg : NULL; 17369 17370 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17371 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17372 } 17373 17374 17375 /* 17376 * Function: sd_pkt_reason_cmd_tag_reject 17377 * 17378 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 17379 * 17380 * Context: May be called from interrupt context 17381 */ 17382 17383 static void 17384 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 17385 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17386 { 17387 ASSERT(un != NULL); 17388 ASSERT(mutex_owned(SD_MUTEX(un))); 17389 ASSERT(bp != NULL); 17390 ASSERT(xp != NULL); 17391 ASSERT(pktp != NULL); 17392 17393 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17394 pktp->pkt_flags = 0; 17395 un->un_tagflags = 0; 17396 if (un->un_f_opt_queueing == TRUE) { 17397 un->un_throttle = min(un->un_throttle, 3); 17398 } else { 17399 un->un_throttle = 1; 17400 } 17401 mutex_exit(SD_MUTEX(un)); 17402 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 17403 mutex_enter(SD_MUTEX(un)); 17404 17405 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17406 17407 /* Legacy behavior not to check retry counts here. */ 17408 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 17409 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17410 } 17411 17412 17413 /* 17414 * Function: sd_pkt_reason_default 17415 * 17416 * Description: Default recovery actions for SCSA pkt_reason values that 17417 * do not have more explicit recovery actions. 17418 * 17419 * Context: May be called from interrupt context 17420 */ 17421 17422 static void 17423 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 17424 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17425 { 17426 ASSERT(un != NULL); 17427 ASSERT(mutex_owned(SD_MUTEX(un))); 17428 ASSERT(bp != NULL); 17429 ASSERT(xp != NULL); 17430 ASSERT(pktp != NULL); 17431 17432 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17433 sd_reset_target(un, pktp); 17434 17435 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17436 17437 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17438 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17439 } 17440 17441 17442 17443 /* 17444 * Function: sd_pkt_status_check_condition 17445 * 17446 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
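 * If auto request sense (ARQ) is enabled, the sense data arrived with the failed packet and the command is simply retried; otherwise a REQUEST SENSE command must be issued first to fetch the sense data from the target.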
17447 * 17448 * Context: May be called from interrupt context 17449 */ 17450 17451 static void 17452 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 17453 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17454 { 17455 ASSERT(un != NULL); 17456 ASSERT(mutex_owned(SD_MUTEX(un))); 17457 ASSERT(bp != NULL); 17458 ASSERT(xp != NULL); 17459 ASSERT(pktp != NULL); 17460 17461 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 17462 "entry: buf:0x%p xp:0x%p\n", bp, xp); 17463 17464 /* 17465 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 17466 * command will be retried after the request sense). Otherwise, retry 17467 * the command. Note: we are issuing the request sense even though the 17468 * retry limit may have been reached for the failed command. 17469 */ 17470 if (un->un_f_arq_enabled == FALSE) { 17471 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 17472 "no ARQ, sending request sense command\n"); 17473 sd_send_request_sense_command(un, bp, pktp); 17474 } else { 17475 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 17476 "ARQ, retrying request sense command\n"); 17477 #if defined(__i386) || defined(__amd64) 17478 /* 17479 * The SD_RETRY_DELAY value needs to be adjusted here 17480 * when SD_RETRY_DELAY changes in sddef.h 17481 */ 17482 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17483 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, 17484 NULL); 17485 #else 17486 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 17487 EIO, SD_RETRY_DELAY, NULL); 17488 #endif 17489 } 17490 17491 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 17492 } 17493 17494 17495 /* 17496 * Function: sd_pkt_status_busy 17497 * 17498 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 17499 * 17500 * Context: May be called from interrupt context 17501 */ 17502 17503 static void 17504 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17505 struct scsi_pkt *pktp) 17506 { 17507 ASSERT(un != NULL); 17508 ASSERT(mutex_owned(SD_MUTEX(un))); 17509 ASSERT(bp != NULL); 17510 ASSERT(xp != NULL); 17511 ASSERT(pktp != NULL); 17512 17513 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17514 "sd_pkt_status_busy: entry\n"); 17515 17516 /* If retries are exhausted, just fail the command. */ 17517 if (xp->xb_retry_count >= un->un_busy_retry_count) { 17518 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17519 "device busy too long\n"); 17520 sd_return_failed_command(un, bp, EIO); 17521 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17522 "sd_pkt_status_busy: exit\n"); 17523 return; 17524 } 17525 xp->xb_retry_count++; 17526 17527 /* 17528 * Try to reset the target. However, we do not want to perform 17529 * more than one reset if the device continues to fail. The reset 17530 * will be performed when the retry count reaches the reset 17531 * threshold. This threshold should be set such that at least 17532 * one retry is issued before the reset is performed. 17533 */ 17534 if (xp->xb_retry_count == 17535 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 17536 int rval = 0; 17537 mutex_exit(SD_MUTEX(un)); 17538 if (un->un_f_allow_bus_device_reset == TRUE) { 17539 /* 17540 * First try to reset the LUN; if we cannot then 17541 * try to reset the target.
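 * (The escalation order here is RESET_LUN, then RESET_TARGET, and finally RESET_ALL as a last resort, as the code below shows.)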
17542 */ 17543 if (un->un_f_lun_reset_enabled == TRUE) { 17544 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17545 "sd_pkt_status_busy: RESET_LUN\n"); 17546 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17547 } 17548 if (rval == 0) { 17549 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17550 "sd_pkt_status_busy: RESET_TARGET\n"); 17551 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17552 } 17553 } 17554 if (rval == 0) { 17555 /* 17556 * If the RESET_LUN and/or RESET_TARGET failed, 17557 * try RESET_ALL 17558 */ 17559 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17560 "sd_pkt_status_busy: RESET_ALL\n"); 17561 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 17562 } 17563 mutex_enter(SD_MUTEX(un)); 17564 if (rval == 0) { 17565 /* 17566 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 17567 * At this point we give up & fail the command. 17568 */ 17569 sd_return_failed_command(un, bp, EIO); 17570 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17571 "sd_pkt_status_busy: exit (failed cmd)\n"); 17572 return; 17573 } 17574 } 17575 17576 /* 17577 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 17578 * we have already checked the retry counts above. 17579 */ 17580 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 17581 EIO, un->un_busy_timeout, NULL); 17582 17583 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17584 "sd_pkt_status_busy: exit\n"); 17585 } 17586 17587 17588 /* 17589 * Function: sd_pkt_status_reservation_conflict 17590 * 17591 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 17592 * command status. 17593 * 17594 * Context: May be called from interrupt context 17595 */ 17596 17597 static void 17598 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 17599 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17600 { 17601 ASSERT(un != NULL); 17602 ASSERT(mutex_owned(SD_MUTEX(un))); 17603 ASSERT(bp != NULL); 17604 ASSERT(xp != NULL); 17605 ASSERT(pktp != NULL); 17606 17607 /* 17608 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 17609 * conflict could be due to various reasons like incorrect keys, not 17610 * registered or not reserved etc. So, we return EACCES to the caller. 17611 */ 17612 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 17613 int cmd = SD_GET_PKT_OPCODE(pktp); 17614 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 17615 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 17616 sd_return_failed_command(un, bp, EACCES); 17617 return; 17618 } 17619 } 17620 17621 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 17622 17623 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 17624 if (sd_failfast_enable != 0) { 17625 /* By definition, we must panic here.... */ 17626 sd_panic_for_res_conflict(un); 17627 /*NOTREACHED*/ 17628 } 17629 SD_ERROR(SD_LOG_IO, un, 17630 "sd_handle_resv_conflict: Disk Reserved\n"); 17631 sd_return_failed_command(un, bp, EACCES); 17632 return; 17633 } 17634 17635 /* 17636 * 1147670: retry only if sd_retry_on_reservation_conflict 17637 * property is set (default is 1). Retries will not succeed 17638 * on a disk reserved by another initiator. HA systems 17639 * may reset this via sd.conf to avoid these retries. 17640 * 17641 * Note: The legacy return code for this failure is EIO, however EACCES 17642 * seems more appropriate for a reservation conflict. 
17643 */ 17644 if (sd_retry_on_reservation_conflict == 0) { 17645 SD_ERROR(SD_LOG_IO, un, 17646 "sd_handle_resv_conflict: Device Reserved\n"); 17647 sd_return_failed_command(un, bp, EIO); 17648 return; 17649 } 17650 17651 /* 17652 * Retry the command if we can. 17653 * 17654 * Note: The legacy return code for this failure is EIO, however EACCES 17655 * seems more appropriate for a reservation conflict. 17656 */ 17657 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 17658 (clock_t)2, NULL); 17659 } 17660 17661 17662 17663 /* 17664 * Function: sd_pkt_status_qfull 17665 * 17666 * Description: Handle a QUEUE FULL condition from the target. This can 17667 * occur if the HBA does not handle the queue full condition. 17668 * (Basically this means third-party HBAs as Sun HBAs will 17669 * handle the queue full condition.) Note that if there are 17670 * some commands already in the transport, then the queue full 17671 * has occurred because the queue for this nexus is actually 17672 * full. If there are no commands in the transport, then the 17673 * queue full is resulting from some other initiator or lun 17674 * consuming all the resources at the target. 17675 * 17676 * Context: May be called from interrupt context 17677 */ 17678 17679 static void 17680 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 17681 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17682 { 17683 ASSERT(un != NULL); 17684 ASSERT(mutex_owned(SD_MUTEX(un))); 17685 ASSERT(bp != NULL); 17686 ASSERT(xp != NULL); 17687 ASSERT(pktp != NULL); 17688 17689 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17690 "sd_pkt_status_qfull: entry\n"); 17691 17692 /* 17693 * Just lower the QFULL throttle and retry the command. Note that 17694 * we do not limit the number of retries here. 17695 */ 17696 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 17697 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 17698 SD_RESTART_TIMEOUT, NULL); 17699 17700 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17701 "sd_pkt_status_qfull: exit\n"); 17702 } 17703 17704 17705 /* 17706 * Function: sd_reset_target 17707 * 17708 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 17709 * RESET_TARGET, or RESET_ALL. 17710 * 17711 * Context: May be called under interrupt context. 17712 */ 17713 17714 static void 17715 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 17716 { 17717 int rval = 0; 17718 17719 ASSERT(un != NULL); 17720 ASSERT(mutex_owned(SD_MUTEX(un))); 17721 ASSERT(pktp != NULL); 17722 17723 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 17724 17725 /* 17726 * No need to reset if the transport layer has already done so. 
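 * (Any of STAT_BUS_RESET, STAT_DEV_RESET or STAT_ABORTED in pkt_statistics indicates that the transport has already intervened.)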
17727 */ 17728 if ((pktp->pkt_statistics & 17729 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 17730 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17731 "sd_reset_target: no reset\n"); 17732 return; 17733 } 17734 17735 mutex_exit(SD_MUTEX(un)); 17736 17737 if (un->un_f_allow_bus_device_reset == TRUE) { 17738 if (un->un_f_lun_reset_enabled == TRUE) { 17739 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17740 "sd_reset_target: RESET_LUN\n"); 17741 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 17742 } 17743 if (rval == 0) { 17744 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17745 "sd_reset_target: RESET_TARGET\n"); 17746 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 17747 } 17748 } 17749 17750 if (rval == 0) { 17751 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17752 "sd_reset_target: RESET_ALL\n"); 17753 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 17754 } 17755 17756 mutex_enter(SD_MUTEX(un)); 17757 17758 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 17759 } 17760 17761 /* 17762 * Function: sd_target_change_task 17763 * 17764 * Description: Handle dynamic target change 17765 * 17766 * Context: Executes in a taskq() thread context 17767 */ 17768 static void 17769 sd_target_change_task(void *arg) 17770 { 17771 struct sd_lun *un = arg; 17772 uint64_t capacity; 17773 diskaddr_t label_cap; 17774 uint_t lbasize; 17775 17776 ASSERT(un != NULL); 17777 ASSERT(!mutex_owned(SD_MUTEX(un))); 17778 17779 if ((un->un_f_blockcount_is_valid == FALSE) || 17780 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 17781 return; 17782 } 17783 17784 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 17785 &lbasize, SD_PATH_DIRECT) != 0) { 17786 SD_ERROR(SD_LOG_ERROR, un, 17787 "sd_target_change_task: fail to read capacity\n"); 17788 return; 17789 } 17790 17791 mutex_enter(SD_MUTEX(un)); 17792 if (capacity <= un->un_blockcount) { 17793 mutex_exit(SD_MUTEX(un)); 17794 return; 17795 } 17796 17797 sd_update_block_info(un, lbasize, capacity); 17798 mutex_exit(SD_MUTEX(un)); 17799 17800 /* 17801 * If lun is EFI labeled and lun capacity is greater than the 17802 * capacity contained in the label, log a sys event. 17803 */ 17804 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 17805 (void*)SD_PATH_DIRECT) == 0) { 17806 mutex_enter(SD_MUTEX(un)); 17807 if (un->un_f_blockcount_is_valid && 17808 un->un_blockcount > label_cap) { 17809 mutex_exit(SD_MUTEX(un)); 17810 sd_log_lun_expansion_event(un, KM_SLEEP); 17811 } else { 17812 mutex_exit(SD_MUTEX(un)); 17813 } 17814 } 17815 } 17816 17817 /* 17818 * Function: sd_log_lun_expansion_event 17819 * 17820 * Description: Log lun expansion sys event 17821 * 17822 * Context: Never called from interrupt context 17823 */ 17824 static void 17825 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 17826 { 17827 int err; 17828 char *path; 17829 nvlist_t *dle_attr_list; 17830 17831 /* Allocate and build sysevent attribute list */ 17832 err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 17833 if (err != 0) { 17834 SD_ERROR(SD_LOG_ERROR, un, 17835 "sd_log_lun_expansion_event: fail to allocate space\n"); 17836 return; 17837 } 17838 17839 path = kmem_alloc(MAXPATHLEN, km_flag); 17840 if (path == NULL) { 17841 nvlist_free(dle_attr_list); 17842 SD_ERROR(SD_LOG_ERROR, un, 17843 "sd_log_lun_expansion_event: fail to allocate space\n"); 17844 return; 17845 } 17846 /* 17847 * Add path attribute to identify the lun. 17848 * We are using minor node 'a' as the sysevent attribute. 
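 * The resulting DEV_PHYS_PATH attribute has the form (hypothetical example): * /devices/pci@0,0/pci1000,3060@3/disk@0,0:a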
17849 */ 17850 (void) snprintf(path, MAXPATHLEN, "/devices"); 17851 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 17852 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 17853 ":a"); 17854 17855 err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path); 17856 if (err != 0) { 17857 nvlist_free(dle_attr_list); 17858 kmem_free(path, MAXPATHLEN); 17859 SD_ERROR(SD_LOG_ERROR, un, 17860 "sd_log_lun_expansion_event: fail to add attribute\n"); 17861 return; 17862 } 17863 17864 /* Log dynamic lun expansion sysevent */ 17865 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 17866 ESC_DEV_DLE, dle_attr_list, NULL, km_flag); 17867 if (err != DDI_SUCCESS) { 17868 SD_ERROR(SD_LOG_ERROR, un, 17869 "sd_log_lun_expansion_event: fail to log sysevent\n"); 17870 } 17871 17872 nvlist_free(dle_attr_list); 17873 kmem_free(path, MAXPATHLEN); 17874 } 17875 17876 /* 17877 * Function: sd_media_change_task 17878 * 17879 * Description: Recovery action for a CDROM to become available. 17880 * 17881 * Context: Executes in a taskq() thread context 17882 */ 17883 17884 static void 17885 sd_media_change_task(void *arg) 17886 { 17887 struct scsi_pkt *pktp = arg; 17888 struct sd_lun *un; 17889 struct buf *bp; 17890 struct sd_xbuf *xp; 17891 int err = 0; 17892 int retry_count = 0; 17893 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 17894 struct sd_sense_info si; 17895 17896 ASSERT(pktp != NULL); 17897 bp = (struct buf *)pktp->pkt_private; 17898 ASSERT(bp != NULL); 17899 xp = SD_GET_XBUF(bp); 17900 ASSERT(xp != NULL); 17901 un = SD_GET_UN(bp); 17902 ASSERT(un != NULL); 17903 ASSERT(!mutex_owned(SD_MUTEX(un))); 17904 ASSERT(un->un_f_monitor_media_state); 17905 17906 si.ssi_severity = SCSI_ERR_INFO; 17907 si.ssi_pfa_flag = FALSE; 17908 17909 /* 17910 * When a reset is issued on a CDROM, it takes a long time to 17911 * recover. The first few attempts to read capacity and other things 17912 * related to handling unit attention fail (with ASC 0x4 and 17913 * ASCQ 0x1). In that case we want to retry enough times, but we 17914 * want to limit the retries in other cases of genuine failure, 17915 * such as no media in the drive. 17916 */ 17917 while (retry_count++ < retry_limit) { 17918 if ((err = sd_handle_mchange(un)) == 0) { 17919 break; 17920 } 17921 if (err == EAGAIN) { 17922 retry_limit = SD_UNIT_ATTENTION_RETRY; 17923 } 17924 /* Sleep for 0.5 sec. & try again */ 17925 delay(drv_usectohz(500000)); 17926 } 17927 17928 /* 17929 * Dispatch (retry or fail) the original command here, 17930 * along with appropriate console messages.... 17931 * 17932 * Must grab the mutex before calling sd_retry_command, 17933 * sd_print_sense_msg and sd_return_failed_command. 17934 */ 17935 mutex_enter(SD_MUTEX(un)); 17936 if (err != SD_CMD_SUCCESS) { 17937 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17938 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17939 si.ssi_severity = SCSI_ERR_FATAL; 17940 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17941 sd_return_failed_command(un, bp, EIO); 17942 } else { 17943 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17944 &si, EIO, (clock_t)0, NULL); 17945 } 17946 mutex_exit(SD_MUTEX(un)); 17947 } 17948 17949 17950 17951 /* 17952 * Function: sd_handle_mchange 17953 * 17954 * Description: Perform geometry validation & other recovery when CDROM 17955 * has been removed from drive.
17956 * 17957 * Return Code: 0 for success 17958 * errno-type return code of either sd_send_scsi_DOORLOCK() or 17959 * sd_send_scsi_READ_CAPACITY() 17960 * 17961 * Context: Executes in a taskq() thread context 17962 */ 17963 17964 static int 17965 sd_handle_mchange(struct sd_lun *un) 17966 { 17967 uint64_t capacity; 17968 uint32_t lbasize; 17969 int rval; 17970 17971 ASSERT(!mutex_owned(SD_MUTEX(un))); 17972 ASSERT(un->un_f_monitor_media_state); 17973 17974 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 17975 SD_PATH_DIRECT_PRIORITY)) != 0) { 17976 return (rval); 17977 } 17978 17979 mutex_enter(SD_MUTEX(un)); 17980 sd_update_block_info(un, lbasize, capacity); 17981 17982 if (un->un_errstats != NULL) { 17983 struct sd_errstats *stp = 17984 (struct sd_errstats *)un->un_errstats->ks_data; 17985 stp->sd_capacity.value.ui64 = (uint64_t) 17986 ((uint64_t)un->un_blockcount * 17987 (uint64_t)un->un_tgt_blocksize); 17988 } 17989 17990 17991 /* 17992 * Check if the media in the device is writable or not 17993 */ 17994 if (ISCD(un)) 17995 sd_check_for_writable_cd(un, SD_PATH_DIRECT_PRIORITY); 17996 17997 /* 17998 * Note: Maybe let the strategy/partitioning chain worry about getting 17999 * valid geometry. 18000 */ 18001 mutex_exit(SD_MUTEX(un)); 18002 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 18003 18004 18005 if (cmlb_validate(un->un_cmlbhandle, 0, 18006 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 18007 return (EIO); 18008 } else { 18009 if (un->un_f_pkstats_enabled) { 18010 sd_set_pstats(un); 18011 SD_TRACE(SD_LOG_IO_PARTITION, un, 18012 "sd_handle_mchange: un:0x%p pstats created and " 18013 "set\n", un); 18014 } 18015 } 18016 18017 18018 /* 18019 * Try to lock the door 18020 */ 18021 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 18022 SD_PATH_DIRECT_PRIORITY)); 18023 } 18024 18025 18026 /* 18027 * Function: sd_send_scsi_DOORLOCK 18028 * 18029 * Description: Issue the scsi DOOR LOCK command 18030 * 18031 * Arguments: un - pointer to driver soft state (unit) structure for 18032 * this target. 18033 * flag - SD_REMOVAL_ALLOW 18034 * SD_REMOVAL_PREVENT 18035 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18036 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18037 * to use the USCSI "direct" chain and bypass the normal 18038 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18039 * command is issued as part of an error recovery action. 18040 * 18041 * Return Code: 0 - Success 18042 * errno return code from sd_send_scsi_cmd() 18043 * 18044 * Context: Can sleep. 18045 */ 18046 18047 static int 18048 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 18049 { 18050 union scsi_cdb cdb; 18051 struct uscsi_cmd ucmd_buf; 18052 struct scsi_extended_sense sense_buf; 18053 int status; 18054 18055 ASSERT(un != NULL); 18056 ASSERT(!mutex_owned(SD_MUTEX(un))); 18057 18058 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 18059 18060 /* already determined doorlock is not supported, fake success */ 18061 if (un->un_f_doorlock_supported == FALSE) { 18062 return (0); 18063 } 18064 18065 /* 18066 * If we are ejecting and see an SD_REMOVAL_PREVENT 18067 * ignore the command so we can complete the eject 18068 * operation. 
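 *
 * A sketch of the CDB built below, assuming the SD_REMOVAL_ALLOW and
 * SD_REMOVAL_PREVENT values map directly to the PREVENT ALLOW MEDIUM
 * REMOVAL "prevent" field (as the byte 4 assignment suggests):
 *
 *	byte 0:	0x1E (SCMD_DOORLOCK, i.e. PREVENT ALLOW MEDIUM REMOVAL)
 *	byte 4:	0x01 to lock the door, 0x00 to allow removal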
18069  */
18070 	if (flag == SD_REMOVAL_PREVENT) {
18071 		mutex_enter(SD_MUTEX(un));
18072 		if (un->un_f_ejecting == TRUE) {
18073 			mutex_exit(SD_MUTEX(un));
18074 			return (EAGAIN);
18075 		}
18076 		mutex_exit(SD_MUTEX(un));
18077 	}
18078 
18079 	bzero(&cdb, sizeof (cdb));
18080 	bzero(&ucmd_buf, sizeof (ucmd_buf));
18081 
18082 	cdb.scc_cmd = SCMD_DOORLOCK;
18083 	cdb.cdb_opaque[4] = (uchar_t)flag;
18084 
18085 	ucmd_buf.uscsi_cdb = (char *)&cdb;
18086 	ucmd_buf.uscsi_cdblen = CDB_GROUP0;
18087 	ucmd_buf.uscsi_bufaddr = NULL;
18088 	ucmd_buf.uscsi_buflen = 0;
18089 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
18090 	ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
18091 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
18092 	ucmd_buf.uscsi_timeout = 15;
18093 
18094 	SD_TRACE(SD_LOG_IO, un,
18095 	    "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n");
18096 
18097 	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18098 	    UIO_SYSSPACE, path_flag);
18099 
18100 	if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
18101 	    (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18102 	    (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
18103 		/* fake success and skip subsequent doorlock commands */
18104 		un->un_f_doorlock_supported = FALSE;
18105 		return (0);
18106 	}
18107 
18108 	return (status);
18109 }
18110 
18111 /*
18112  * Function: sd_send_scsi_READ_CAPACITY
18113  *
18114  * Description: This routine uses the scsi READ CAPACITY command to determine
18115  *		the device capacity in number of blocks and the device native
18116  *		block size. If this function returns a failure, then the
18117  *		values in *capp and *lbap are undefined. If the capacity
18118  *		returned is 0xffffffff then the lun is too large for a
18119  *		normal READ CAPACITY command and the results of a
18120  *		READ CAPACITY 16 will be used instead.
18121  *
18122  * Arguments: un - ptr to soft state struct for the target
18123  *		capp - ptr to unsigned 64-bit variable to receive the
18124  *			capacity value from the command.
18125  *		lbap - ptr to unsigned 32-bit variable to receive the
18126  *			block size value from the command.
18127  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
18128  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
18129  *			to use the USCSI "direct" chain and bypass the normal
18130  *			command waitq. SD_PATH_DIRECT_PRIORITY is used when this
18131  *			command is issued as part of an error recovery action.
18132  *
18133  * Return Code: 0 - Success
18134  *		EIO - IO error
18135  *		EACCES - Reservation conflict detected
18136  *		EAGAIN - Device is becoming ready
18137  *		errno return code from sd_send_scsi_cmd()
18138  *
18139  * Context: Can sleep. Blocks until command completes.
18140  */
18141 
18142 #define	SD_CAPACITY_SIZE	sizeof (struct scsi_capacity)
18143 
18144 static int
18145 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap,
18146 	int path_flag)
18147 {
18148 	struct	scsi_extended_sense	sense_buf;
18149 	struct	uscsi_cmd	ucmd_buf;
18150 	union	scsi_cdb	cdb;
18151 	uint32_t	*capacity_buf;
18152 	uint64_t	capacity;
18153 	uint32_t	lbasize;
18154 	int		status;
18155 
18156 	ASSERT(un != NULL);
18157 	ASSERT(!mutex_owned(SD_MUTEX(un)));
18158 	ASSERT(capp != NULL);
18159 	ASSERT(lbap != NULL);
18160 
18161 	SD_TRACE(SD_LOG_IO, un,
18162 	    "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
18163 
18164 	/*
18165 	 * First send a READ_CAPACITY command to the target.
18166 	 * (This command is mandatory under SCSI-2.)
18167 	 *
18168 	 * Set up the CDB for the READ_CAPACITY command.
The Partial 18169 * Medium Indicator bit is cleared. The address field must be 18170 * zero if the PMI bit is zero. 18171 */ 18172 bzero(&cdb, sizeof (cdb)); 18173 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18174 18175 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 18176 18177 cdb.scc_cmd = SCMD_READ_CAPACITY; 18178 18179 ucmd_buf.uscsi_cdb = (char *)&cdb; 18180 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18181 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 18182 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 18183 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18184 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 18185 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18186 ucmd_buf.uscsi_timeout = 60; 18187 18188 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18189 UIO_SYSSPACE, path_flag); 18190 18191 switch (status) { 18192 case 0: 18193 /* Return failure if we did not get valid capacity data. */ 18194 if (ucmd_buf.uscsi_resid != 0) { 18195 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18196 return (EIO); 18197 } 18198 18199 /* 18200 * Read capacity and block size from the READ CAPACITY 10 data. 18201 * This data may be adjusted later due to device specific 18202 * issues. 18203 * 18204 * According to the SCSI spec, the READ CAPACITY 10 18205 * command returns the following: 18206 * 18207 * bytes 0-3: Maximum logical block address available. 18208 * (MSB in byte:0 & LSB in byte:3) 18209 * 18210 * bytes 4-7: Block length in bytes 18211 * (MSB in byte:4 & LSB in byte:7) 18212 * 18213 */ 18214 capacity = BE_32(capacity_buf[0]); 18215 lbasize = BE_32(capacity_buf[1]); 18216 18217 /* 18218 * Done with capacity_buf 18219 */ 18220 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18221 18222 /* 18223 * if the reported capacity is set to all 0xf's, then 18224 * this disk is too large and requires SBC-2 commands. 18225 * Reissue the request using READ CAPACITY 16. 18226 */ 18227 if (capacity == 0xffffffff) { 18228 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 18229 &lbasize, path_flag); 18230 if (status != 0) { 18231 return (status); 18232 } 18233 } 18234 break; /* Success! */ 18235 case EIO: 18236 switch (ucmd_buf.uscsi_status) { 18237 case STATUS_RESERVATION_CONFLICT: 18238 status = EACCES; 18239 break; 18240 case STATUS_CHECK: 18241 /* 18242 * Check condition; look for ASC/ASCQ of 0x04/0x01 18243 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 18244 */ 18245 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18246 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 18247 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 18248 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18249 return (EAGAIN); 18250 } 18251 break; 18252 default: 18253 break; 18254 } 18255 /* FALLTHRU */ 18256 default: 18257 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18258 return (status); 18259 } 18260 18261 /* 18262 * Some ATAPI CD-ROM drives report inaccurate LBA size values 18263 * (2352 and 0 are common) so for these devices always force the value 18264 * to 2048 as required by the ATAPI specs. 18265 */ 18266 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 18267 lbasize = 2048; 18268 } 18269 18270 /* 18271 * Get the maximum LBA value from the READ CAPACITY data. 18272 * Here we assume that the Partial Medium Indicator (PMI) bit 18273 * was cleared when issuing the command. This means that the LBA 18274 * returned from the device is the LBA of the last logical block 18275 * on the logical unit. The actual logical block count will be 18276 * this value plus one. 
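	 *
	 * A worked example with illustrative numbers only: if the device
	 * reports a maximum LBA of 0x0003FFFF and a block length of 2048,
	 * it holds 0x00040000 logical blocks; with a 512-byte
	 * un_sys_blocksize, the scaling below yields
	 * 0x00040000 * (2048 / 512) = 0x00100000 system blocks.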
18277 	 *
18278 	 * Currently the capacity is saved in terms of un->un_sys_blocksize,
18279 	 * so scale the capacity value to reflect this.
18280 	 */
18281 	capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);
18282 
18283 	/*
18284 	 * Copy the values from the READ CAPACITY command into the space
18285 	 * provided by the caller.
18286 	 */
18287 	*capp = capacity;
18288 	*lbap = lbasize;
18289 
18290 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
18291 	    "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
18292 
18293 	/*
18294 	 * Both the lbasize and capacity from the device must be nonzero,
18295 	 * otherwise we assume that the values are not valid and return
18296 	 * failure to the caller. (4203735)
18297 	 */
18298 	if ((capacity == 0) || (lbasize == 0)) {
18299 		return (EIO);
18300 	}
18301 
18302 	return (0);
18303 }
18304 
18305 /*
18306  * Function: sd_send_scsi_READ_CAPACITY_16
18307  *
18308  * Description: This routine uses the scsi READ CAPACITY 16 command to
18309  *		determine the device capacity in number of blocks and the
18310  *		device native block size. If this function returns a failure,
18311  *		then the values in *capp and *lbap are undefined.
18312  *		This routine should only be called through
18313  *		sd_send_scsi_READ_CAPACITY, which will apply any device
18314  *		specific adjustments to capacity and lbasize.
18315  *
18316  * Arguments: un - ptr to soft state struct for the target
18317  *		capp - ptr to unsigned 64-bit variable to receive the
18318  *			capacity value from the command.
18319  *		lbap - ptr to unsigned 32-bit variable to receive the
18320  *			block size value from the command.
18321  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
18322  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
18323  *			to use the USCSI "direct" chain and bypass the normal
18324  *			command waitq. SD_PATH_DIRECT_PRIORITY is used when
18325  *			this command is issued as part of an error recovery
18326  *			action.
18327  *
18328  * Return Code: 0 - Success
18329  *		EIO - IO error
18330  *		EACCES - Reservation conflict detected
18331  *		EAGAIN - Device is becoming ready
18332  *		errno return code from sd_send_scsi_cmd()
18333  *
18334  * Context: Can sleep. Blocks until command completes.
18335  */
18336 
18337 #define	SD_CAPACITY_16_SIZE	sizeof (struct scsi_capacity_16)
18338 
18339 static int
18340 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
18341 	uint32_t *lbap, int path_flag)
18342 {
18343 	struct	scsi_extended_sense	sense_buf;
18344 	struct	uscsi_cmd	ucmd_buf;
18345 	union	scsi_cdb	cdb;
18346 	uint64_t	*capacity16_buf;
18347 	uint64_t	capacity;
18348 	uint32_t	lbasize;
18349 	int		status;
18350 
18351 	ASSERT(un != NULL);
18352 	ASSERT(!mutex_owned(SD_MUTEX(un)));
18353 	ASSERT(capp != NULL);
18354 	ASSERT(lbap != NULL);
18355 
18356 	SD_TRACE(SD_LOG_IO, un,
18357 	    "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
18358 
18359 	/*
18360 	 * First send a READ_CAPACITY_16 command to the target.
18361 	 *
18362 	 * Set up the CDB for the READ_CAPACITY_16 command.  The Partial
18363 	 * Medium Indicator bit is cleared.  The address field must be
18364 	 * zero if the PMI bit is zero.
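	 *
	 * For reference, a sketch of the CDB built below under the standard
	 * SBC-2 encoding (assuming SSVC_ACTION_READ_CAPACITY_G4 is the 0x10
	 * service action):
	 *
	 *	byte 0:		0x9E (SERVICE ACTION IN(16))
	 *	byte 1:		0x10 (READ CAPACITY(16) service action)
	 *	bytes 10-13:	allocation length, filled in via FORMG4COUNT()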
18365 	 */
18366 	bzero(&cdb, sizeof (cdb));
18367 	bzero(&ucmd_buf, sizeof (ucmd_buf));
18368 
18369 	capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
18370 
18371 	ucmd_buf.uscsi_cdb = (char *)&cdb;
18372 	ucmd_buf.uscsi_cdblen = CDB_GROUP4;
18373 	ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
18374 	ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
18375 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
18376 	ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
18377 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
18378 	ucmd_buf.uscsi_timeout = 60;
18379 
18380 	/*
18381 	 * Read Capacity (16) is a Service Action In command.  One
18382 	 * command byte (0x9E) is overloaded for multiple operations,
18383 	 * with the second CDB byte specifying the desired operation.
18384 	 */
18385 	cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
18386 	cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
18387 
18388 	/*
18389 	 * Fill in allocation length field
18390 	 */
18391 	FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
18392 
18393 	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18394 	    UIO_SYSSPACE, path_flag);
18395 
18396 	switch (status) {
18397 	case 0:
18398 		/* Return failure if we did not get valid capacity data. */
18399 		if (ucmd_buf.uscsi_resid > 20) {
18400 			kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18401 			return (EIO);
18402 		}
18403 
18404 		/*
18405 		 * Read capacity and block size from the READ CAPACITY 16 data.
18406 		 * This data may be adjusted later due to device specific
18407 		 * issues.
18408 		 *
18409 		 * According to the SCSI spec, the READ CAPACITY 16
18410 		 * command returns the following:
18411 		 *
18412 		 *  bytes 0-7: Maximum logical block address available.
18413 		 *		(MSB in byte:0 & LSB in byte:7)
18414 		 *
18415 		 *  bytes 8-11: Block length in bytes
18416 		 *		(MSB in byte:8 & LSB in byte:11)
18417 		 *
18418 		 */
18419 		capacity = BE_64(capacity16_buf[0]);
18420 		lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
18421 
18422 		/*
18423 		 * Done with capacity16_buf
18424 		 */
18425 		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18426 
18427 		/*
18428 		 * If the reported capacity is set to all 0xf's, then
18429 		 * this disk is too large.  This could only happen with
18430 		 * a device that supports LBAs larger than 64 bits which
18431 		 * are not defined by any current T10 standards.
18432 		 */
18433 		if (capacity == 0xffffffffffffffff) {
18434 			return (EIO);
18435 		}
18436 		break;	/* Success! */
18437 	case EIO:
18438 		switch (ucmd_buf.uscsi_status) {
18439 		case STATUS_RESERVATION_CONFLICT:
18440 			status = EACCES;
18441 			break;
18442 		case STATUS_CHECK:
18443 			/*
18444 			 * Check condition; look for ASC/ASCQ of 0x04/0x01
18445 			 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
18446 			 */
18447 			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18448 			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
18449 			    (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
18450 				kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18451 				return (EAGAIN);
18452 			}
18453 			break;
18454 		default:
18455 			break;
18456 		}
18457 		/* FALLTHRU */
18458 	default:
18459 		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18460 		return (status);
18461 	}
18462 
18463 	*capp = capacity;
18464 	*lbap = lbasize;
18465 
18466 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
18467 	    "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
18468 
18469 	return (0);
18470 }
18471 
18472 
18473 /*
18474  * Function: sd_send_scsi_START_STOP_UNIT
18475  *
18476  * Description: Issue a scsi START STOP UNIT command to the target.
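 *		The flag value is copied directly into CDB byte 4 below, so
 *		the SD_TARGET_* values are assumed to match the SBC
 *		START/LOEJ encoding: 0x01 starts the unit, 0x00 stops it,
 *		and 0x02 (LOEJ) ejects the medium.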
18477 * 18478 * Arguments: un - pointer to driver soft state (unit) structure for 18479 * this target. 18480 * flag - SD_TARGET_START 18481 * SD_TARGET_STOP 18482 * SD_TARGET_EJECT 18483 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18484 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18485 * to use the USCSI "direct" chain and bypass the normal 18486 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18487 * command is issued as part of an error recovery action. 18488 * 18489 * Return Code: 0 - Success 18490 * EIO - IO error 18491 * EACCES - Reservation conflict detected 18492 * ENXIO - Not Ready, medium not present 18493 * errno return code from sd_send_scsi_cmd() 18494 * 18495 * Context: Can sleep. 18496 */ 18497 18498 static int 18499 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 18500 { 18501 struct scsi_extended_sense sense_buf; 18502 union scsi_cdb cdb; 18503 struct uscsi_cmd ucmd_buf; 18504 int status; 18505 18506 ASSERT(un != NULL); 18507 ASSERT(!mutex_owned(SD_MUTEX(un))); 18508 18509 SD_TRACE(SD_LOG_IO, un, 18510 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 18511 18512 if (un->un_f_check_start_stop && 18513 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 18514 (un->un_f_start_stop_supported != TRUE)) { 18515 return (0); 18516 } 18517 18518 /* 18519 * If we are performing an eject operation and 18520 * we receive any command other than SD_TARGET_EJECT 18521 * we should immediately return. 18522 */ 18523 if (flag != SD_TARGET_EJECT) { 18524 mutex_enter(SD_MUTEX(un)); 18525 if (un->un_f_ejecting == TRUE) { 18526 mutex_exit(SD_MUTEX(un)); 18527 return (EAGAIN); 18528 } 18529 mutex_exit(SD_MUTEX(un)); 18530 } 18531 18532 bzero(&cdb, sizeof (cdb)); 18533 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18534 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18535 18536 cdb.scc_cmd = SCMD_START_STOP; 18537 cdb.cdb_opaque[4] = (uchar_t)flag; 18538 18539 ucmd_buf.uscsi_cdb = (char *)&cdb; 18540 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18541 ucmd_buf.uscsi_bufaddr = NULL; 18542 ucmd_buf.uscsi_buflen = 0; 18543 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18544 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18545 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18546 ucmd_buf.uscsi_timeout = 200; 18547 18548 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18549 UIO_SYSSPACE, path_flag); 18550 18551 switch (status) { 18552 case 0: 18553 break; /* Success! */ 18554 case EIO: 18555 switch (ucmd_buf.uscsi_status) { 18556 case STATUS_RESERVATION_CONFLICT: 18557 status = EACCES; 18558 break; 18559 case STATUS_CHECK: 18560 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 18561 switch (scsi_sense_key( 18562 (uint8_t *)&sense_buf)) { 18563 case KEY_ILLEGAL_REQUEST: 18564 status = ENOTSUP; 18565 break; 18566 case KEY_NOT_READY: 18567 if (scsi_sense_asc( 18568 (uint8_t *)&sense_buf) 18569 == 0x3A) { 18570 status = ENXIO; 18571 } 18572 break; 18573 default: 18574 break; 18575 } 18576 } 18577 break; 18578 default: 18579 break; 18580 } 18581 break; 18582 default: 18583 break; 18584 } 18585 18586 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 18587 18588 return (status); 18589 } 18590 18591 18592 /* 18593 * Function: sd_start_stop_unit_callback 18594 * 18595 * Description: timeout(9F) callback to begin recovery process for a 18596 * device that has spun down. 18597 * 18598 * Arguments: arg - pointer to associated softstate struct. 
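 *
 *		A sketch of how this callback is typically armed (the delay
 *		shown is illustrative only; actual call sites and tick
 *		counts vary):
 *
 *			un->un_startstop_timeid = timeout(
 *			    sd_start_stop_unit_callback, un,
 *			    drv_usectohz(500000));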
18599  *
18600  * Context: Executes in a timeout(9F) thread context
18601  */
18602 
18603 static void
18604 sd_start_stop_unit_callback(void *arg)
18605 {
18606 	struct sd_lun	*un = arg;
18607 	ASSERT(un != NULL);
18608 	ASSERT(!mutex_owned(SD_MUTEX(un)));
18609 
18610 	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n");
18611 
18612 	(void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP);
18613 }
18614 
18615 
18616 /*
18617  * Function: sd_start_stop_unit_task
18618  *
18619  * Description: Recovery procedure when a drive is spun down.
18620  *
18621  * Arguments: arg - pointer to associated softstate struct.
18622  *
18623  * Context: Executes in a taskq() thread context
18624  */
18625 
18626 static void
18627 sd_start_stop_unit_task(void *arg)
18628 {
18629 	struct sd_lun	*un = arg;
18630 
18631 	ASSERT(un != NULL);
18632 	ASSERT(!mutex_owned(SD_MUTEX(un)));
18633 
18634 	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n");
18635 
18636 	/*
18637 	 * Some unformatted drives report a not-ready error; there is no
18638 	 * need to restart if a format has been initiated.
18639 	 */
18640 	mutex_enter(SD_MUTEX(un));
18641 	if (un->un_f_format_in_progress == TRUE) {
18642 		mutex_exit(SD_MUTEX(un));
18643 		return;
18644 	}
18645 	mutex_exit(SD_MUTEX(un));
18646 
18647 	/*
18648 	 * When a START STOP command is issued from here, it is part of a
18649 	 * failure recovery operation and must be issued before any other
18650 	 * commands, including any pending retries. Thus it must be sent
18651 	 * using SD_PATH_DIRECT_PRIORITY. Whether or not the spin-up
18652 	 * succeeds, we will start I/O after the attempt.
18653 	 */
18654 	(void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
18655 	    SD_PATH_DIRECT_PRIORITY);
18656 
18657 	/*
18658 	 * The above call blocks until the START_STOP_UNIT command completes.
18659 	 * Now that it has completed, we must re-try the original IO that
18660 	 * received the NOT READY condition in the first place. There are
18661 	 * three possible conditions here:
18662 	 *
18663 	 *  (1) The original IO is on un_retry_bp.
18664 	 *  (2) The original IO is on the regular wait queue, and un_retry_bp
18665 	 *	is NULL.
18666 	 *  (3) The original IO is on the regular wait queue, and un_retry_bp
18667 	 *	points to some other, unrelated bp.
18668 	 *
18669 	 * For each case, we must call sd_start_cmds() with un_retry_bp
18670 	 * as the argument. If un_retry_bp is NULL, this will initiate
18671 	 * processing of the regular wait queue.  If un_retry_bp is not NULL,
18672 	 * then this will process the bp on un_retry_bp. That may or may not
18673 	 * be the original IO, but that does not matter: the important thing
18674 	 * is to keep the IO processing going at this point.
18675 	 *
18676 	 * Note: This is a very specific error recovery sequence associated
18677 	 * with a drive that is not spun up. We attempt a START_STOP_UNIT and
18678 	 * serialize the I/O with completion of the spin-up.
18679 	 */
18680 	mutex_enter(SD_MUTEX(un));
18681 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18682 	    "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
18683 	    un, un->un_retry_bp);
18684 	un->un_startstop_timeid = NULL;	/* Timeout is no longer pending */
18685 	sd_start_cmds(un, un->un_retry_bp);
18686 	mutex_exit(SD_MUTEX(un));
18687 
18688 	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
18689 }
18690 
18691 
18692 /*
18693  * Function: sd_send_scsi_INQUIRY
18694  *
18695  * Description: Issue the scsi INQUIRY command.
18696  *
18697  * Arguments: un
18698  *		bufaddr
18699  *		buflen
18700  *		evpd
18701  *		page_code
18702  *		residp
18703  *
18704  * Return Code: 0 - Success
18705  *		errno return code from sd_send_scsi_cmd()
18706  *
18707  * Context: Can sleep. Does not return until command is completed.
18708  */
18709 
18710 static int
18711 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen,
18712 	uchar_t evpd, uchar_t page_code, size_t *residp)
18713 {
18714 	union scsi_cdb		cdb;
18715 	struct uscsi_cmd	ucmd_buf;
18716 	int			status;
18717 
18718 	ASSERT(un != NULL);
18719 	ASSERT(!mutex_owned(SD_MUTEX(un)));
18720 	ASSERT(bufaddr != NULL);
18721 
18722 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
18723 
18724 	bzero(&cdb, sizeof (cdb));
18725 	bzero(&ucmd_buf, sizeof (ucmd_buf));
18726 	bzero(bufaddr, buflen);
18727 
18728 	cdb.scc_cmd = SCMD_INQUIRY;
18729 	cdb.cdb_opaque[1] = evpd;
18730 	cdb.cdb_opaque[2] = page_code;
18731 	FORMG0COUNT(&cdb, buflen);
18732 
18733 	ucmd_buf.uscsi_cdb = (char *)&cdb;
18734 	ucmd_buf.uscsi_cdblen = CDB_GROUP0;
18735 	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
18736 	ucmd_buf.uscsi_buflen = buflen;
18737 	ucmd_buf.uscsi_rqbuf = NULL;
18738 	ucmd_buf.uscsi_rqlen = 0;
18739 	ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
18740 	ucmd_buf.uscsi_timeout = 200;	/* Excessive legacy value */
18741 
18742 	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18743 	    UIO_SYSSPACE, SD_PATH_DIRECT);
18744 
18745 	if ((status == 0) && (residp != NULL)) {
18746 		*residp = ucmd_buf.uscsi_resid;
18747 	}
18748 
18749 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
18750 
18751 	return (status);
18752 }
18753 
18754 
18755 /*
18756  * Function: sd_send_scsi_TEST_UNIT_READY
18757  *
18758  * Description: Issue the scsi TEST UNIT READY command.
18759  *		This routine can be told to set the flag USCSI_DIAGNOSE to
18760  *		prevent retrying failed commands. Use this when the intent
18761  *		is either to check for device readiness, to clear a Unit
18762  *		Attention, or to clear any outstanding sense data.
18763  *		However under specific conditions the expected behavior
18764  *		is for retries to bring a device ready, so use the flag
18765  *		with caution.
18766  *
18767  * Arguments: un
18768  *		flag:	SD_CHECK_FOR_MEDIA: return ENXIO if no media present
18769  *			SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
18770  *			0: don't check for media present; do retries on cmd.
18771  *
18772  * Return Code: 0 - Success
18773  *		EIO - IO error
18774  *		EACCES - Reservation conflict detected
18775  *		ENXIO - Not Ready, medium not present
18776  *		errno return code from sd_send_scsi_cmd()
18777  *
18778  * Context: Can sleep. Does not return until command is completed.
18779  */
18780 
18781 static int
18782 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag)
18783 {
18784 	struct	scsi_extended_sense	sense_buf;
18785 	union	scsi_cdb	cdb;
18786 	struct	uscsi_cmd	ucmd_buf;
18787 	int		status;
18788 
18789 	ASSERT(un != NULL);
18790 	ASSERT(!mutex_owned(SD_MUTEX(un)));
18791 
18792 	SD_TRACE(SD_LOG_IO, un,
18793 	    "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
18794 
18795 	/*
18796 	 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
18797 	 * timeouts when they receive a TUR and the queue is not empty.  Check
18798 	 * the configuration flag set during attach (indicating the drive has
18799 	 * this firmware bug) and un_ncmds_in_transport before issuing the
18800 	 * TUR.  If there are pending commands, return success; this is a bit
18801 	 * arbitrary but is ok for
18802 	 * non-removables (i.e.
the eliteI disks) and non-clustering 18803 * configurations. 18804 */ 18805 if (un->un_f_cfg_tur_check == TRUE) { 18806 mutex_enter(SD_MUTEX(un)); 18807 if (un->un_ncmds_in_transport != 0) { 18808 mutex_exit(SD_MUTEX(un)); 18809 return (0); 18810 } 18811 mutex_exit(SD_MUTEX(un)); 18812 } 18813 18814 bzero(&cdb, sizeof (cdb)); 18815 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18816 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 18817 18818 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 18819 18820 ucmd_buf.uscsi_cdb = (char *)&cdb; 18821 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18822 ucmd_buf.uscsi_bufaddr = NULL; 18823 ucmd_buf.uscsi_buflen = 0; 18824 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18825 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 18826 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18827 18828 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 18829 if ((flag & SD_DONT_RETRY_TUR) != 0) { 18830 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 18831 } 18832 ucmd_buf.uscsi_timeout = 60; 18833 18834 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 18835 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 18836 SD_PATH_STANDARD)); 18837 18838 switch (status) { 18839 case 0: 18840 break; /* Success! */ 18841 case EIO: 18842 switch (ucmd_buf.uscsi_status) { 18843 case STATUS_RESERVATION_CONFLICT: 18844 status = EACCES; 18845 break; 18846 case STATUS_CHECK: 18847 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 18848 break; 18849 } 18850 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18851 (scsi_sense_key((uint8_t *)&sense_buf) == 18852 KEY_NOT_READY) && 18853 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 18854 status = ENXIO; 18855 } 18856 break; 18857 default: 18858 break; 18859 } 18860 break; 18861 default: 18862 break; 18863 } 18864 18865 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 18866 18867 return (status); 18868 } 18869 18870 18871 /* 18872 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 18873 * 18874 * Description: Issue the scsi PERSISTENT RESERVE IN command. 18875 * 18876 * Arguments: un 18877 * 18878 * Return Code: 0 - Success 18879 * EACCES 18880 * ENOTSUP 18881 * errno return code from sd_send_scsi_cmd() 18882 * 18883 * Context: Can sleep. Does not return until command is completed. 
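 *
 * For reference, a sketch of the returned parameter data (per SPC-3;
 * both service actions begin with the same 8-byte header):
 *
 *	bytes 0-3:	PRGENERATION counter
 *	bytes 4-7:	ADDITIONAL LENGTH of the data that follows
 *	bytes 8-n:	8-byte registered keys (SD_READ_KEYS) or
 *			reservation descriptors (SD_READ_RESV)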
18884  */
18885 
18886 static int
18887 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd,
18888 	uint16_t data_len, uchar_t *data_bufp)
18889 {
18890 	struct scsi_extended_sense	sense_buf;
18891 	union scsi_cdb		cdb;
18892 	struct uscsi_cmd	ucmd_buf;
18893 	int			status;
18894 	int			no_caller_buf = FALSE;
18895 
18896 	ASSERT(un != NULL);
18897 	ASSERT(!mutex_owned(SD_MUTEX(un)));
18898 	ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));
18899 
18900 	SD_TRACE(SD_LOG_IO, un,
18901 	    "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);
18902 
18903 	bzero(&cdb, sizeof (cdb));
18904 	bzero(&ucmd_buf, sizeof (ucmd_buf));
18905 	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
18906 	if (data_bufp == NULL) {
18907 		/* Allocate a default buf if the caller did not give one */
18908 		ASSERT(data_len == 0);
18909 		data_len  = MHIOC_RESV_KEY_SIZE;
18910 		data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
18911 		no_caller_buf = TRUE;
18912 	}
18913 
18914 	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
18915 	cdb.cdb_opaque[1] = usr_cmd;
18916 	FORMG1COUNT(&cdb, data_len);
18917 
18918 	ucmd_buf.uscsi_cdb = (char *)&cdb;
18919 	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
18920 	ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp;
18921 	ucmd_buf.uscsi_buflen = data_len;
18922 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
18923 	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
18924 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
18925 	ucmd_buf.uscsi_timeout = 60;
18926 
18927 	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
18928 	    UIO_SYSSPACE, SD_PATH_STANDARD);
18929 
18930 	switch (status) {
18931 	case 0:
18932 		break;	/* Success! */
18933 	case EIO:
18934 		switch (ucmd_buf.uscsi_status) {
18935 		case STATUS_RESERVATION_CONFLICT:
18936 			status = EACCES;
18937 			break;
18938 		case STATUS_CHECK:
18939 			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18940 			    (scsi_sense_key((uint8_t *)&sense_buf) ==
18941 			    KEY_ILLEGAL_REQUEST)) {
18942 				status = ENOTSUP;
18943 			}
18944 			break;
18945 		default:
18946 			break;
18947 		}
18948 		break;
18949 	default:
18950 		break;
18951 	}
18952 
18953 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");
18954 
18955 	if (no_caller_buf == TRUE) {
18956 		kmem_free(data_bufp, data_len);
18957 	}
18958 
18959 	return (status);
18960 }
18961 
18962 
18963 /*
18964  * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
18965  *
18966  * Description: This routine is the driver entry point for handling CD-ROM
18967  *		multi-host persistent reservation requests (MHIOCGRP_REGISTER,
18968  *		MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT) by sending the
18969  *		SCSI-3 PROUT commands to the device.
18970  *
18971  * Arguments: un - Pointer to soft state struct for the target.
18972  *		usr_cmd - SCSI-3 reservation facility command (one of
18973  *			SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
18974  *			SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_REGISTERANDIGNOREKEY)
18975  *		usr_bufp - user provided pointer to a register, reserve
18976  *			descriptor, or preempt and abort structure
18977  *			(mhioc_register_t, mhioc_resv_desc_t,
18978  *			mhioc_preemptandabort_t)
18979  *
18980  * Return Code: 0 - Success
18981  *		EACCES
18982  *		ENOTSUP
18983  *		errno return code from sd_send_scsi_cmd()
18984  *
18985  * Context: Can sleep. Does not return until command is completed.
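 *
 * For reference, a sketch of the 24-byte PROUT parameter list built
 * below from sd_prout_t (per SPC-3):
 *
 *	bytes 0-7:	reservation key (res_key)
 *	bytes 8-15:	service action reservation key (service_key)
 *	bytes 16-19:	scope-specific address
 *	byte 20:	APTPL bit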
18985 */ 18986 18987 static int 18988 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 18989 uchar_t *usr_bufp) 18990 { 18991 struct scsi_extended_sense sense_buf; 18992 union scsi_cdb cdb; 18993 struct uscsi_cmd ucmd_buf; 18994 int status; 18995 uchar_t data_len = sizeof (sd_prout_t); 18996 sd_prout_t *prp; 18997 18998 ASSERT(un != NULL); 18999 ASSERT(!mutex_owned(SD_MUTEX(un))); 19000 ASSERT(data_len == 24); /* required by scsi spec */ 19001 19002 SD_TRACE(SD_LOG_IO, un, 19003 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 19004 19005 if (usr_bufp == NULL) { 19006 return (EINVAL); 19007 } 19008 19009 bzero(&cdb, sizeof (cdb)); 19010 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19011 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19012 prp = kmem_zalloc(data_len, KM_SLEEP); 19013 19014 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 19015 cdb.cdb_opaque[1] = usr_cmd; 19016 FORMG1COUNT(&cdb, data_len); 19017 19018 ucmd_buf.uscsi_cdb = (char *)&cdb; 19019 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19020 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 19021 ucmd_buf.uscsi_buflen = data_len; 19022 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19023 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19024 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 19025 ucmd_buf.uscsi_timeout = 60; 19026 19027 switch (usr_cmd) { 19028 case SD_SCSI3_REGISTER: { 19029 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 19030 19031 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19032 bcopy(ptr->newkey.key, prp->service_key, 19033 MHIOC_RESV_KEY_SIZE); 19034 prp->aptpl = ptr->aptpl; 19035 break; 19036 } 19037 case SD_SCSI3_RESERVE: 19038 case SD_SCSI3_RELEASE: { 19039 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 19040 19041 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19042 prp->scope_address = BE_32(ptr->scope_specific_addr); 19043 cdb.cdb_opaque[2] = ptr->type; 19044 break; 19045 } 19046 case SD_SCSI3_PREEMPTANDABORT: { 19047 mhioc_preemptandabort_t *ptr = 19048 (mhioc_preemptandabort_t *)usr_bufp; 19049 19050 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19051 bcopy(ptr->victim_key.key, prp->service_key, 19052 MHIOC_RESV_KEY_SIZE); 19053 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 19054 cdb.cdb_opaque[2] = ptr->resvdesc.type; 19055 ucmd_buf.uscsi_flags |= USCSI_HEAD; 19056 break; 19057 } 19058 case SD_SCSI3_REGISTERANDIGNOREKEY: 19059 { 19060 mhioc_registerandignorekey_t *ptr; 19061 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 19062 bcopy(ptr->newkey.key, 19063 prp->service_key, MHIOC_RESV_KEY_SIZE); 19064 prp->aptpl = ptr->aptpl; 19065 break; 19066 } 19067 default: 19068 ASSERT(FALSE); 19069 break; 19070 } 19071 19072 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19073 UIO_SYSSPACE, SD_PATH_STANDARD); 19074 19075 switch (status) { 19076 case 0: 19077 break; /* Success! 
*/ 19078 case EIO: 19079 switch (ucmd_buf.uscsi_status) { 19080 case STATUS_RESERVATION_CONFLICT: 19081 status = EACCES; 19082 break; 19083 case STATUS_CHECK: 19084 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19085 (scsi_sense_key((uint8_t *)&sense_buf) == 19086 KEY_ILLEGAL_REQUEST)) { 19087 status = ENOTSUP; 19088 } 19089 break; 19090 default: 19091 break; 19092 } 19093 break; 19094 default: 19095 break; 19096 } 19097 19098 kmem_free(prp, data_len); 19099 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 19100 return (status); 19101 } 19102 19103 19104 /* 19105 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 19106 * 19107 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 19108 * 19109 * Arguments: un - pointer to the target's soft state struct 19110 * dkc - pointer to the callback structure 19111 * 19112 * Return Code: 0 - success 19113 * errno-type error code 19114 * 19115 * Context: kernel thread context only. 19116 * 19117 * _______________________________________________________________ 19118 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 19119 * |FLUSH_VOLATILE| | operation | 19120 * |______________|______________|_________________________________| 19121 * | 0 | NULL | Synchronous flush on both | 19122 * | | | volatile and non-volatile cache | 19123 * |______________|______________|_________________________________| 19124 * | 1 | NULL | Synchronous flush on volatile | 19125 * | | | cache; disk drivers may suppress| 19126 * | | | flush if disk table indicates | 19127 * | | | non-volatile cache | 19128 * |______________|______________|_________________________________| 19129 * | 0 | !NULL | Asynchronous flush on both | 19130 * | | | volatile and non-volatile cache;| 19131 * |______________|______________|_________________________________| 19132 * | 1 | !NULL | Asynchronous flush on volatile | 19133 * | | | cache; disk drivers may suppress| 19134 * | | | flush if disk table indicates | 19135 * | | | non-volatile cache | 19136 * |______________|______________|_________________________________| 19137 * 19138 */ 19139 19140 static int 19141 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 19142 { 19143 struct sd_uscsi_info *uip; 19144 struct uscsi_cmd *uscmd; 19145 union scsi_cdb *cdb; 19146 struct buf *bp; 19147 int rval = 0; 19148 int is_async; 19149 19150 SD_TRACE(SD_LOG_IO, un, 19151 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 19152 19153 ASSERT(un != NULL); 19154 ASSERT(!mutex_owned(SD_MUTEX(un))); 19155 19156 if (dkc == NULL || dkc->dkc_callback == NULL) { 19157 is_async = FALSE; 19158 } else { 19159 is_async = TRUE; 19160 } 19161 19162 mutex_enter(SD_MUTEX(un)); 19163 /* check whether cache flush should be suppressed */ 19164 if (un->un_f_suppress_cache_flush == TRUE) { 19165 mutex_exit(SD_MUTEX(un)); 19166 /* 19167 * suppress the cache flush if the device is told to do 19168 * so by sd.conf or disk table 19169 */ 19170 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 19171 skip the cache flush since suppress_cache_flush is %d!\n", 19172 un->un_f_suppress_cache_flush); 19173 19174 if (is_async == TRUE) { 19175 /* invoke callback for asynchronous flush */ 19176 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 19177 } 19178 return (rval); 19179 } 19180 mutex_exit(SD_MUTEX(un)); 19181 19182 /* 19183 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 19184 * set properly 19185 */ 19186 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 19187 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 19188 19189 
mutex_enter(SD_MUTEX(un)); 19190 if (dkc != NULL && un->un_f_sync_nv_supported && 19191 (dkc->dkc_flag & FLUSH_VOLATILE)) { 19192 /* 19193 * if the device supports SYNC_NV bit, turn on 19194 * the SYNC_NV bit to only flush volatile cache 19195 */ 19196 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 19197 } 19198 mutex_exit(SD_MUTEX(un)); 19199 19200 /* 19201 * First get some memory for the uscsi_cmd struct and cdb 19202 * and initialize for SYNCHRONIZE_CACHE cmd. 19203 */ 19204 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 19205 uscmd->uscsi_cdblen = CDB_GROUP1; 19206 uscmd->uscsi_cdb = (caddr_t)cdb; 19207 uscmd->uscsi_bufaddr = NULL; 19208 uscmd->uscsi_buflen = 0; 19209 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 19210 uscmd->uscsi_rqlen = SENSE_LENGTH; 19211 uscmd->uscsi_rqresid = SENSE_LENGTH; 19212 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19213 uscmd->uscsi_timeout = sd_io_time; 19214 19215 /* 19216 * Allocate an sd_uscsi_info struct and fill it with the info 19217 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 19218 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 19219 * since we allocate the buf here in this function, we do not 19220 * need to preserve the prior contents of b_private. 19221 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 19222 */ 19223 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 19224 uip->ui_flags = SD_PATH_DIRECT; 19225 uip->ui_cmdp = uscmd; 19226 19227 bp = getrbuf(KM_SLEEP); 19228 bp->b_private = uip; 19229 19230 /* 19231 * Setup buffer to carry uscsi request. 19232 */ 19233 bp->b_flags = B_BUSY; 19234 bp->b_bcount = 0; 19235 bp->b_blkno = 0; 19236 19237 if (is_async == TRUE) { 19238 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 19239 uip->ui_dkc = *dkc; 19240 } 19241 19242 bp->b_edev = SD_GET_DEV(un); 19243 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 19244 19245 (void) sd_uscsi_strategy(bp); 19246 19247 /* 19248 * If synchronous request, wait for completion 19249 * If async just return and let b_iodone callback 19250 * cleanup. 19251 * NOTE: On return, u_ncmds_in_driver will be decremented, 19252 * but it was also incremented in sd_uscsi_strategy(), so 19253 * we should be ok. 19254 */ 19255 if (is_async == FALSE) { 19256 (void) biowait(bp); 19257 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 19258 } 19259 19260 return (rval); 19261 } 19262 19263 19264 static int 19265 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 19266 { 19267 struct sd_uscsi_info *uip; 19268 struct uscsi_cmd *uscmd; 19269 uint8_t *sense_buf; 19270 struct sd_lun *un; 19271 int status; 19272 union scsi_cdb *cdb; 19273 19274 uip = (struct sd_uscsi_info *)(bp->b_private); 19275 ASSERT(uip != NULL); 19276 19277 uscmd = uip->ui_cmdp; 19278 ASSERT(uscmd != NULL); 19279 19280 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 19281 ASSERT(sense_buf != NULL); 19282 19283 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 19284 ASSERT(un != NULL); 19285 19286 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 19287 19288 status = geterror(bp); 19289 switch (status) { 19290 case 0: 19291 break; /* Success! 
 */
19292 	case EIO:
19293 		switch (uscmd->uscsi_status) {
19294 		case STATUS_RESERVATION_CONFLICT:
19295 			/* Ignore reservation conflict */
19296 			status = 0;
19297 			goto done;
19298 
19299 		case STATUS_CHECK:
19300 			if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
19301 			    (scsi_sense_key(sense_buf) ==
19302 			    KEY_ILLEGAL_REQUEST)) {
19303 				/* Ignore Illegal Request error */
19304 				if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) {
19305 					mutex_enter(SD_MUTEX(un));
19306 					un->un_f_sync_nv_supported = FALSE;
19307 					mutex_exit(SD_MUTEX(un));
19308 					status = 0;
19309 					SD_TRACE(SD_LOG_IO, un,
19310 					    "un_f_sync_nv_supported \
19311 					    is set to false.\n");
19312 					goto done;
19313 				}
19314 
19315 				mutex_enter(SD_MUTEX(un));
19316 				un->un_f_sync_cache_supported = FALSE;
19317 				mutex_exit(SD_MUTEX(un));
19318 				SD_TRACE(SD_LOG_IO, un,
19319 				    "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \
19320 				    un_f_sync_cache_supported set to false \
19321 				    with asc = %x, ascq = %x\n",
19322 				    scsi_sense_asc(sense_buf),
19323 				    scsi_sense_ascq(sense_buf));
19324 				status = ENOTSUP;
19325 				goto done;
19326 			}
19327 			break;
19328 		default:
19329 			break;
19330 		}
19331 		/* FALLTHRU */
19332 	default:
19333 		/*
19334 		 * Don't log an error message if this device
19335 		 * has removable media.
19336 		 */
19337 		if (!un->un_f_has_removable_media) {
19338 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
19339 			    "SYNCHRONIZE CACHE command failed (%d)\n", status);
19340 		}
19341 		break;
19342 	}
19343 
19344 done:
19345 	if (uip->ui_dkc.dkc_callback != NULL) {
19346 		(*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
19347 	}
19348 
19349 	ASSERT((bp->b_flags & B_REMAPPED) == 0);
19350 	freerbuf(bp);
19351 	kmem_free(uip, sizeof (struct sd_uscsi_info));
19352 	kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
19353 	kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
19354 	kmem_free(uscmd, sizeof (struct uscsi_cmd));
19355 
19356 	return (status);
19357 }
19358 
19359 
19360 /*
19361  * Function: sd_send_scsi_GET_CONFIGURATION
19362  *
19363  * Description: Issues the get configuration command to the device.
19364  *		Called from sd_check_for_writable_cd & sd_get_media_info;
19365  *		the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN.
19366  * Arguments: un
19367  *		ucmdbuf
19368  *		rqbuf
19369  *		rqbuflen
19370  *		bufaddr
19371  *		buflen
19372  *		path_flag
19373  *
19374  * Return Code: 0 - Success
19375  *		errno return code from sd_send_scsi_cmd()
19376  *
19377  * Context: Can sleep. Does not return until command is completed.
19378  *
19379  */
19380 
19381 static int
19382 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf,
19383 	uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
19384 	int path_flag)
19385 {
19386 	char	cdb[CDB_GROUP1];
19387 	int	status;
19388 
19389 	ASSERT(un != NULL);
19390 	ASSERT(!mutex_owned(SD_MUTEX(un)));
19391 	ASSERT(bufaddr != NULL);
19392 	ASSERT(ucmdbuf != NULL);
19393 	ASSERT(rqbuf != NULL);
19394 
19395 	SD_TRACE(SD_LOG_IO, un,
19396 	    "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);
19397 
19398 	bzero(cdb, sizeof (cdb));
19399 	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
19400 	bzero(rqbuf, rqbuflen);
19401 	bzero(bufaddr, buflen);
19402 
19403 	/*
19404 	 * Set up cdb field for the get configuration command.
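	 *
	 * A sketch of the resulting CDB (per MMC; the 0x02 Requested Type
	 * asks for the feature header plus at most one named feature):
	 *
	 *	byte 0:		0x46 (SCMD_GET_CONFIGURATION)
	 *	byte 1:		0x02 (Requested Type)
	 *	bytes 2-3:	starting feature number (zero here)
	 *	bytes 7-8:	allocation length (SD_PROFILE_HEADER_LEN)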
19405 */ 19406 cdb[0] = SCMD_GET_CONFIGURATION; 19407 cdb[1] = 0x02; /* Requested Type */ 19408 cdb[8] = SD_PROFILE_HEADER_LEN; 19409 ucmdbuf->uscsi_cdb = cdb; 19410 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19411 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19412 ucmdbuf->uscsi_buflen = buflen; 19413 ucmdbuf->uscsi_timeout = sd_io_time; 19414 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19415 ucmdbuf->uscsi_rqlen = rqbuflen; 19416 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19417 19418 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19419 UIO_SYSSPACE, path_flag); 19420 19421 switch (status) { 19422 case 0: 19423 break; /* Success! */ 19424 case EIO: 19425 switch (ucmdbuf->uscsi_status) { 19426 case STATUS_RESERVATION_CONFLICT: 19427 status = EACCES; 19428 break; 19429 default: 19430 break; 19431 } 19432 break; 19433 default: 19434 break; 19435 } 19436 19437 if (status == 0) { 19438 SD_DUMP_MEMORY(un, SD_LOG_IO, 19439 "sd_send_scsi_GET_CONFIGURATION: data", 19440 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19441 } 19442 19443 SD_TRACE(SD_LOG_IO, un, 19444 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 19445 19446 return (status); 19447 } 19448 19449 /* 19450 * Function: sd_send_scsi_feature_GET_CONFIGURATION 19451 * 19452 * Description: Issues the get configuration command to the device to 19453 * retrieve a specific feature. Called from 19454 * sd_check_for_writable_cd & sd_set_mmc_caps. 19455 * Arguments: un 19456 * ucmdbuf 19457 * rqbuf 19458 * rqbuflen 19459 * bufaddr 19460 * buflen 19461 * feature 19462 * 19463 * Return Code: 0 - Success 19464 * errno return code from sd_send_scsi_cmd() 19465 * 19466 * Context: Can sleep. Does not return until command is completed. 19467 * 19468 */ 19469 static int 19470 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 19471 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 19472 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 19473 { 19474 char cdb[CDB_GROUP1]; 19475 int status; 19476 19477 ASSERT(un != NULL); 19478 ASSERT(!mutex_owned(SD_MUTEX(un))); 19479 ASSERT(bufaddr != NULL); 19480 ASSERT(ucmdbuf != NULL); 19481 ASSERT(rqbuf != NULL); 19482 19483 SD_TRACE(SD_LOG_IO, un, 19484 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 19485 19486 bzero(cdb, sizeof (cdb)); 19487 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 19488 bzero(rqbuf, rqbuflen); 19489 bzero(bufaddr, buflen); 19490 19491 /* 19492 * Set up cdb field for the get configuration command. 19493 */ 19494 cdb[0] = SCMD_GET_CONFIGURATION; 19495 cdb[1] = 0x02; /* Requested Type */ 19496 cdb[3] = feature; 19497 cdb[8] = buflen; 19498 ucmdbuf->uscsi_cdb = cdb; 19499 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19500 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19501 ucmdbuf->uscsi_buflen = buflen; 19502 ucmdbuf->uscsi_timeout = sd_io_time; 19503 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19504 ucmdbuf->uscsi_rqlen = rqbuflen; 19505 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19506 19507 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, FKIOCTL, 19508 UIO_SYSSPACE, path_flag); 19509 19510 switch (status) { 19511 case 0: 19512 break; /* Success! 
 */
19513 	case EIO:
19514 		switch (ucmdbuf->uscsi_status) {
19515 		case STATUS_RESERVATION_CONFLICT:
19516 			status = EACCES;
19517 			break;
19518 		default:
19519 			break;
19520 		}
19521 		break;
19522 	default:
19523 		break;
19524 	}
19525 
19526 	if (status == 0) {
19527 		SD_DUMP_MEMORY(un, SD_LOG_IO,
19528 		    "sd_send_scsi_feature_GET_CONFIGURATION: data",
19529 		    (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
19530 	}
19531 
19532 	SD_TRACE(SD_LOG_IO, un,
19533 	    "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");
19534 
19535 	return (status);
19536 }
19537 
19538 
19539 /*
19540  * Function: sd_send_scsi_MODE_SENSE
19541  *
19542  * Description: Utility function for issuing a scsi MODE SENSE command.
19543  *		Note: This routine uses a consistent implementation for Group0,
19544  *		Group1, and Group2 commands across all platforms. ATAPI devices
19545  *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
19546  *
19547  * Arguments: un - pointer to the softstate struct for the target.
19548  *		cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or
19549  *			CDB_GROUP[1|2] (10 byte)).
19550  *		bufaddr - buffer for page data retrieved from the target.
19551  *		buflen - size of page to be retrieved.
19552  *		page_code - page code of data to be retrieved from the target.
19553  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19554  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19555  *			to use the USCSI "direct" chain and bypass the normal
19556  *			command waitq.
19557  *
19558  * Return Code: 0 - Success
19559  *		errno return code from sd_send_scsi_cmd()
19560  *
19561  * Context: Can sleep. Does not return until command is completed.
19562  */
19563 
19564 static int
19565 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
19566 	size_t buflen, uchar_t page_code, int path_flag)
19567 {
19568 	struct	scsi_extended_sense	sense_buf;
19569 	union scsi_cdb		cdb;
19570 	struct uscsi_cmd	ucmd_buf;
19571 	int			status;
19572 	int			headlen;
19573 
19574 	ASSERT(un != NULL);
19575 	ASSERT(!mutex_owned(SD_MUTEX(un)));
19576 	ASSERT(bufaddr != NULL);
19577 	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
19578 	    (cdbsize == CDB_GROUP2));
19579 
19580 	SD_TRACE(SD_LOG_IO, un,
19581 	    "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);
19582 
19583 	bzero(&cdb, sizeof (cdb));
19584 	bzero(&ucmd_buf, sizeof (ucmd_buf));
19585 	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19586 	bzero(bufaddr, buflen);
19587 
19588 	if (cdbsize == CDB_GROUP0) {
19589 		cdb.scc_cmd = SCMD_MODE_SENSE;
19590 		cdb.cdb_opaque[2] = page_code;
19591 		FORMG0COUNT(&cdb, buflen);
19592 		headlen = MODE_HEADER_LENGTH;
19593 	} else {
19594 		cdb.scc_cmd = SCMD_MODE_SENSE_G1;
19595 		cdb.cdb_opaque[2] = page_code;
19596 		FORMG1COUNT(&cdb, buflen);
19597 		headlen = MODE_HEADER_LENGTH_GRP2;
19598 	}
19599 
19600 	ASSERT(headlen <= buflen);
19601 	SD_FILL_SCSI1_LUN_CDB(un, &cdb);
19602 
19603 	ucmd_buf.uscsi_cdb = (char *)&cdb;
19604 	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
19605 	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19606 	ucmd_buf.uscsi_buflen = buflen;
19607 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19608 	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
19609 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19610 	ucmd_buf.uscsi_timeout = 60;
19611 
19612 	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
19613 	    UIO_SYSSPACE, path_flag);
19614 
19615 	switch (status) {
19616 	case 0:
19617 		/*
19618 		 * sr_check_wp() uses the 0x3f page code and checks the header
19619 		 * of the mode page to determine whether the target device is write-protected.
19620 		 * But some USB devices return 0 bytes for the 0x3f page
19621 		 * code. For this case, make sure that the mode page header
19622 		 * is returned at least.
19623 		 */
19624 		if (buflen - ucmd_buf.uscsi_resid < headlen)
19625 			status = EIO;
19626 		break;	/* Success! */
19627 	case EIO:
19628 		switch (ucmd_buf.uscsi_status) {
19629 		case STATUS_RESERVATION_CONFLICT:
19630 			status = EACCES;
19631 			break;
19632 		default:
19633 			break;
19634 		}
19635 		break;
19636 	default:
19637 		break;
19638 	}
19639 
19640 	if (status == 0) {
19641 		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
19642 		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
19643 	}
19644 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
19645 
19646 	return (status);
19647 }
19648 
19649 
19650 /*
19651  * Function: sd_send_scsi_MODE_SELECT
19652  *
19653  * Description: Utility function for issuing a scsi MODE SELECT command.
19654  *		Note: This routine uses a consistent implementation for Group0,
19655  *		Group1, and Group2 commands across all platforms. ATAPI devices
19656  *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
19657  *
19658  * Arguments: un - pointer to the softstate struct for the target.
19659  *		cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or
19660  *			CDB_GROUP[1|2] (10 byte)).
19661  *		bufaddr - buffer containing the page data to be sent to
19662  *			the target.
19663  *		buflen - size of the page to be transferred.
19664  *		save_page - boolean to determine if the SP bit should be set.
19665  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19666  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19667  *			to use the USCSI "direct" chain and bypass the normal
19668  *			command waitq.
19669  *
19670  * Return Code: 0 - Success
19671  *		errno return code from sd_send_scsi_cmd()
19672  *
19673  * Context: Can sleep. Does not return until command is completed.
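 *
 * Typical usage in this driver is a read-modify-write sequence (a
 * sketch only; the real callers differ in details):
 *
 *	(void) sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, bufaddr, buflen,
 *	    page_code, SD_PATH_DIRECT);
 *	... modify the returned page data, clear the mode data length ...
 *	(void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, bufaddr, buflen,
 *	    SD_SAVE_PAGE, SD_PATH_DIRECT);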
 */
19674 
19675 static int
19676 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
19677 	size_t buflen, uchar_t save_page, int path_flag)
19678 {
19679 	struct	scsi_extended_sense	sense_buf;
19680 	union scsi_cdb		cdb;
19681 	struct uscsi_cmd	ucmd_buf;
19682 	int			status;
19683 
19684 	ASSERT(un != NULL);
19685 	ASSERT(!mutex_owned(SD_MUTEX(un)));
19686 	ASSERT(bufaddr != NULL);
19687 	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
19688 	    (cdbsize == CDB_GROUP2));
19689 
19690 	SD_TRACE(SD_LOG_IO, un,
19691 	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
19692 
19693 	bzero(&cdb, sizeof (cdb));
19694 	bzero(&ucmd_buf, sizeof (ucmd_buf));
19695 	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19696 
19697 	/* Set the PF bit for many third party drives */
19698 	cdb.cdb_opaque[1] = 0x10;
19699 
19700 	/* Set the savepage(SP) bit if given */
19701 	if (save_page == SD_SAVE_PAGE) {
19702 		cdb.cdb_opaque[1] |= 0x01;
19703 	}
19704 
19705 	if (cdbsize == CDB_GROUP0) {
19706 		cdb.scc_cmd = SCMD_MODE_SELECT;
19707 		FORMG0COUNT(&cdb, buflen);
19708 	} else {
19709 		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
19710 		FORMG1COUNT(&cdb, buflen);
19711 	}
19712 
19713 	SD_FILL_SCSI1_LUN_CDB(un, &cdb);
19714 
19715 	ucmd_buf.uscsi_cdb = (char *)&cdb;
19716 	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
19717 	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19718 	ucmd_buf.uscsi_buflen = buflen;
19719 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19720 	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
19721 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
19722 	ucmd_buf.uscsi_timeout = 60;
19723 
19724 	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL,
19725 	    UIO_SYSSPACE, path_flag);
19726 
19727 	switch (status) {
19728 	case 0:
19729 		break;	/* Success! */
19730 	case EIO:
19731 		switch (ucmd_buf.uscsi_status) {
19732 		case STATUS_RESERVATION_CONFLICT:
19733 			status = EACCES;
19734 			break;
19735 		default:
19736 			break;
19737 		}
19738 		break;
19739 	default:
19740 		break;
19741 	}
19742 
19743 	if (status == 0) {
19744 		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
19745 		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
19746 	}
19747 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
19748 
19749 	return (status);
19750 }
19751 
19752 
19753 /*
19754  * Function: sd_send_scsi_RDWR
19755  *
19756  * Description: Issue a scsi READ or WRITE command with the given parameters.
19757  *
19758  * Arguments: un:      Pointer to the sd_lun struct for the target.
19759  *		cmd:	 SCMD_READ or SCMD_WRITE
19760  *		bufaddr: Address of caller's buffer to receive the RDWR data
19761  *		buflen:  Length of caller's buffer to receive the RDWR data.
19762  *		start_block: Block number for the start of the RDWR operation.
19763  *			 (Assumes target-native block size.)
19764  *		residp:  Pointer to variable to receive the residual of the
19765  *			 RDWR operation (may be NULL if no residual is requested).
19766  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19767  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19768  *			to use the USCSI "direct" chain and bypass the normal
19769  *			command waitq.
19770  *
19771  * Return Code: 0 - Success
19772  *		errno return code from sd_send_scsi_cmd()
19773  *
19774  * Context: Can sleep. Does not return until command is completed.
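 *
 * As an example of the CDB-size selection done below: a start_block of
 * 0x00200000 no longer fits in the 21-bit LBA field of a Group 0
 * (6-byte) CDB, so a Group 1 (10-byte) CDB is used; a start_block above
 * 0xFFFFFFFF requires a Group 4 (16-byte) CDB.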
19775 */ 19776 19777 static int 19778 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 19779 size_t buflen, daddr_t start_block, int path_flag) 19780 { 19781 struct scsi_extended_sense sense_buf; 19782 union scsi_cdb cdb; 19783 struct uscsi_cmd ucmd_buf; 19784 uint32_t block_count; 19785 int status; 19786 int cdbsize; 19787 uchar_t flag; 19788 19789 ASSERT(un != NULL); 19790 ASSERT(!mutex_owned(SD_MUTEX(un))); 19791 ASSERT(bufaddr != NULL); 19792 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 19793 19794 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 19795 19796 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 19797 return (EINVAL); 19798 } 19799 19800 mutex_enter(SD_MUTEX(un)); 19801 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 19802 mutex_exit(SD_MUTEX(un)); 19803 19804 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 19805 19806 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 19807 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 19808 bufaddr, buflen, start_block, block_count); 19809 19810 bzero(&cdb, sizeof (cdb)); 19811 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19812 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19813 19814 /* Compute CDB size to use */ 19815 if (start_block > 0xffffffff) 19816 cdbsize = CDB_GROUP4; 19817 else if ((start_block & 0xFFE00000) || 19818 (un->un_f_cfg_is_atapi == TRUE)) 19819 cdbsize = CDB_GROUP1; 19820 else 19821 cdbsize = CDB_GROUP0; 19822 19823 switch (cdbsize) { 19824 case CDB_GROUP0: /* 6-byte CDBs */ 19825 cdb.scc_cmd = cmd; 19826 FORMG0ADDR(&cdb, start_block); 19827 FORMG0COUNT(&cdb, block_count); 19828 break; 19829 case CDB_GROUP1: /* 10-byte CDBs */ 19830 cdb.scc_cmd = cmd | SCMD_GROUP1; 19831 FORMG1ADDR(&cdb, start_block); 19832 FORMG1COUNT(&cdb, block_count); 19833 break; 19834 case CDB_GROUP4: /* 16-byte CDBs */ 19835 cdb.scc_cmd = cmd | SCMD_GROUP4; 19836 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 19837 FORMG4COUNT(&cdb, block_count); 19838 break; 19839 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 19840 default: 19841 /* All others reserved */ 19842 return (EINVAL); 19843 } 19844 19845 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 19846 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 19847 19848 ucmd_buf.uscsi_cdb = (char *)&cdb; 19849 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 19850 ucmd_buf.uscsi_bufaddr = bufaddr; 19851 ucmd_buf.uscsi_buflen = buflen; 19852 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19853 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19854 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 19855 ucmd_buf.uscsi_timeout = 60; 19856 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19857 UIO_SYSSPACE, path_flag); 19858 switch (status) { 19859 case 0: 19860 break; /* Success! */ 19861 case EIO: 19862 switch (ucmd_buf.uscsi_status) { 19863 case STATUS_RESERVATION_CONFLICT: 19864 status = EACCES; 19865 break; 19866 default: 19867 break; 19868 } 19869 break; 19870 default: 19871 break; 19872 } 19873 19874 if (status == 0) { 19875 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 19876 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19877 } 19878 19879 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 19880 19881 return (status); 19882 } 19883 19884 19885 /* 19886 * Function: sd_send_scsi_LOG_SENSE 19887 * 19888 * Description: Issue a scsi LOG_SENSE command with the given parameters. 19889 * 19890 * Arguments: un: Pointer to the sd_lun struct for the target. 
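* bufaddr: Buffer to receive the log page data. * buflen: Size of the receive buffer, in bytes. * page_code: The log page to be retrieved. * page_control: The PC field of the CDB (e.g. current or * cumulative values). * param_ptr: Parameter pointer at which to start the transfer * within the page. * path_flag: SD_PATH_STANDARD, SD_PATH_DIRECT, or * SD_PATH_DIRECT_PRIORITY, as for the other * sd_send_scsi_*() routines.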
19891 * 19892 * Return Code: 0 - Success 19893 * errno return code from sd_send_scsi_cmd() 19894 * 19895 * Context: Can sleep. Does not return until command is completed. 19896 */ 19897 19898 static int 19899 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 19900 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 19901 int path_flag) 19902 19903 { 19904 struct scsi_extended_sense sense_buf; 19905 union scsi_cdb cdb; 19906 struct uscsi_cmd ucmd_buf; 19907 int status; 19908 19909 ASSERT(un != NULL); 19910 ASSERT(!mutex_owned(SD_MUTEX(un))); 19911 19912 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 19913 19914 bzero(&cdb, sizeof (cdb)); 19915 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19916 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19917 19918 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 19919 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 19920 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 19921 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 19922 FORMG1COUNT(&cdb, buflen); 19923 19924 ucmd_buf.uscsi_cdb = (char *)&cdb; 19925 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19926 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19927 ucmd_buf.uscsi_buflen = buflen; 19928 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19929 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19930 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19931 ucmd_buf.uscsi_timeout = 60; 19932 19933 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19934 UIO_SYSSPACE, path_flag); 19935 19936 switch (status) { 19937 case 0: 19938 break; 19939 case EIO: 19940 switch (ucmd_buf.uscsi_status) { 19941 case STATUS_RESERVATION_CONFLICT: 19942 status = EACCES; 19943 break; 19944 case STATUS_CHECK: 19945 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19946 (scsi_sense_key((uint8_t *)&sense_buf) == 19947 KEY_ILLEGAL_REQUEST) && 19948 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 19949 /* 19950 * ASC 0x24: INVALID FIELD IN CDB 19951 */ 19952 switch (page_code) { 19953 case START_STOP_CYCLE_PAGE: 19954 /* 19955 * The start stop cycle counter is 19956 * implemented as page 0x31 in earlier 19957 * generation disks. In new generation 19958 * disks the start stop cycle counter is 19959 * implemented as page 0xE. To properly 19960 * handle this case, if an attempt to 19961 * read log page 0xE fails we will try 19962 * again using page 0x31. 19963 * 19964 * The network storage BU committed to 19965 * maintaining page 0x31 for this 19966 * purpose and will not implement any 19967 * other page with page code 0x31 19968 * until all disks transition to the 19969 * standard page.
19970 */ 19971 mutex_enter(SD_MUTEX(un)); 19972 un->un_start_stop_cycle_page = 19973 START_STOP_CYCLE_VU_PAGE; 19974 cdb.cdb_opaque[2] = 19975 (char)(page_control << 6) | 19976 un->un_start_stop_cycle_page; 19977 mutex_exit(SD_MUTEX(un)); 19978 status = sd_send_scsi_cmd( 19979 SD_GET_DEV(un), &ucmd_buf, FKIOCTL, 19980 UIO_SYSSPACE, path_flag); 19981 19982 break; 19983 case TEMPERATURE_PAGE: 19984 status = ENOTTY; 19985 break; 19986 default: 19987 break; 19988 } 19989 } 19990 break; 19991 default: 19992 break; 19993 } 19994 break; 19995 default: 19996 break; 19997 } 19998 19999 if (status == 0) { 20000 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 20001 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20002 } 20003 20004 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 20005 20006 return (status); 20007 } 20008 20009 20010 /* 20011 * Function: sdioctl 20012 * 20013 * Description: Driver's ioctl(9e) entry point function. 20014 * 20015 * Arguments: dev - device number 20016 * cmd - ioctl operation to be performed 20017 * arg - user argument, contains data to be set or reference 20018 * parameter for get 20019 * flag - bit flag, indicating open settings, 32/64 bit type 20020 * cred_p - user credential pointer 20021 * rval_p - calling process return value (OPT) 20022 * 20023 * Return Code: EINVAL 20024 * ENOTTY 20025 * ENXIO 20026 * EIO 20027 * EFAULT 20028 * ENOTSUP 20029 * EPERM 20030 * 20031 * Context: Called from the device switch at normal priority. 20032 */ 20033 20034 static int 20035 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 20036 { 20037 struct sd_lun *un = NULL; 20038 int err = 0; 20039 int i = 0; 20040 cred_t *cr; 20041 int tmprval = EINVAL; 20042 int is_valid; 20043 20044 /* 20045 * All device accesses go through sdstrategy, where we check the 20046 * suspend status. 20047 */ 20048 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20049 return (ENXIO); 20050 } 20051 20052 ASSERT(!mutex_owned(SD_MUTEX(un))); 20053 20054 20055 is_valid = SD_IS_VALID_LABEL(un); 20056 20057 /* 20058 * Moved this wait from sd_uscsi_strategy to here for 20059 * reasons of deadlock prevention. Internal driver commands, 20060 * specifically those to change a device's power level, result 20061 * in a call to sd_uscsi_strategy. 20062 */ 20063 mutex_enter(SD_MUTEX(un)); 20064 while ((un->un_state == SD_STATE_SUSPENDED) || 20065 (un->un_state == SD_STATE_PM_CHANGING)) { 20066 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 20067 } 20068 /* 20069 * Twiddling the counter here protects commands from now 20070 * through to the top of sd_uscsi_strategy. Without the 20071 * counter increment, a power down, for example, could slip in 20072 * after the above state check is made and before 20073 * execution gets to the top of sd_uscsi_strategy. 20074 * That would cause problems.
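* * The pairing, in sketch form: * *	un->un_ncmds_in_driver++;	(below, under SD_MUTEX) *	... process the ioctl ... *	un->un_ncmds_in_driver--;	(at sdioctl exit, under SD_MUTEX)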
20075 */ 20076 un->un_ncmds_in_driver++; 20077 20078 if (!is_valid && 20079 (flag & (FNDELAY | FNONBLOCK))) { 20080 switch (cmd) { 20081 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 20082 case DKIOCGVTOC: 20083 case DKIOCGAPART: 20084 case DKIOCPARTINFO: 20085 case DKIOCSGEOM: 20086 case DKIOCSAPART: 20087 case DKIOCGETEFI: 20088 case DKIOCPARTITION: 20089 case DKIOCSVTOC: 20090 case DKIOCSETEFI: 20091 case DKIOCGMBOOT: 20092 case DKIOCSMBOOT: 20093 case DKIOCG_PHYGEOM: 20094 case DKIOCG_VIRTGEOM: 20095 /* let cmlb handle it */ 20096 goto skip_ready_valid; 20097 20098 case CDROMPAUSE: 20099 case CDROMRESUME: 20100 case CDROMPLAYMSF: 20101 case CDROMPLAYTRKIND: 20102 case CDROMREADTOCHDR: 20103 case CDROMREADTOCENTRY: 20104 case CDROMSTOP: 20105 case CDROMSTART: 20106 case CDROMVOLCTRL: 20107 case CDROMSUBCHNL: 20108 case CDROMREADMODE2: 20109 case CDROMREADMODE1: 20110 case CDROMREADOFFSET: 20111 case CDROMSBLKMODE: 20112 case CDROMGBLKMODE: 20113 case CDROMGDRVSPEED: 20114 case CDROMSDRVSPEED: 20115 case CDROMCDDA: 20116 case CDROMCDXA: 20117 case CDROMSUBCODE: 20118 if (!ISCD(un)) { 20119 un->un_ncmds_in_driver--; 20120 ASSERT(un->un_ncmds_in_driver >= 0); 20121 mutex_exit(SD_MUTEX(un)); 20122 return (ENOTTY); 20123 } 20124 break; 20125 case FDEJECT: 20126 case DKIOCEJECT: 20127 case CDROMEJECT: 20128 if (!un->un_f_eject_media_supported) { 20129 un->un_ncmds_in_driver--; 20130 ASSERT(un->un_ncmds_in_driver >= 0); 20131 mutex_exit(SD_MUTEX(un)); 20132 return (ENOTTY); 20133 } 20134 break; 20135 case DKIOCFLUSHWRITECACHE: 20136 mutex_exit(SD_MUTEX(un)); 20137 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 20138 if (err != 0) { 20139 mutex_enter(SD_MUTEX(un)); 20140 un->un_ncmds_in_driver--; 20141 ASSERT(un->un_ncmds_in_driver >= 0); 20142 mutex_exit(SD_MUTEX(un)); 20143 return (EIO); 20144 } 20145 mutex_enter(SD_MUTEX(un)); 20146 /* FALLTHROUGH */ 20147 case DKIOCREMOVABLE: 20148 case DKIOCHOTPLUGGABLE: 20149 case DKIOCINFO: 20150 case DKIOCGMEDIAINFO: 20151 case MHIOCENFAILFAST: 20152 case MHIOCSTATUS: 20153 case MHIOCTKOWN: 20154 case MHIOCRELEASE: 20155 case MHIOCGRP_INKEYS: 20156 case MHIOCGRP_INRESV: 20157 case MHIOCGRP_REGISTER: 20158 case MHIOCGRP_RESERVE: 20159 case MHIOCGRP_PREEMPTANDABORT: 20160 case MHIOCGRP_REGISTERANDIGNOREKEY: 20161 case CDROMCLOSETRAY: 20162 case USCSICMD: 20163 goto skip_ready_valid; 20164 default: 20165 break; 20166 } 20167 20168 mutex_exit(SD_MUTEX(un)); 20169 err = sd_ready_and_valid(un); 20170 mutex_enter(SD_MUTEX(un)); 20171 20172 if (err != SD_READY_VALID) { 20173 switch (cmd) { 20174 case DKIOCSTATE: 20175 case CDROMGDRVSPEED: 20176 case CDROMSDRVSPEED: 20177 case FDEJECT: /* for eject command */ 20178 case DKIOCEJECT: 20179 case CDROMEJECT: 20180 case DKIOCREMOVABLE: 20181 case DKIOCHOTPLUGGABLE: 20182 break; 20183 default: 20184 if (un->un_f_has_removable_media) { 20185 err = ENXIO; 20186 } else { 20187 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 20188 if (err == SD_RESERVED_BY_OTHERS) { 20189 err = EACCES; 20190 } else { 20191 err = EIO; 20192 } 20193 } 20194 un->un_ncmds_in_driver--; 20195 ASSERT(un->un_ncmds_in_driver >= 0); 20196 mutex_exit(SD_MUTEX(un)); 20197 return (err); 20198 } 20199 } 20200 } 20201 20202 skip_ready_valid: 20203 mutex_exit(SD_MUTEX(un)); 20204 20205 switch (cmd) { 20206 case DKIOCINFO: 20207 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 20208 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 20209 break; 20210 20211 case DKIOCGMEDIAINFO: 20212 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 20213 err = sd_get_media_info(dev, 
(caddr_t)arg, flag); 20214 break; 20215 20216 case DKIOCGGEOM: 20217 case DKIOCGVTOC: 20218 case DKIOCGAPART: 20219 case DKIOCPARTINFO: 20220 case DKIOCSGEOM: 20221 case DKIOCSAPART: 20222 case DKIOCGETEFI: 20223 case DKIOCPARTITION: 20224 case DKIOCSVTOC: 20225 case DKIOCSETEFI: 20226 case DKIOCGMBOOT: 20227 case DKIOCSMBOOT: 20228 case DKIOCG_PHYGEOM: 20229 case DKIOCG_VIRTGEOM: 20230 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 20231 20232 /* TUR should spin up */ 20233 20234 if (un->un_f_has_removable_media) 20235 err = sd_send_scsi_TEST_UNIT_READY(un, 20236 SD_CHECK_FOR_MEDIA); 20237 else 20238 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 20239 20240 if (err != 0) 20241 break; 20242 20243 err = cmlb_ioctl(un->un_cmlbhandle, dev, 20244 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 20245 20246 if ((err == 0) && 20247 ((cmd == DKIOCSETEFI) || 20248 (un->un_f_pkstats_enabled) && 20249 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC))) { 20250 20251 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 20252 (void *)SD_PATH_DIRECT); 20253 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 20254 sd_set_pstats(un); 20255 SD_TRACE(SD_LOG_IO_PARTITION, un, 20256 "sd_ioctl: un:0x%p pstats created and " 20257 "set\n", un); 20258 } 20259 } 20260 20261 if ((cmd == DKIOCSVTOC) || 20262 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 20263 20264 mutex_enter(SD_MUTEX(un)); 20265 if (un->un_f_devid_supported && 20266 (un->un_f_opt_fab_devid == TRUE)) { 20267 if (un->un_devid == NULL) { 20268 sd_register_devid(un, SD_DEVINFO(un), 20269 SD_TARGET_IS_UNRESERVED); 20270 } else { 20271 /* 20272 * The device id for this disk 20273 * has been fabricated. The 20274 * device id must be preserved 20275 * by writing it back out to 20276 * disk. 20277 */ 20278 if (sd_write_deviceid(un) != 0) { 20279 ddi_devid_free(un->un_devid); 20280 un->un_devid = NULL; 20281 } 20282 } 20283 } 20284 mutex_exit(SD_MUTEX(un)); 20285 } 20286 20287 break; 20288 20289 case DKIOCLOCK: 20290 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 20291 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 20292 SD_PATH_STANDARD); 20293 break; 20294 20295 case DKIOCUNLOCK: 20296 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 20297 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 20298 SD_PATH_STANDARD); 20299 break; 20300 20301 case DKIOCSTATE: { 20302 enum dkio_state state; 20303 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 20304 20305 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 20306 err = EFAULT; 20307 } else { 20308 err = sd_check_media(dev, state); 20309 if (err == 0) { 20310 if (ddi_copyout(&un->un_mediastate, (void *)arg, 20311 sizeof (int), flag) != 0) 20312 err = EFAULT; 20313 } 20314 } 20315 break; 20316 } 20317 20318 case DKIOCREMOVABLE: 20319 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 20320 i = un->un_f_has_removable_media ? 1 : 0; 20321 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 20322 err = EFAULT; 20323 } else { 20324 err = 0; 20325 } 20326 break; 20327 20328 case DKIOCHOTPLUGGABLE: 20329 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 20330 i = un->un_f_is_hotpluggable ? 
1 : 0; 20331 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 20332 err = EFAULT; 20333 } else { 20334 err = 0; 20335 } 20336 break; 20337 20338 case DKIOCGTEMPERATURE: 20339 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 20340 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 20341 break; 20342 20343 case MHIOCENFAILFAST: 20344 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 20345 if ((err = drv_priv(cred_p)) == 0) { 20346 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 20347 } 20348 break; 20349 20350 case MHIOCTKOWN: 20351 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 20352 if ((err = drv_priv(cred_p)) == 0) { 20353 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 20354 } 20355 break; 20356 20357 case MHIOCRELEASE: 20358 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 20359 if ((err = drv_priv(cred_p)) == 0) { 20360 err = sd_mhdioc_release(dev); 20361 } 20362 break; 20363 20364 case MHIOCSTATUS: 20365 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 20366 if ((err = drv_priv(cred_p)) == 0) { 20367 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 20368 case 0: 20369 err = 0; 20370 break; 20371 case EACCES: 20372 *rval_p = 1; 20373 err = 0; 20374 break; 20375 default: 20376 err = EIO; 20377 break; 20378 } 20379 } 20380 break; 20381 20382 case MHIOCQRESERVE: 20383 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 20384 if ((err = drv_priv(cred_p)) == 0) { 20385 err = sd_reserve_release(dev, SD_RESERVE); 20386 } 20387 break; 20388 20389 case MHIOCREREGISTERDEVID: 20390 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 20391 if (drv_priv(cred_p) == EPERM) { 20392 err = EPERM; 20393 } else if (!un->un_f_devid_supported) { 20394 err = ENOTTY; 20395 } else { 20396 err = sd_mhdioc_register_devid(dev); 20397 } 20398 break; 20399 20400 case MHIOCGRP_INKEYS: 20401 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 20402 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20403 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20404 err = ENOTSUP; 20405 } else { 20406 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 20407 flag); 20408 } 20409 } 20410 break; 20411 20412 case MHIOCGRP_INRESV: 20413 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 20414 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20415 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20416 err = ENOTSUP; 20417 } else { 20418 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 20419 } 20420 } 20421 break; 20422 20423 case MHIOCGRP_REGISTER: 20424 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 20425 if ((err = drv_priv(cred_p)) != EPERM) { 20426 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20427 err = ENOTSUP; 20428 } else if (arg != NULL) { 20429 mhioc_register_t reg; 20430 if (ddi_copyin((void *)arg, ®, 20431 sizeof (mhioc_register_t), flag) != 0) { 20432 err = EFAULT; 20433 } else { 20434 err = 20435 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20436 un, SD_SCSI3_REGISTER, 20437 (uchar_t *)®); 20438 } 20439 } 20440 } 20441 break; 20442 20443 case MHIOCGRP_RESERVE: 20444 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 20445 if ((err = drv_priv(cred_p)) != EPERM) { 20446 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20447 err = ENOTSUP; 20448 } else if (arg != NULL) { 20449 mhioc_resv_desc_t resv_desc; 20450 if (ddi_copyin((void *)arg, &resv_desc, 20451 sizeof (mhioc_resv_desc_t), flag) != 0) { 20452 err = EFAULT; 20453 } else { 20454 err = 20455 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20456 un, SD_SCSI3_RESERVE, 20457 (uchar_t *)&resv_desc); 20458 } 20459 } 20460 } 20461 break; 20462 20463 
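/* * Illustrative userland usage of the MHIOCGRP_* ioctls (a sketch, not part * of the driver): a privileged process registering a persistent reservation * key, assuming `fd' is open on the raw device, `mykey' is the caller's * 8-byte key, and the mhioc_register_t layout from <sys/mhd.h>: * *	mhioc_register_t reg; *	bzero(&reg, sizeof (reg)); *	bcopy(mykey, reg.newkey.key, MHIOC_RESV_KEY_SIZE); *	if (ioctl(fd, MHIOCGRP_REGISTER, &reg) != 0) *		perror("MHIOCGRP_REGISTER"); */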
case MHIOCGRP_PREEMPTANDABORT: 20464 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20465 if ((err = drv_priv(cred_p)) != EPERM) { 20466 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20467 err = ENOTSUP; 20468 } else if (arg != NULL) { 20469 mhioc_preemptandabort_t preempt_abort; 20470 if (ddi_copyin((void *)arg, &preempt_abort, 20471 sizeof (mhioc_preemptandabort_t), 20472 flag) != 0) { 20473 err = EFAULT; 20474 } else { 20475 err = 20476 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20477 un, SD_SCSI3_PREEMPTANDABORT, 20478 (uchar_t *)&preempt_abort); 20479 } 20480 } 20481 } 20482 break; 20483 20484 case MHIOCGRP_REGISTERANDIGNOREKEY: 20485 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 20486 if ((err = drv_priv(cred_p)) != EPERM) { 20487 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20488 err = ENOTSUP; 20489 } else if (arg != NULL) { 20490 mhioc_registerandignorekey_t r_and_i; 20491 if (ddi_copyin((void *)arg, (void *)&r_and_i, 20492 sizeof (mhioc_registerandignorekey_t), 20493 flag) != 0) { 20494 err = EFAULT; 20495 } else { 20496 err = 20497 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20498 un, SD_SCSI3_REGISTERANDIGNOREKEY, 20499 (uchar_t *)&r_and_i); 20500 } 20501 } 20502 } 20503 break; 20504 20505 case USCSICMD: 20506 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 20507 cr = ddi_get_cred(); 20508 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 20509 err = EPERM; 20510 } else { 20511 enum uio_seg uioseg; 20512 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 20513 UIO_USERSPACE; 20514 if (un->un_f_format_in_progress == TRUE) { 20515 err = EAGAIN; 20516 break; 20517 } 20518 err = sd_send_scsi_cmd(dev, (struct uscsi_cmd *)arg, 20519 flag, uioseg, SD_PATH_STANDARD); 20520 } 20521 break; 20522 20523 case CDROMPAUSE: 20524 case CDROMRESUME: 20525 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 20526 if (!ISCD(un)) { 20527 err = ENOTTY; 20528 } else { 20529 err = sr_pause_resume(dev, cmd); 20530 } 20531 break; 20532 20533 case CDROMPLAYMSF: 20534 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 20535 if (!ISCD(un)) { 20536 err = ENOTTY; 20537 } else { 20538 err = sr_play_msf(dev, (caddr_t)arg, flag); 20539 } 20540 break; 20541 20542 case CDROMPLAYTRKIND: 20543 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 20544 #if defined(__i386) || defined(__amd64) 20545 /* 20546 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 20547 */ 20548 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20549 #else 20550 if (!ISCD(un)) { 20551 #endif 20552 err = ENOTTY; 20553 } else { 20554 err = sr_play_trkind(dev, (caddr_t)arg, flag); 20555 } 20556 break; 20557 20558 case CDROMREADTOCHDR: 20559 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 20560 if (!ISCD(un)) { 20561 err = ENOTTY; 20562 } else { 20563 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 20564 } 20565 break; 20566 20567 case CDROMREADTOCENTRY: 20568 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 20569 if (!ISCD(un)) { 20570 err = ENOTTY; 20571 } else { 20572 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 20573 } 20574 break; 20575 20576 case CDROMSTOP: 20577 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 20578 if (!ISCD(un)) { 20579 err = ENOTTY; 20580 } else { 20581 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 20582 SD_PATH_STANDARD); 20583 } 20584 break; 20585 20586 case CDROMSTART: 20587 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 20588 if (!ISCD(un)) { 20589 err = ENOTTY; 20590 } else { 20591 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 20592 SD_PATH_STANDARD); 20593 } 20594 break; 
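/* * Illustrative userland usage (a sketch, not part of the driver): stopping * and restarting a CD device through the two cases above, assuming `fd' is * open on the raw device and <sys/cdio.h> is included: * *	(void) ioctl(fd, CDROMSTOP, 0); *	(void) ioctl(fd, CDROMSTART, 0); */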
20595 20596 case CDROMCLOSETRAY: 20597 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 20598 if (!ISCD(un)) { 20599 err = ENOTTY; 20600 } else { 20601 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 20602 SD_PATH_STANDARD); 20603 } 20604 break; 20605 20606 case FDEJECT: /* for eject command */ 20607 case DKIOCEJECT: 20608 case CDROMEJECT: 20609 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 20610 if (!un->un_f_eject_media_supported) { 20611 err = ENOTTY; 20612 } else { 20613 err = sr_eject(dev); 20614 } 20615 break; 20616 20617 case CDROMVOLCTRL: 20618 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 20619 if (!ISCD(un)) { 20620 err = ENOTTY; 20621 } else { 20622 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 20623 } 20624 break; 20625 20626 case CDROMSUBCHNL: 20627 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 20628 if (!ISCD(un)) { 20629 err = ENOTTY; 20630 } else { 20631 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 20632 } 20633 break; 20634 20635 case CDROMREADMODE2: 20636 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 20637 if (!ISCD(un)) { 20638 err = ENOTTY; 20639 } else if (un->un_f_cfg_is_atapi == TRUE) { 20640 /* 20641 * If the drive supports READ CD, use that instead of 20642 * switching the LBA size via a MODE SELECT 20643 * Block Descriptor 20644 */ 20645 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 20646 } else { 20647 err = sr_read_mode2(dev, (caddr_t)arg, flag); 20648 } 20649 break; 20650 20651 case CDROMREADMODE1: 20652 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 20653 if (!ISCD(un)) { 20654 err = ENOTTY; 20655 } else { 20656 err = sr_read_mode1(dev, (caddr_t)arg, flag); 20657 } 20658 break; 20659 20660 case CDROMREADOFFSET: 20661 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 20662 if (!ISCD(un)) { 20663 err = ENOTTY; 20664 } else { 20665 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 20666 flag); 20667 } 20668 break; 20669 20670 case CDROMSBLKMODE: 20671 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 20672 /* 20673 * There is no means of changing the block size on ATAPI 20674 * drives, thus return ENOTTY if the drive type is ATAPI. 20675 */ 20676 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20677 err = ENOTTY; 20678 } else if (un->un_f_mmc_cap == TRUE) { 20679 20680 /* 20681 * MMC Devices do not support changing the 20682 * logical block size 20683 * 20684 * Note: EINVAL is being returned instead of ENOTTY to 20685 * maintain consistency with the original mmc 20686 * driver update. 20687 */ 20688 err = EINVAL; 20689 } else { 20690 mutex_enter(SD_MUTEX(un)); 20691 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 20692 (un->un_ncmds_in_transport > 0)) { 20693 mutex_exit(SD_MUTEX(un)); 20694 err = EINVAL; 20695 } else { 20696 mutex_exit(SD_MUTEX(un)); 20697 err = sr_change_blkmode(dev, cmd, arg, flag); 20698 } 20699 } 20700 break; 20701 20702 case CDROMGBLKMODE: 20703 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 20704 if (!ISCD(un)) { 20705 err = ENOTTY; 20706 } else if ((un->un_f_cfg_is_atapi != FALSE) && 20707 (un->un_f_blockcount_is_valid != FALSE)) { 20708 /* 20709 * Drive is an ATAPI drive so return target block 20710 * size for ATAPI drives since we cannot change the 20711 * blocksize on ATAPI drives. Used primarily to detect 20712 * whether an ATAPI cdrom is present. 20713 */ 20714 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 20715 sizeof (int), flag) != 0) { 20716 err = EFAULT; 20717 } else { 20718 err = 0; 20719 } 20720 20721 } else { 20722 /* 20723 * Drive supports changing block sizes via a Mode 20724 * Select.
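* (As a sketch of the mechanism, sr_change_blkmode() issues a MODE * SENSE/MODE SELECT pair whose block descriptor carries the requested * logical block size.)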
20725 */ 20726 err = sr_change_blkmode(dev, cmd, arg, flag); 20727 } 20728 break; 20729 20730 case CDROMGDRVSPEED: 20731 case CDROMSDRVSPEED: 20732 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 20733 if (!ISCD(un)) { 20734 err = ENOTTY; 20735 } else if (un->un_f_mmc_cap == TRUE) { 20736 /* 20737 * Note: In the future the driver implementation 20738 * for getting and 20739 * setting cd speed should entail: 20740 * 1) If non-mmc try the Toshiba mode page 20741 * (sr_change_speed) 20742 * 2) If mmc but no support for Real Time Streaming try 20743 * the SET CD SPEED (0xBB) command 20744 * (sr_atapi_change_speed) 20745 * 3) If mmc and support for Real Time Streaming 20746 * try the GET PERFORMANCE and SET STREAMING 20747 * commands (not yet implemented, 4380808) 20748 */ 20749 /* 20750 * As per recent MMC spec, CD-ROM speed is variable 20751 * and changes with LBA. Since there is no such 20752 * thing as a fixed drive speed now, fail this ioctl. 20753 * 20754 * Note: EINVAL is returned for consistency with the 20755 * original implementation, which included support for 20756 * getting the drive speed of mmc devices but not for 20757 * setting the drive speed. Thus EINVAL would be returned 20758 * if a set request was made for an mmc device. 20759 * We no longer support get or set speed for 20760 * mmc but need to remain consistent with regard 20761 * to the error code returned. 20762 */ 20763 err = EINVAL; 20764 } else if (un->un_f_cfg_is_atapi == TRUE) { 20765 err = sr_atapi_change_speed(dev, cmd, arg, flag); 20766 } else { 20767 err = sr_change_speed(dev, cmd, arg, flag); 20768 } 20769 break; 20770 20771 case CDROMCDDA: 20772 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 20773 if (!ISCD(un)) { 20774 err = ENOTTY; 20775 } else { 20776 err = sr_read_cdda(dev, (void *)arg, flag); 20777 } 20778 break; 20779 20780 case CDROMCDXA: 20781 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 20782 if (!ISCD(un)) { 20783 err = ENOTTY; 20784 } else { 20785 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 20786 } 20787 break; 20788 20789 case CDROMSUBCODE: 20790 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 20791 if (!ISCD(un)) { 20792 err = ENOTTY; 20793 } else { 20794 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 20795 } 20796 break; 20797 20798 20799 #ifdef SDDEBUG 20800 /* RESET/ABORTS testing ioctls */ 20801 case DKIOCRESET: { 20802 int reset_level; 20803 20804 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 20805 err = EFAULT; 20806 } else { 20807 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 20808 "reset_level = 0x%x\n", reset_level); 20809 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 20810 err = 0; 20811 } else { 20812 err = EIO; 20813 } 20814 } 20815 break; 20816 } 20817 20818 case DKIOCABORT: 20819 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 20820 if (scsi_abort(SD_ADDRESS(un), NULL)) { 20821 err = 0; 20822 } else { 20823 err = EIO; 20824 } 20825 break; 20826 #endif 20827 20828 #ifdef SD_FAULT_INJECTION 20829 /* SDIOC FaultInjection testing ioctls */ 20830 case SDIOCSTART: 20831 case SDIOCSTOP: 20832 case SDIOCINSERTPKT: 20833 case SDIOCINSERTXB: 20834 case SDIOCINSERTUN: 20835 case SDIOCINSERTARQ: 20836 case SDIOCPUSH: 20837 case SDIOCRETRIEVE: 20838 case SDIOCRUN: 20839 SD_INFO(SD_LOG_SDTEST, un, "sdioctl: " 20840 "SDIOC detected cmd:0x%X:\n", cmd); 20841 /* call error generator */ 20842 sd_faultinjection_ioctl(cmd, arg, un); 20843 err = 0; 20844 break; 20845 20846 #endif /* SD_FAULT_INJECTION */ 20847 20848 case DKIOCFLUSHWRITECACHE: 20849 { 20850 struct dk_callback *dkc = (struct
dk_callback *)arg; 20851 20852 mutex_enter(SD_MUTEX(un)); 20853 if (!un->un_f_sync_cache_supported || 20854 !un->un_f_write_cache_enabled) { 20855 err = un->un_f_sync_cache_supported ? 20856 0 : ENOTSUP; 20857 mutex_exit(SD_MUTEX(un)); 20858 if ((flag & FKIOCTL) && dkc != NULL && 20859 dkc->dkc_callback != NULL) { 20860 (*dkc->dkc_callback)(dkc->dkc_cookie, 20861 err); 20862 /* 20863 * Did callback and reported error. 20864 * Since we did a callback, ioctl 20865 * should return 0. 20866 */ 20867 err = 0; 20868 } 20869 break; 20870 } 20871 mutex_exit(SD_MUTEX(un)); 20872 20873 if ((flag & FKIOCTL) && dkc != NULL && 20874 dkc->dkc_callback != NULL) { 20875 /* async SYNC CACHE request */ 20876 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 20877 } else { 20878 /* synchronous SYNC CACHE request */ 20879 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 20880 } 20881 } 20882 break; 20883 20884 case DKIOCGETWCE: { 20885 20886 int wce; 20887 20888 if ((err = sd_get_write_cache_enabled(un, &wce)) != 0) { 20889 break; 20890 } 20891 20892 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 20893 err = EFAULT; 20894 } 20895 break; 20896 } 20897 20898 case DKIOCSETWCE: { 20899 20900 int wce, sync_supported; 20901 20902 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 20903 err = EFAULT; 20904 break; 20905 } 20906 20907 /* 20908 * Synchronize multiple threads trying to enable 20909 * or disable the cache via the un_f_wcc_cv 20910 * condition variable. 20911 */ 20912 mutex_enter(SD_MUTEX(un)); 20913 20914 /* 20915 * Don't allow the cache to be enabled if the 20916 * config file has it disabled. 20917 */ 20918 if (un->un_f_opt_disable_cache && wce) { 20919 mutex_exit(SD_MUTEX(un)); 20920 err = EINVAL; 20921 break; 20922 } 20923 20924 /* 20925 * Wait for write cache change in progress 20926 * bit to be clear before proceeding. 20927 */ 20928 while (un->un_f_wcc_inprog) 20929 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 20930 20931 un->un_f_wcc_inprog = 1; 20932 20933 if (un->un_f_write_cache_enabled && wce == 0) { 20934 /* 20935 * Disable the write cache. Don't clear 20936 * un_f_write_cache_enabled until after 20937 * the mode select and flush are complete. 20938 */ 20939 sync_supported = un->un_f_sync_cache_supported; 20940 20941 /* 20942 * If cache flush is suppressed, we assume that the 20943 * controller firmware will take care of managing the 20944 * write cache for us: no need to explicitly 20945 * disable it. 20946 */ 20947 if (!un->un_f_suppress_cache_flush) { 20948 mutex_exit(SD_MUTEX(un)); 20949 if ((err = sd_cache_control(un, 20950 SD_CACHE_NOCHANGE, 20951 SD_CACHE_DISABLE)) == 0 && 20952 sync_supported) { 20953 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 20954 NULL); 20955 } 20956 } else { 20957 mutex_exit(SD_MUTEX(un)); 20958 } 20959 20960 mutex_enter(SD_MUTEX(un)); 20961 if (err == 0) { 20962 un->un_f_write_cache_enabled = 0; 20963 } 20964 20965 } else if (!un->un_f_write_cache_enabled && wce != 0) { 20966 /* 20967 * Set un_f_write_cache_enabled first, so there is 20968 * no window where the cache is enabled, but the 20969 * bit says it isn't. 20970 */ 20971 un->un_f_write_cache_enabled = 1; 20972 20973 /* 20974 * If cache flush is suppressed, we assume that the 20975 * controller firmware will take care of managing the 20976 * write cache for us: no need to explicitly 20977 * enable it. 
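* * Illustrative userland usage (a sketch, not part of the driver), assuming * `fd' is open on the raw device and <sys/dkio.h> is included: * *	int wce = 1; *	if (ioctl(fd, DKIOCSETWCE, &wce) != 0) *		perror("DKIOCSETWCE");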
20978 */ 20979 if (!un->un_f_suppress_cache_flush) { 20980 mutex_exit(SD_MUTEX(un)); 20981 err = sd_cache_control(un, SD_CACHE_NOCHANGE, 20982 SD_CACHE_ENABLE); 20983 } else { 20984 mutex_exit(SD_MUTEX(un)); 20985 } 20986 20987 mutex_enter(SD_MUTEX(un)); 20988 20989 if (err) { 20990 un->un_f_write_cache_enabled = 0; 20991 } 20992 } 20993 20994 un->un_f_wcc_inprog = 0; 20995 cv_broadcast(&un->un_wcc_cv); 20996 mutex_exit(SD_MUTEX(un)); 20997 break; 20998 } 20999 21000 default: 21001 err = ENOTTY; 21002 break; 21003 } 21004 mutex_enter(SD_MUTEX(un)); 21005 un->un_ncmds_in_driver--; 21006 ASSERT(un->un_ncmds_in_driver >= 0); 21007 mutex_exit(SD_MUTEX(un)); 21008 21009 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 21010 return (err); 21011 } 21012 21013 21014 /* 21015 * Function: sd_dkio_ctrl_info 21016 * 21017 * Description: This routine is the driver entry point for handling controller 21018 * information ioctl requests (DKIOCINFO). 21019 * 21020 * Arguments: dev - the device number 21021 * arg - pointer to user provided dk_cinfo structure 21022 * specifying the controller type and attributes. 21023 * flag - this argument is a pass through to ddi_copyxxx() 21024 * directly from the mode argument of ioctl(). 21025 * 21026 * Return Code: 0 21027 * EFAULT 21028 * ENXIO 21029 */ 21030 21031 static int 21032 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 21033 { 21034 struct sd_lun *un = NULL; 21035 struct dk_cinfo *info; 21036 dev_info_t *pdip; 21037 int lun, tgt; 21038 21039 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21040 return (ENXIO); 21041 } 21042 21043 info = (struct dk_cinfo *) 21044 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 21045 21046 switch (un->un_ctype) { 21047 case CTYPE_CDROM: 21048 info->dki_ctype = DKC_CDROM; 21049 break; 21050 default: 21051 info->dki_ctype = DKC_SCSI_CCS; 21052 break; 21053 } 21054 pdip = ddi_get_parent(SD_DEVINFO(un)); 21055 info->dki_cnum = ddi_get_instance(pdip); 21056 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 21057 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 21058 } else { 21059 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 21060 DK_DEVLEN - 1); 21061 } 21062 21063 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 21064 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 21065 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 21066 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 21067 21068 /* Unit Information */ 21069 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 21070 info->dki_slave = ((tgt << 3) | lun); 21071 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 21072 DK_DEVLEN - 1); 21073 info->dki_flags = DKI_FMTVOL; 21074 info->dki_partition = SDPART(dev); 21075 21076 /* Max Transfer size of this device in blocks */ 21077 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 21078 info->dki_addr = 0; 21079 info->dki_space = 0; 21080 info->dki_prio = 0; 21081 info->dki_vec = 0; 21082 21083 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 21084 kmem_free(info, sizeof (struct dk_cinfo)); 21085 return (EFAULT); 21086 } else { 21087 kmem_free(info, sizeof (struct dk_cinfo)); 21088 return (0); 21089 } 21090 } 21091 21092 21093 /* 21094 * Function: sd_get_media_info 21095 * 21096 * Description: This routine is the driver entry point for handling ioctl 21097 * requests for the media type or command set profile used by the 21098 * drive to operate on the media (DKIOCGMEDIAINFO). 
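* * Illustrative userland usage (a sketch, not part of the driver), assuming * `fd' is open on the raw device and <sys/dkio.h> is included: * *	struct dk_minfo mi; *	if (ioctl(fd, DKIOCGMEDIAINFO, &mi) == 0) *		(void) printf("type %u lbsize %u capacity %llu\n", *		    mi.dki_media_type, mi.dki_lbsize, *		    (u_longlong_t)mi.dki_capacity);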
21099 * 21100 * Arguments: dev - the device number 21101 * arg - pointer to user provided dk_minfo structure 21102 * specifying the media type, logical block size and 21103 * drive capacity. 21104 * flag - this argument is a pass through to ddi_copyxxx() 21105 * directly from the mode argument of ioctl(). 21106 * 21107 * Return Code: 0 21108 * EACCES 21109 * EFAULT 21110 * ENXIO 21111 * EIO 21112 */ 21113 21114 static int 21115 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 21116 { 21117 struct sd_lun *un = NULL; 21118 struct uscsi_cmd com; 21119 struct scsi_inquiry *sinq; 21120 struct dk_minfo media_info; 21121 u_longlong_t media_capacity; 21122 uint64_t capacity; 21123 uint_t lbasize; 21124 uchar_t *out_data; 21125 uchar_t *rqbuf; 21126 int rval = 0; 21127 int rtn; 21128 21129 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 21130 (un->un_state == SD_STATE_OFFLINE)) { 21131 return (ENXIO); 21132 } 21133 21134 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 21135 21136 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 21137 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 21138 21139 /* Issue a TUR to determine if the drive is ready with media present */ 21140 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 21141 if (rval == ENXIO) { 21142 goto done; 21143 } 21144 21145 /* Now get configuration data */ 21146 if (ISCD(un)) { 21147 media_info.dki_media_type = DK_CDROM; 21148 21149 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 21150 if (un->un_f_mmc_cap == TRUE) { 21151 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 21152 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 21153 SD_PATH_STANDARD); 21154 21155 if (rtn) { 21156 /* 21157 * Failed for other than an illegal request 21158 * or command not supported 21159 */ 21160 if ((com.uscsi_status == STATUS_CHECK) && 21161 (com.uscsi_rqstatus == STATUS_GOOD)) { 21162 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 21163 (rqbuf[12] != 0x20)) { 21164 rval = EIO; 21165 goto done; 21166 } 21167 } 21168 } else { 21169 /* 21170 * The GET CONFIGURATION command succeeded 21171 * so set the media type according to the 21172 * returned data 21173 */ 21174 media_info.dki_media_type = out_data[6]; 21175 media_info.dki_media_type <<= 8; 21176 media_info.dki_media_type |= out_data[7]; 21177 } 21178 } 21179 } else { 21180 /* 21181 * The profile list is not available, so we attempt to identify 21182 * the media type based on the inquiry data 21183 */ 21184 sinq = un->un_sd->sd_inq; 21185 if ((sinq->inq_dtype == DTYPE_DIRECT) || 21186 (sinq->inq_dtype == DTYPE_OPTICAL)) { 21187 /* This is a direct access device or optical disk */ 21188 media_info.dki_media_type = DK_FIXED_DISK; 21189 21190 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 21191 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 21192 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 21193 media_info.dki_media_type = DK_ZIP; 21194 } else if ( 21195 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 21196 media_info.dki_media_type = DK_JAZ; 21197 } 21198 } 21199 } else { 21200 /* 21201 * Not a CD, direct access or optical disk so return 21202 * unknown media 21203 */ 21204 media_info.dki_media_type = DK_UNKNOWN; 21205 } 21206 } 21207 21208 /* Now read the capacity so we can provide the lbasize and capacity */ 21209 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 21210 SD_PATH_DIRECT)) { 21211 case 0: 21212 break; 21213 case EACCES: 21214 rval = EACCES; 21215 goto done; 21216 default: 21217 rval = EIO; 21218 goto done; 21219 } 21220 21221 /* 21222 * If lun
is expanded dynamically, update the un structure. 21223 */ 21224 mutex_enter(SD_MUTEX(un)); 21225 if ((un->un_f_blockcount_is_valid == TRUE) && 21226 (un->un_f_tgt_blocksize_is_valid == TRUE) && 21227 (capacity > un->un_blockcount)) { 21228 sd_update_block_info(un, lbasize, capacity); 21229 } 21230 mutex_exit(SD_MUTEX(un)); 21231 21232 media_info.dki_lbsize = lbasize; 21233 media_capacity = capacity; 21234 21235 /* 21236 * sd_send_scsi_READ_CAPACITY() reports capacity in 21237 * un->un_sys_blocksize chunks. So we need to convert it into 21238 * dki_lbsize chunks. 21239 */ 21240 media_capacity *= un->un_sys_blocksize; 21241 media_capacity /= lbasize; 21242 media_info.dki_capacity = media_capacity; 21243 21244 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 21245 rval = EFAULT; 21246 /* Keep the goto; code may be added below in the future. */ 21247 goto done; 21248 } 21249 done: 21250 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 21251 kmem_free(rqbuf, SENSE_LENGTH); 21252 return (rval); 21253 } 21254 21255 21256 /* 21257 * Function: sd_check_media 21258 * 21259 * Description: This utility routine implements the functionality for the 21260 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 21261 * driver state changes from that specified by the user 21262 * (inserted or ejected). For example, if the user specifies 21263 * DKIO_EJECTED and the current media state is inserted this 21264 * routine will immediately return DKIO_INSERTED. However, if the 21265 * current media state is not inserted the user thread will be 21266 * blocked until the drive state changes. If DKIO_NONE is specified 21267 * the user thread will block until a drive state change occurs. 21268 * 21269 * Arguments: dev - the device number 21270 * state - user pointer to a dkio_state, updated with the current 21271 * drive state at return. 21272 * 21273 * Return Code: ENXIO 21274 * EIO 21275 * EAGAIN 21276 * EINTR 21277 */ 21278 21279 static int 21280 sd_check_media(dev_t dev, enum dkio_state state) 21281 { 21282 struct sd_lun *un = NULL; 21283 enum dkio_state prev_state; 21284 opaque_t token = NULL; 21285 int rval = 0; 21286 21287 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21288 return (ENXIO); 21289 } 21290 21291 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 21292 21293 mutex_enter(SD_MUTEX(un)); 21294 21295 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 21296 "state=%x, mediastate=%x\n", state, un->un_mediastate); 21297 21298 prev_state = un->un_mediastate; 21299 21300 /* is there anything to do? */ 21301 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 21302 /* 21303 * submit the request to the scsi_watch service; 21304 * sd_media_watch_cb() does the real work 21305 */ 21306 mutex_exit(SD_MUTEX(un)); 21307 21308 /* 21309 * This change handles the case where a scsi watch request is 21310 * added to a device that is powered down. To accomplish this 21311 * we power up the device before adding the scsi watch request, 21312 * since the scsi watch sends a TUR directly to the device 21313 * which the device cannot handle if it is powered down.
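* * The bracket used below is the standard sd PM pattern (a sketch): * *	if (sd_pm_entry(un) == DDI_SUCCESS) { *		... issue the SD_PATH_DIRECT command ... *		sd_pm_exit(un); *	}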
21314 */ 21315 if (sd_pm_entry(un) != DDI_SUCCESS) { 21316 mutex_enter(SD_MUTEX(un)); 21317 goto done; 21318 } 21319 21320 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 21321 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 21322 (caddr_t)dev); 21323 21324 sd_pm_exit(un); 21325 21326 mutex_enter(SD_MUTEX(un)); 21327 if (token == NULL) { 21328 rval = EAGAIN; 21329 goto done; 21330 } 21331 21332 /* 21333 * This is a special case IOCTL that doesn't return 21334 * until the media state changes. Routine sdpower 21335 * knows about and handles this so don't count it 21336 * as an active cmd in the driver, which would 21337 * keep the device busy as far as the pm framework 21338 * is concerned. If the count isn't decremented the 21339 * device can't be powered down. 21340 */ 21341 un->un_ncmds_in_driver--; 21342 ASSERT(un->un_ncmds_in_driver >= 0); 21343 21344 /* 21345 * If a prior request had been made, this will be the same 21346 * token, as scsi_watch was designed that way. 21347 */ 21348 un->un_swr_token = token; 21349 un->un_specified_mediastate = state; 21350 21351 /* 21352 * Now wait for the media change. 21353 * We will not be signalled unless mediastate == state, but it is 21354 * still better to test for this condition, since there is a 21355 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 21356 */ 21357 SD_TRACE(SD_LOG_COMMON, un, 21358 "sd_check_media: waiting for media state change\n"); 21359 while (un->un_mediastate == state) { 21360 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 21361 SD_TRACE(SD_LOG_COMMON, un, 21362 "sd_check_media: waiting for media state " 21363 "was interrupted\n"); 21364 un->un_ncmds_in_driver++; 21365 rval = EINTR; 21366 goto done; 21367 } 21368 SD_TRACE(SD_LOG_COMMON, un, 21369 "sd_check_media: received signal, state=%x\n", 21370 un->un_mediastate); 21371 } 21372 /* 21373 * Increment the counter to indicate the device once again 21374 * has an active outstanding cmd. 21375 */ 21376 un->un_ncmds_in_driver++; 21377 } 21378 21379 /* invalidate geometry */ 21380 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 21381 sr_ejected(un); 21382 } 21383 21384 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 21385 uint64_t capacity; 21386 uint_t lbasize; 21387 21388 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 21389 mutex_exit(SD_MUTEX(un)); 21390 /* 21391 * Since the following routines use SD_PATH_DIRECT, we must 21392 * call PM directly before the upcoming disk accesses. This 21393 * may cause the disk to be powered up and spun up.
21394 */ 21395 21396 if (sd_pm_entry(un) == DDI_SUCCESS) { 21397 rval = sd_send_scsi_READ_CAPACITY(un, 21398 &capacity, 21399 &lbasize, SD_PATH_DIRECT); 21400 if (rval != 0) { 21401 sd_pm_exit(un); 21402 mutex_enter(SD_MUTEX(un)); 21403 goto done; 21404 } 21405 } else { 21406 rval = EIO; 21407 mutex_enter(SD_MUTEX(un)); 21408 goto done; 21409 } 21410 mutex_enter(SD_MUTEX(un)); 21411 21412 sd_update_block_info(un, lbasize, capacity); 21413 21414 /* 21415 * Check if the media in the device is writable or not 21416 */ 21417 if (ISCD(un)) 21418 sd_check_for_writable_cd(un, SD_PATH_DIRECT); 21419 21420 mutex_exit(SD_MUTEX(un)); 21421 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 21422 if ((cmlb_validate(un->un_cmlbhandle, 0, 21423 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 21424 sd_set_pstats(un); 21425 SD_TRACE(SD_LOG_IO_PARTITION, un, 21426 "sd_check_media: un:0x%p pstats created and " 21427 "set\n", un); 21428 } 21429 21430 rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 21431 SD_PATH_DIRECT); 21432 sd_pm_exit(un); 21433 21434 mutex_enter(SD_MUTEX(un)); 21435 } 21436 done: 21437 un->un_f_watcht_stopped = FALSE; 21438 /* 21439 * Use of this local token and the mutex ensures that we avoid 21440 * some race conditions associated with terminating the 21441 * scsi watch. 21442 */ 21443 if (token) { 21444 un->un_swr_token = (opaque_t)NULL; 21445 mutex_exit(SD_MUTEX(un)); 21446 (void) scsi_watch_request_terminate(token, 21447 SCSI_WATCH_TERMINATE_WAIT); 21448 mutex_enter(SD_MUTEX(un)); 21449 } 21450 21451 /* 21452 * Update the capacity kstat value, if no media previously 21453 * (capacity kstat is 0) and a media has been inserted 21454 * (un_f_blockcount_is_valid == TRUE) 21455 */ 21456 if (un->un_errstats) { 21457 struct sd_errstats *stp = NULL; 21458 21459 stp = (struct sd_errstats *)un->un_errstats->ks_data; 21460 if ((stp->sd_capacity.value.ui64 == 0) && 21461 (un->un_f_blockcount_is_valid == TRUE)) { 21462 stp->sd_capacity.value.ui64 = 21463 (uint64_t)((uint64_t)un->un_blockcount * 21464 un->un_sys_blocksize); 21465 } 21466 } 21467 mutex_exit(SD_MUTEX(un)); 21468 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 21469 return (rval); 21470 } 21471 21472 21473 /* 21474 * Function: sd_delayed_cv_broadcast 21475 * 21476 * Description: Delayed cv_broadcast to allow for target to recover from media 21477 * insertion. 21478 * 21479 * Arguments: arg - driver soft state (unit) structure 21480 */ 21481 21482 static void 21483 sd_delayed_cv_broadcast(void *arg) 21484 { 21485 struct sd_lun *un = arg; 21486 21487 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 21488 21489 mutex_enter(SD_MUTEX(un)); 21490 un->un_dcvb_timeid = NULL; 21491 cv_broadcast(&un->un_state_cv); 21492 mutex_exit(SD_MUTEX(un)); 21493 } 21494 21495 21496 /* 21497 * Function: sd_media_watch_cb 21498 * 21499 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 21500 * routine processes the TUR sense data and updates the driver 21501 * state if a transition has occurred. The user thread 21502 * (sd_check_media) is then signalled. 
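* * In brief, the sense-data-to-state mapping implemented below is: * *	UNIT ATTENTION, ASC 0x28			-> DKIO_INSERTED *	NOT READY, ASC 0x3a				-> DKIO_EJECTED *	NOT READY, ASC/ASCQ 04/02, 04/07, 04/08	-> DKIO_INSERTED (busy) *	NO SENSE, ASC/ASCQ 00/00			-> ignored *	STATUS_GOOD with CMD_CMPLT			-> DKIO_INSERTED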
21503 * 21504 * Arguments: arg - the device 'dev_t' is used for context to discriminate 21505 * among multiple watches that share this callback function 21506 * resultp - scsi watch facility result packet containing scsi 21507 * packet, status byte and sense data 21508 * 21509 * Return Code: 0 for success, -1 for failure 21510 */ 21511 21512 static int 21513 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 21514 { 21515 struct sd_lun *un; 21516 struct scsi_status *statusp = resultp->statusp; 21517 uint8_t *sensep = (uint8_t *)resultp->sensep; 21518 enum dkio_state state = DKIO_NONE; 21519 dev_t dev = (dev_t)arg; 21520 uchar_t actual_sense_length; 21521 uint8_t skey, asc, ascq; 21522 21523 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21524 return (-1); 21525 } 21526 actual_sense_length = resultp->actual_sense_length; 21527 21528 mutex_enter(SD_MUTEX(un)); 21529 SD_TRACE(SD_LOG_COMMON, un, 21530 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 21531 *((char *)statusp), (void *)sensep, actual_sense_length); 21532 21533 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 21534 un->un_mediastate = DKIO_DEV_GONE; 21535 cv_broadcast(&un->un_state_cv); 21536 mutex_exit(SD_MUTEX(un)); 21537 21538 return (0); 21539 } 21540 21541 /* 21542 * If there was a check condition then sensep points to valid sense 21543 * data. If the status was not a check condition but a reservation or 21544 * busy status, then the new state is DKIO_NONE. 21545 */ 21546 if (sensep != NULL) { 21547 skey = scsi_sense_key(sensep); 21548 asc = scsi_sense_asc(sensep); 21549 ascq = scsi_sense_ascq(sensep); 21550 21551 SD_INFO(SD_LOG_COMMON, un, 21552 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 21553 skey, asc, ascq); 21554 /* This routine only uses up to 13 bytes of sense data. */ 21555 if (actual_sense_length >= 13) { 21556 if (skey == KEY_UNIT_ATTENTION) { 21557 if (asc == 0x28) { 21558 state = DKIO_INSERTED; 21559 } 21560 } else if (skey == KEY_NOT_READY) { 21561 /* 21562 * Sense data 02/04/02 means that the host 21563 * should send a start command. Explicitly 21564 * leave the media state as is 21565 * (inserted), as the media is inserted 21566 * and the host has stopped the device 21567 * for PM reasons. The next true read/write 21568 * to this media will bring the 21569 * device to the right state for 21570 * media access. 21571 */ 21572 if (asc == 0x3a) { 21573 state = DKIO_EJECTED; 21574 } else { 21575 /* 21576 * If the drive is busy with an 21577 * operation or long write, keep the 21578 * media in an inserted state. 21579 */ 21580 21581 if ((asc == 0x04) && 21582 ((ascq == 0x02) || 21583 (ascq == 0x07) || 21584 (ascq == 0x08))) { 21585 state = DKIO_INSERTED; 21586 } 21587 } 21588 } else if (skey == KEY_NO_SENSE) { 21589 if ((asc == 0x00) && (ascq == 0x00)) { 21590 /* 21591 * Sense Data 00/00/00 does not provide 21592 * any information about the state of 21593 * the media. Ignore it.
21594 */ 21595 mutex_exit(SD_MUTEX(un)); 21596 return (0); 21597 } 21598 } 21599 } 21600 } else if ((*((char *)statusp) == STATUS_GOOD) && 21601 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 21602 state = DKIO_INSERTED; 21603 } 21604 21605 SD_TRACE(SD_LOG_COMMON, un, 21606 "sd_media_watch_cb: state=%x, specified=%x\n", 21607 state, un->un_specified_mediastate); 21608 21609 /* 21610 * now signal the waiting thread if this is *not* the specified state; 21611 * delay the signal if the state is DKIO_INSERTED to allow the target 21612 * to recover 21613 */ 21614 if (state != un->un_specified_mediastate) { 21615 un->un_mediastate = state; 21616 if (state == DKIO_INSERTED) { 21617 /* 21618 * delay the signal to give the drive a chance 21619 * to do what it apparently needs to do 21620 */ 21621 SD_TRACE(SD_LOG_COMMON, un, 21622 "sd_media_watch_cb: delayed cv_broadcast\n"); 21623 if (un->un_dcvb_timeid == NULL) { 21624 un->un_dcvb_timeid = 21625 timeout(sd_delayed_cv_broadcast, un, 21626 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 21627 } 21628 } else { 21629 SD_TRACE(SD_LOG_COMMON, un, 21630 "sd_media_watch_cb: immediate cv_broadcast\n"); 21631 cv_broadcast(&un->un_state_cv); 21632 } 21633 } 21634 mutex_exit(SD_MUTEX(un)); 21635 return (0); 21636 } 21637 21638 21639 /* 21640 * Function: sd_dkio_get_temp 21641 * 21642 * Description: This routine is the driver entry point for handling ioctl 21643 * requests to get the disk temperature. 21644 * 21645 * Arguments: dev - the device number 21646 * arg - pointer to user provided dk_temperature structure. 21647 * flag - this argument is a pass through to ddi_copyxxx() 21648 * directly from the mode argument of ioctl(). 21649 * 21650 * Return Code: 0 21651 * EFAULT 21652 * ENXIO 21653 * EAGAIN 21654 */ 21655 21656 static int 21657 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 21658 { 21659 struct sd_lun *un = NULL; 21660 struct dk_temperature *dktemp = NULL; 21661 uchar_t *temperature_page; 21662 int rval = 0; 21663 int path_flag = SD_PATH_STANDARD; 21664 21665 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21666 return (ENXIO); 21667 } 21668 21669 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 21670 21671 /* copyin the disk temp argument to get the user flags */ 21672 if (ddi_copyin((void *)arg, dktemp, 21673 sizeof (struct dk_temperature), flag) != 0) { 21674 rval = EFAULT; 21675 goto done; 21676 } 21677 21678 /* Initialize the temperature to invalid. */ 21679 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21680 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21681 21682 /* 21683 * Note: Investigate removing the "bypass pm" semantic. 21684 * Can we just bypass PM always? 21685 */ 21686 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 21687 path_flag = SD_PATH_DIRECT; 21688 ASSERT(!mutex_owned(&un->un_pm_mutex)); 21689 mutex_enter(&un->un_pm_mutex); 21690 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 21691 /* 21692 * If DKT_BYPASS_PM is set, and the drive happens to be 21693 * in low power mode, we cannot wake it up; we need to 21694 * return EAGAIN. 21695 */ 21696 mutex_exit(&un->un_pm_mutex); 21697 rval = EAGAIN; 21698 goto done; 21699 } else { 21700 /* 21701 * Indicate to PM the device is busy. This is required 21702 * to avoid a race - i.e. the ioctl is issuing a 21703 * command and the pm framework brings down the device 21704 * to low power mode (possible power cut-off on some 21705 * platforms).
21706 */ 21707 mutex_exit(&un->un_pm_mutex); 21708 if (sd_pm_entry(un) != DDI_SUCCESS) { 21709 rval = EAGAIN; 21710 goto done; 21711 } 21712 } 21713 } 21714 21715 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 21716 21717 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 21718 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 21719 goto done2; 21720 } 21721 21722 /* 21723 * For the current temperature verify that the parameter length is 0x02 21724 * and the parameter code is 0x00 21725 */ 21726 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 21727 (temperature_page[5] == 0x00)) { 21728 if (temperature_page[9] == 0xFF) { 21729 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 21730 } else { 21731 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 21732 } 21733 } 21734 21735 /* 21736 * For the reference temperature verify that the parameter 21737 * length is 0x02 and the parameter code is 0x01 21738 */ 21739 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 21740 (temperature_page[11] == 0x01)) { 21741 if (temperature_page[15] == 0xFF) { 21742 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 21743 } else { 21744 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 21745 } 21746 } 21747 21748 /* Do the copyout regardless of the temperature command's status. */ 21749 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 21750 flag) != 0) { 21751 rval = EFAULT; 21752 } 21753 21754 done2: 21755 if (path_flag == SD_PATH_DIRECT) { 21756 sd_pm_exit(un); 21757 } 21758 21759 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 21760 done: 21761 if (dktemp != NULL) { 21762 kmem_free(dktemp, sizeof (struct dk_temperature)); 21763 } 21764 21765 return (rval); 21766 } 21767 21768 21769 /* 21770 * Function: sd_log_page_supported 21771 * 21772 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 21773 * supported log pages. 21774 * 21775 * Arguments: un - pointer to the softstate struct for the target. 21776 * log_page - the log page code to search for. 21777 * 21778 * Return Code: -1 - on error (log sense is optional and may not be supported). 21779 * 0 - log page not found. 21780 * 1 - log page found. 21781 */ 21782 21783 static int 21784 sd_log_page_supported(struct sd_lun *un, int log_page) 21785 { 21786 uchar_t *log_page_data; 21787 int i; 21788 int match = 0; 21789 int log_size; 21790 21791 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 21792 21793 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 21794 SD_PATH_DIRECT) != 0) { 21795 SD_ERROR(SD_LOG_COMMON, un, 21796 "sd_log_page_supported: failed log page retrieval\n"); 21797 kmem_free(log_page_data, 0xFF); 21798 return (-1); 21799 } 21800 log_size = log_page_data[3]; 21801 21802 /* 21803 * The list of supported log pages starts at the fourth byte. Check 21804 * until we run out of log pages or a match is found. 21805 */ 21806 for (i = 4; (i < (log_size + 4)) && !match; i++) { 21807 if (log_page_data[i] == log_page) { 21808 match++; 21809 } 21810 } 21811 kmem_free(log_page_data, 0xFF); 21812 return (match); 21813 } 21814 21815 21816 /* 21817 * Function: sd_mhdioc_failfast 21818 * 21819 * Description: This routine is the driver entry point for handling ioctl 21820 * requests to enable/disable the multihost failfast option. 21821 * (MHIOCENFAILFAST) 21822 * 21823 * Arguments: dev - the device number 21824 * arg - user specified probing interval. 21825 * flag - this argument is a pass through to ddi_copyxxx() 21826 * directly from the mode argument of ioctl().
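* * Illustrative userland usage (a sketch, not part of the driver): enabling * failfast with a probing interval of 1000 (milliseconds, per mhd(7I)), * assuming `fd' is open on the raw device and <sys/mhd.h> is included: * *	int mh_time = 1000; *	if (ioctl(fd, MHIOCENFAILFAST, &mh_time) != 0) *		perror("MHIOCENFAILFAST");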
21827 * 21828 * Return Code: 0 21829 * EFAULT 21830 * ENXIO 21831 */ 21832 21833 static int 21834 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 21835 { 21836 struct sd_lun *un = NULL; 21837 int mh_time; 21838 int rval = 0; 21839 21840 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21841 return (ENXIO); 21842 } 21843 21844 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 21845 return (EFAULT); 21846 21847 if (mh_time) { 21848 mutex_enter(SD_MUTEX(un)); 21849 un->un_resvd_status |= SD_FAILFAST; 21850 mutex_exit(SD_MUTEX(un)); 21851 /* 21852 * If mh_time is INT_MAX, then this ioctl is being used for 21853 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 21854 */ 21855 if (mh_time != INT_MAX) { 21856 rval = sd_check_mhd(dev, mh_time); 21857 } 21858 } else { 21859 (void) sd_check_mhd(dev, 0); 21860 mutex_enter(SD_MUTEX(un)); 21861 un->un_resvd_status &= ~SD_FAILFAST; 21862 mutex_exit(SD_MUTEX(un)); 21863 } 21864 return (rval); 21865 } 21866 21867 21868 /* 21869 * Function: sd_mhdioc_takeown 21870 * 21871 * Description: This routine is the driver entry point for handling ioctl 21872 * requests to forcefully acquire exclusive access rights to the 21873 * multihost disk (MHIOCTKOWN). 21874 * 21875 * Arguments: dev - the device number 21876 * arg - user provided structure specifying the delay 21877 * parameters in milliseconds 21878 * flag - this argument is a pass through to ddi_copyxxx() 21879 * directly from the mode argument of ioctl(). 21880 * 21881 * Return Code: 0 21882 * EFAULT 21883 * ENXIO 21884 */ 21885 21886 static int 21887 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 21888 { 21889 struct sd_lun *un = NULL; 21890 struct mhioctkown *tkown = NULL; 21891 int rval = 0; 21892 21893 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21894 return (ENXIO); 21895 } 21896 21897 if (arg != NULL) { 21898 tkown = (struct mhioctkown *) 21899 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 21900 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 21901 if (rval != 0) { 21902 rval = EFAULT; 21903 goto error; 21904 } 21905 } 21906 21907 rval = sd_take_ownership(dev, tkown); 21908 mutex_enter(SD_MUTEX(un)); 21909 if (rval == 0) { 21910 un->un_resvd_status |= SD_RESERVE; 21911 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 21912 sd_reinstate_resv_delay = 21913 tkown->reinstate_resv_delay * 1000; 21914 } else { 21915 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 21916 } 21917 /* 21918 * Give the scsi_watch routine interval set by 21919 * the MHIOCENFAILFAST ioctl precedence here. 21920 */ 21921 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 21922 mutex_exit(SD_MUTEX(un)); 21923 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 21924 SD_TRACE(SD_LOG_IOCTL_MHD, un, 21925 "sd_mhdioc_takeown : %d\n", 21926 sd_reinstate_resv_delay); 21927 } else { 21928 mutex_exit(SD_MUTEX(un)); 21929 } 21930 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 21931 sd_mhd_reset_notify_cb, (caddr_t)un); 21932 } else { 21933 un->un_resvd_status &= ~SD_RESERVE; 21934 mutex_exit(SD_MUTEX(un)); 21935 } 21936 21937 error: 21938 if (tkown != NULL) { 21939 kmem_free(tkown, sizeof (struct mhioctkown)); 21940 } 21941 return (rval); 21942 } 21943 21944 21945 /* 21946 * Function: sd_mhdioc_release 21947 * 21948 * Description: This routine is the driver entry point for handling ioctl 21949 * requests to release exclusive access rights to the multihost 21950 * disk (MHIOCRELEASE). 
21951 * 21952 * Arguments: dev - the device number 21953 * 21954 * Return Code: 0 21955 * ENXIO 21956 */ 21957 21958 static int 21959 sd_mhdioc_release(dev_t dev) 21960 { 21961 struct sd_lun *un = NULL; 21962 timeout_id_t resvd_timeid_save; 21963 int resvd_status_save; 21964 int rval = 0; 21965 21966 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21967 return (ENXIO); 21968 } 21969 21970 mutex_enter(SD_MUTEX(un)); 21971 resvd_status_save = un->un_resvd_status; 21972 un->un_resvd_status &= 21973 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 21974 if (un->un_resvd_timeid) { 21975 resvd_timeid_save = un->un_resvd_timeid; 21976 un->un_resvd_timeid = NULL; 21977 mutex_exit(SD_MUTEX(un)); 21978 (void) untimeout(resvd_timeid_save); 21979 } else { 21980 mutex_exit(SD_MUTEX(un)); 21981 } 21982 21983 /* 21984 * destroy any pending timeout thread that may be attempting to 21985 * reinstate reservation on this device. 21986 */ 21987 sd_rmv_resv_reclaim_req(dev); 21988 21989 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 21990 mutex_enter(SD_MUTEX(un)); 21991 if ((un->un_mhd_token) && 21992 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 21993 mutex_exit(SD_MUTEX(un)); 21994 (void) sd_check_mhd(dev, 0); 21995 } else { 21996 mutex_exit(SD_MUTEX(un)); 21997 } 21998 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 21999 sd_mhd_reset_notify_cb, (caddr_t)un); 22000 } else { 22001 /* 22002 * sd_mhd_watch_cb will restart the resvd recover timeout thread 22003 */ 22004 mutex_enter(SD_MUTEX(un)); 22005 un->un_resvd_status = resvd_status_save; 22006 mutex_exit(SD_MUTEX(un)); 22007 } 22008 return (rval); 22009 } 22010 22011 22012 /* 22013 * Function: sd_mhdioc_register_devid 22014 * 22015 * Description: This routine is the driver entry point for handling ioctl 22016 * requests to register the device id (MHIOCREREGISTERDEVID). 22017 * 22018 * Note: The implementation for this ioctl has been updated to 22019 * be consistent with the original PSARC case (1999/357) 22020 * (4375899, 4241671, 4220005) 22021 * 22022 * Arguments: dev - the device number 22023 * 22024 * Return Code: 0 22025 * ENXIO 22026 */ 22027 22028 static int 22029 sd_mhdioc_register_devid(dev_t dev) 22030 { 22031 struct sd_lun *un = NULL; 22032 int rval = 0; 22033 22034 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22035 return (ENXIO); 22036 } 22037 22038 ASSERT(!mutex_owned(SD_MUTEX(un))); 22039 22040 mutex_enter(SD_MUTEX(un)); 22041 22042 /* If a devid already exists, de-register it */ 22043 if (un->un_devid != NULL) { 22044 ddi_devid_unregister(SD_DEVINFO(un)); 22045 /* 22046 * After unregister devid, needs to free devid memory 22047 */ 22048 ddi_devid_free(un->un_devid); 22049 un->un_devid = NULL; 22050 } 22051 22052 /* Check for reservation conflict */ 22053 mutex_exit(SD_MUTEX(un)); 22054 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 22055 mutex_enter(SD_MUTEX(un)); 22056 22057 switch (rval) { 22058 case 0: 22059 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 22060 break; 22061 case EACCES: 22062 break; 22063 default: 22064 rval = EIO; 22065 } 22066 22067 mutex_exit(SD_MUTEX(un)); 22068 return (rval); 22069 } 22070 22071 22072 /* 22073 * Function: sd_mhdioc_inkeys 22074 * 22075 * Description: This routine is the driver entry point for handling ioctl 22076 * requests to issue the SCSI-3 Persistent In Read Keys command 22077 * to the device (MHIOCGRP_INKEYS). 
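 *		A hedged userland sketch (illustrative only; it assumes
 *		the mhioc_inkeys_t and mhioc_key_list_t layouts from
 *		<sys/mhd.h> and omits error handling):
 *
 *			mhioc_resv_key_t	keys[16];
 *			mhioc_key_list_t	kl;
 *			mhioc_inkeys_t		k;
 *
 *			kl.listsize = 16;
 *			kl.list = keys;
 *			k.li = &kl;
 *			(void) ioctl(fd, MHIOCGRP_INKEYS, (caddr_t)&k);
 *
 *		On return, k.generation holds the PGR generation count and
 *		kl.listlen the number of registered keys (which may exceed
 *		listsize, in which case only listsize keys were copied out).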
22078  *
22079  * Arguments: dev	- the device number
22080  *		arg	- user provided in_keys structure
22081  *		flag	- this argument is a pass through to ddi_copyxxx()
22082  *			  directly from the mode argument of ioctl().
22083  *
22084  * Return Code: code returned by sd_persistent_reservation_in_read_keys()
22085  *		ENXIO
22086  *		EFAULT
22087  */
22088 
22089 static int
22090 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
22091 {
22092 	struct sd_lun	*un;
22093 	mhioc_inkeys_t	inkeys;
22094 	int		rval = 0;
22095 
22096 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
22097 		return (ENXIO);
22098 	}
22099 
22100 #ifdef _MULTI_DATAMODEL
22101 	switch (ddi_model_convert_from(flag & FMODELS)) {
22102 	case DDI_MODEL_ILP32: {
22103 		struct mhioc_inkeys32	inkeys32;
22104 
22105 		if (ddi_copyin(arg, &inkeys32,
22106 		    sizeof (struct mhioc_inkeys32), flag) != 0) {
22107 			return (EFAULT);
22108 		}
22109 		inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
22110 		if ((rval = sd_persistent_reservation_in_read_keys(un,
22111 		    &inkeys, flag)) != 0) {
22112 			return (rval);
22113 		}
22114 		inkeys32.generation = inkeys.generation;
22115 		if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
22116 		    flag) != 0) {
22117 			return (EFAULT);
22118 		}
22119 		break;
22120 	}
22121 	case DDI_MODEL_NONE:
22122 		if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
22123 		    flag) != 0) {
22124 			return (EFAULT);
22125 		}
22126 		if ((rval = sd_persistent_reservation_in_read_keys(un,
22127 		    &inkeys, flag)) != 0) {
22128 			return (rval);
22129 		}
22130 		if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
22131 		    flag) != 0) {
22132 			return (EFAULT);
22133 		}
22134 		break;
22135 	}
22136 
22137 #else /* ! _MULTI_DATAMODEL */
22138 
22139 	if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
22140 		return (EFAULT);
22141 	}
22142 	rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
22143 	if (rval != 0) {
22144 		return (rval);
22145 	}
22146 	if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
22147 		return (EFAULT);
22148 	}
22149 
22150 #endif /* _MULTI_DATAMODEL */
22151 
22152 	return (rval);
22153 }
22154 
22155 
22156 /*
22157  * Function: sd_mhdioc_inresv
22158  *
22159  * Description: This routine is the driver entry point for handling ioctl
22160  *		requests to issue the SCSI-3 Persistent In Read Reservations
22161  *		command to the device (MHIOCGRP_INRESV).
22162  *
22163  * Arguments: dev	- the device number
22164  *		arg	- user provided in_resv structure
22165  *		flag	- this argument is a pass through to ddi_copyxxx()
22166  *			  directly from the mode argument of ioctl().
22167 * 22168 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 22169 * ENXIO 22170 * EFAULT 22171 */ 22172 22173 static int 22174 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 22175 { 22176 struct sd_lun *un; 22177 mhioc_inresvs_t inresvs; 22178 int rval = 0; 22179 22180 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22181 return (ENXIO); 22182 } 22183 22184 #ifdef _MULTI_DATAMODEL 22185 22186 switch (ddi_model_convert_from(flag & FMODELS)) { 22187 case DDI_MODEL_ILP32: { 22188 struct mhioc_inresvs32 inresvs32; 22189 22190 if (ddi_copyin(arg, &inresvs32, 22191 sizeof (struct mhioc_inresvs32), flag) != 0) { 22192 return (EFAULT); 22193 } 22194 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 22195 if ((rval = sd_persistent_reservation_in_read_resv(un, 22196 &inresvs, flag)) != 0) { 22197 return (rval); 22198 } 22199 inresvs32.generation = inresvs.generation; 22200 if (ddi_copyout(&inresvs32, arg, 22201 sizeof (struct mhioc_inresvs32), flag) != 0) { 22202 return (EFAULT); 22203 } 22204 break; 22205 } 22206 case DDI_MODEL_NONE: 22207 if (ddi_copyin(arg, &inresvs, 22208 sizeof (mhioc_inresvs_t), flag) != 0) { 22209 return (EFAULT); 22210 } 22211 if ((rval = sd_persistent_reservation_in_read_resv(un, 22212 &inresvs, flag)) != 0) { 22213 return (rval); 22214 } 22215 if (ddi_copyout(&inresvs, arg, 22216 sizeof (mhioc_inresvs_t), flag) != 0) { 22217 return (EFAULT); 22218 } 22219 break; 22220 } 22221 22222 #else /* ! _MULTI_DATAMODEL */ 22223 22224 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 22225 return (EFAULT); 22226 } 22227 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 22228 if (rval != 0) { 22229 return (rval); 22230 } 22231 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 22232 return (EFAULT); 22233 } 22234 22235 #endif /* ! _MULTI_DATAMODEL */ 22236 22237 return (rval); 22238 } 22239 22240 22241 /* 22242 * The following routines support the clustering functionality described below 22243 * and implement lost reservation reclaim functionality. 22244 * 22245 * Clustering 22246 * ---------- 22247 * The clustering code uses two different, independent forms of SCSI 22248 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 22249 * Persistent Group Reservations. For any particular disk, it will use either 22250 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 22251 * 22252 * SCSI-2 22253 * The cluster software takes ownership of a multi-hosted disk by issuing the 22254 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 22255 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 22256 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 22257 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 22258 * driver. The meaning of failfast is that if the driver (on this host) ever 22259 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 22260 * it should immediately panic the host. The motivation for this ioctl is that 22261 * if this host does encounter reservation conflict, the underlying cause is 22262 * that some other host of the cluster has decided that this host is no longer 22263 * in the cluster and has seized control of the disks for itself. Since this 22264 * host is no longer in the cluster, it ought to panic itself. 
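 * As an illustration (a hedged userland sketch, not part of the driver;
 * error handling omitted), the sequence a cluster node might issue is:
 *
 *	int	probe_ms = 2000;	(probing interval, in msec)
 *
 *	(void) ioctl(fd, MHIOCTKOWN, NULL);		(seize the disk)
 *	(void) ioctl(fd, MHIOCENFAILFAST, (caddr_t)&probe_ms);
 *
 * Passing NULL to MHIOCTKOWN accepts the default takeover delays, and the
 * two second probe interval matches the early clustering products described
 * under sd_take_ownership() below.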
The
22265  * MHIOCENFAILFAST ioctl does two things:
22266  *	(a) it sets a flag that will cause any returned RESERVATION_CONFLICT
22267  *	    error to panic the host
22268  *	(b) it sets up a periodic timer to test whether this host still has
22269  *	    "access" (in that no other host has reserved the device): if the
22270  *	    periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
22271  *	    purpose of that periodic timer is to handle scenarios where the host is
22272  *	    otherwise temporarily quiescent, temporarily doing no real i/o.
22273  * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host,
22274  * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for
22275  * the device itself.
22276  *
22277  * SCSI-3 PGR
22278  * A direct semantic implementation of the SCSI-3 Persistent Reservation
22279  * facility is supported through the shared multihost disk ioctls
22280  * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
22281  * MHIOCGRP_PREEMPTANDABORT)
22282  *
22283  * Reservation Reclaim:
22284  * --------------------
22285  * To support the lost reservation reclaim operations this driver creates a
22286  * single thread to handle reinstating reservations on all devices that have
22287  * lost reservations. sd_resv_reclaim_requests are logged for all devices
22288  * that have lost reservations when the scsi watch facility calls back
22289  * sd_mhd_watch_cb, and the reservation reclaim thread loops through the
22290  * requests to regain the lost reservations.
22291  */
22292 
22293 /*
22294  * Function: sd_check_mhd()
22295  *
22296  * Description: This function sets up and submits a scsi watch request or
22297  *		terminates an existing watch request. This routine is used in
22298  *		support of reservation reclaim.
22299  *
22300  * Arguments: dev	- the device 'dev_t' is used for context to discriminate
22301  *			among multiple watches that share the callback function
22302  *		interval - the number of milliseconds specifying the watch
22303  *			interval for issuing TEST UNIT READY commands. If
22304  *			set to 0 the watch should be terminated. If the
22305  *			interval is set to 0 and if the device is required
22306  *			to hold reservation while disabling failfast, the
22307  *			watch is restarted with an interval of
22308  *			reinstate_resv_delay.
22309  *
22310  * Return Code: 0	- Successful submit/terminate of scsi watch request
22311  *		ENXIO	- Indicates an invalid device was specified
22312  *		EAGAIN	- Unable to submit the scsi watch request
22313  */
22314 
22315 static int
22316 sd_check_mhd(dev_t dev, int interval)
22317 {
22318 	struct sd_lun	*un;
22319 	opaque_t	token;
22320 
22321 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
22322 		return (ENXIO);
22323 	}
22324 
22325 	/* is this a watch termination request? */
22326 	if (interval == 0) {
22327 		mutex_enter(SD_MUTEX(un));
22328 		/* if there is an existing watch task then terminate it */
22329 		if (un->un_mhd_token) {
22330 			token = un->un_mhd_token;
22331 			un->un_mhd_token = NULL;
22332 			mutex_exit(SD_MUTEX(un));
22333 			(void) scsi_watch_request_terminate(token,
22334 			    SCSI_WATCH_TERMINATE_ALL_WAIT);
22335 			mutex_enter(SD_MUTEX(un));
22336 		} else {
22337 			mutex_exit(SD_MUTEX(un));
22338 			/*
22339 			 * Note: If we return here we don't check for the
22340 			 * failfast case. This is the original legacy
22341 			 * implementation but perhaps we should be checking
22342 			 * the failfast case.
22343 */ 22344 return (0); 22345 } 22346 /* 22347 * If the device is required to hold reservation while 22348 * disabling failfast, we need to restart the scsi_watch 22349 * routine with an interval of reinstate_resv_delay. 22350 */ 22351 if (un->un_resvd_status & SD_RESERVE) { 22352 interval = sd_reinstate_resv_delay/1000; 22353 } else { 22354 /* no failfast so bail */ 22355 mutex_exit(SD_MUTEX(un)); 22356 return (0); 22357 } 22358 mutex_exit(SD_MUTEX(un)); 22359 } 22360 22361 /* 22362 * adjust minimum time interval to 1 second, 22363 * and convert from msecs to usecs 22364 */ 22365 if (interval > 0 && interval < 1000) { 22366 interval = 1000; 22367 } 22368 interval *= 1000; 22369 22370 /* 22371 * submit the request to the scsi_watch service 22372 */ 22373 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 22374 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 22375 if (token == NULL) { 22376 return (EAGAIN); 22377 } 22378 22379 /* 22380 * save token for termination later on 22381 */ 22382 mutex_enter(SD_MUTEX(un)); 22383 un->un_mhd_token = token; 22384 mutex_exit(SD_MUTEX(un)); 22385 return (0); 22386 } 22387 22388 22389 /* 22390 * Function: sd_mhd_watch_cb() 22391 * 22392 * Description: This function is the call back function used by the scsi watch 22393 * facility. The scsi watch facility sends the "Test Unit Ready" 22394 * and processes the status. If applicable (i.e. a "Unit Attention" 22395 * status and automatic "Request Sense" not used) the scsi watch 22396 * facility will send a "Request Sense" and retrieve the sense data 22397 * to be passed to this callback function. In either case the 22398 * automatic "Request Sense" or the facility submitting one, this 22399 * callback is passed the status and sense data. 22400 * 22401 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22402 * among multiple watches that share this callback function 22403 * resultp - scsi watch facility result packet containing scsi 22404 * packet, status byte and sense data 22405 * 22406 * Return Code: 0 - continue the watch task 22407 * non-zero - terminate the watch task 22408 */ 22409 22410 static int 22411 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 22412 { 22413 struct sd_lun *un; 22414 struct scsi_status *statusp; 22415 uint8_t *sensep; 22416 struct scsi_pkt *pkt; 22417 uchar_t actual_sense_length; 22418 dev_t dev = (dev_t)arg; 22419 22420 ASSERT(resultp != NULL); 22421 statusp = resultp->statusp; 22422 sensep = (uint8_t *)resultp->sensep; 22423 pkt = resultp->pkt; 22424 actual_sense_length = resultp->actual_sense_length; 22425 22426 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22427 return (ENXIO); 22428 } 22429 22430 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22431 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 22432 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 22433 22434 /* Begin processing of the status and/or sense data */ 22435 if (pkt->pkt_reason != CMD_CMPLT) { 22436 /* Handle the incomplete packet */ 22437 sd_mhd_watch_incomplete(un, pkt); 22438 return (0); 22439 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 22440 if (*((unsigned char *)statusp) 22441 == STATUS_RESERVATION_CONFLICT) { 22442 /* 22443 * Handle a reservation conflict by panicking if 22444 * configured for failfast or by logging the conflict 22445 * and updating the reservation status 22446 */ 22447 mutex_enter(SD_MUTEX(un)); 22448 if ((un->un_resvd_status & SD_FAILFAST) && 22449 (sd_failfast_enable)) { 22450 
sd_panic_for_res_conflict(un); 22451 /*NOTREACHED*/ 22452 } 22453 SD_INFO(SD_LOG_IOCTL_MHD, un, 22454 "sd_mhd_watch_cb: Reservation Conflict\n"); 22455 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 22456 mutex_exit(SD_MUTEX(un)); 22457 } 22458 } 22459 22460 if (sensep != NULL) { 22461 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 22462 mutex_enter(SD_MUTEX(un)); 22463 if ((scsi_sense_asc(sensep) == 22464 SD_SCSI_RESET_SENSE_CODE) && 22465 (un->un_resvd_status & SD_RESERVE)) { 22466 /* 22467 * The additional sense code indicates a power 22468 * on or bus device reset has occurred; update 22469 * the reservation status. 22470 */ 22471 un->un_resvd_status |= 22472 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22473 SD_INFO(SD_LOG_IOCTL_MHD, un, 22474 "sd_mhd_watch_cb: Lost Reservation\n"); 22475 } 22476 } else { 22477 return (0); 22478 } 22479 } else { 22480 mutex_enter(SD_MUTEX(un)); 22481 } 22482 22483 if ((un->un_resvd_status & SD_RESERVE) && 22484 (un->un_resvd_status & SD_LOST_RESERVE)) { 22485 if (un->un_resvd_status & SD_WANT_RESERVE) { 22486 /* 22487 * A reset occurred in between the last probe and this 22488 * one so if a timeout is pending cancel it. 22489 */ 22490 if (un->un_resvd_timeid) { 22491 timeout_id_t temp_id = un->un_resvd_timeid; 22492 un->un_resvd_timeid = NULL; 22493 mutex_exit(SD_MUTEX(un)); 22494 (void) untimeout(temp_id); 22495 mutex_enter(SD_MUTEX(un)); 22496 } 22497 un->un_resvd_status &= ~SD_WANT_RESERVE; 22498 } 22499 if (un->un_resvd_timeid == 0) { 22500 /* Schedule a timeout to handle the lost reservation */ 22501 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 22502 (void *)dev, 22503 drv_usectohz(sd_reinstate_resv_delay)); 22504 } 22505 } 22506 mutex_exit(SD_MUTEX(un)); 22507 return (0); 22508 } 22509 22510 22511 /* 22512 * Function: sd_mhd_watch_incomplete() 22513 * 22514 * Description: This function is used to find out why a scsi pkt sent by the 22515 * scsi watch facility was not completed. Under some scenarios this 22516 * routine will return. Otherwise it will send a bus reset to see 22517 * if the drive is still online. 22518 * 22519 * Arguments: un - driver soft state (unit) structure 22520 * pkt - incomplete scsi pkt 22521 */ 22522 22523 static void 22524 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 22525 { 22526 int be_chatty; 22527 int perr; 22528 22529 ASSERT(pkt != NULL); 22530 ASSERT(un != NULL); 22531 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 22532 perr = (pkt->pkt_statistics & STAT_PERR); 22533 22534 mutex_enter(SD_MUTEX(un)); 22535 if (un->un_state == SD_STATE_DUMPING) { 22536 mutex_exit(SD_MUTEX(un)); 22537 return; 22538 } 22539 22540 switch (pkt->pkt_reason) { 22541 case CMD_UNX_BUS_FREE: 22542 /* 22543 * If we had a parity error that caused the target to drop BSY*, 22544 * don't be chatty about it. 22545 */ 22546 if (perr && be_chatty) { 22547 be_chatty = 0; 22548 } 22549 break; 22550 case CMD_TAG_REJECT: 22551 /* 22552 * The SCSI-2 spec states that a tag reject will be sent by the 22553 * target if tagged queuing is not supported. A tag reject may 22554 * also be sent during certain initialization periods or to 22555 * control internal resources. For the latter case the target 22556 * may also return Queue Full. 22557 * 22558 * If this driver receives a tag reject from a target that is 22559 * going through an init period or controlling internal 22560 * resources tagged queuing will be disabled. 
This is a less 22561 * than optimal behavior but the driver is unable to determine 22562 * the target state and assumes tagged queueing is not supported 22563 */ 22564 pkt->pkt_flags = 0; 22565 un->un_tagflags = 0; 22566 22567 if (un->un_f_opt_queueing == TRUE) { 22568 un->un_throttle = min(un->un_throttle, 3); 22569 } else { 22570 un->un_throttle = 1; 22571 } 22572 mutex_exit(SD_MUTEX(un)); 22573 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 22574 mutex_enter(SD_MUTEX(un)); 22575 break; 22576 case CMD_INCOMPLETE: 22577 /* 22578 * The transport stopped with an abnormal state, fallthrough and 22579 * reset the target and/or bus unless selection did not complete 22580 * (indicated by STATE_GOT_BUS) in which case we don't want to 22581 * go through a target/bus reset 22582 */ 22583 if (pkt->pkt_state == STATE_GOT_BUS) { 22584 break; 22585 } 22586 /*FALLTHROUGH*/ 22587 22588 case CMD_TIMEOUT: 22589 default: 22590 /* 22591 * The lun may still be running the command, so a lun reset 22592 * should be attempted. If the lun reset fails or cannot be 22593 * issued, than try a target reset. Lastly try a bus reset. 22594 */ 22595 if ((pkt->pkt_statistics & 22596 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 22597 int reset_retval = 0; 22598 mutex_exit(SD_MUTEX(un)); 22599 if (un->un_f_allow_bus_device_reset == TRUE) { 22600 if (un->un_f_lun_reset_enabled == TRUE) { 22601 reset_retval = 22602 scsi_reset(SD_ADDRESS(un), 22603 RESET_LUN); 22604 } 22605 if (reset_retval == 0) { 22606 reset_retval = 22607 scsi_reset(SD_ADDRESS(un), 22608 RESET_TARGET); 22609 } 22610 } 22611 if (reset_retval == 0) { 22612 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 22613 } 22614 mutex_enter(SD_MUTEX(un)); 22615 } 22616 break; 22617 } 22618 22619 /* A device/bus reset has occurred; update the reservation status. */ 22620 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 22621 (STAT_BUS_RESET | STAT_DEV_RESET))) { 22622 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 22623 un->un_resvd_status |= 22624 (SD_LOST_RESERVE | SD_WANT_RESERVE); 22625 SD_INFO(SD_LOG_IOCTL_MHD, un, 22626 "sd_mhd_watch_incomplete: Lost Reservation\n"); 22627 } 22628 } 22629 22630 /* 22631 * The disk has been turned off; Update the device state. 22632 * 22633 * Note: Should we be offlining the disk here? 22634 */ 22635 if (pkt->pkt_state == STATE_GOT_BUS) { 22636 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 22637 "Disk not responding to selection\n"); 22638 if (un->un_state != SD_STATE_OFFLINE) { 22639 New_state(un, SD_STATE_OFFLINE); 22640 } 22641 } else if (be_chatty) { 22642 /* 22643 * suppress messages if they are all the same pkt reason; 22644 * with TQ, many (up to 256) are returned with the same 22645 * pkt_reason 22646 */ 22647 if (pkt->pkt_reason != un->un_last_pkt_reason) { 22648 SD_ERROR(SD_LOG_IOCTL_MHD, un, 22649 "sd_mhd_watch_incomplete: " 22650 "SCSI transport failed: reason '%s'\n", 22651 scsi_rname(pkt->pkt_reason)); 22652 } 22653 } 22654 un->un_last_pkt_reason = pkt->pkt_reason; 22655 mutex_exit(SD_MUTEX(un)); 22656 } 22657 22658 22659 /* 22660 * Function: sd_sname() 22661 * 22662 * Description: This is a simple little routine to return a string containing 22663 * a printable description of command status byte for use in 22664 * logging. 22665 * 22666 * Arguments: status - pointer to a status byte 22667 * 22668 * Return Code: char * - string containing status description. 
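 *		For example (illustrative), sd_sname(STATUS_GOOD) yields
 *		"good status"; the STATUS_MASK applied below strips the
 *		vendor-unique bits from the raw status byte before the
 *		comparison.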
22669 */ 22670 22671 static char * 22672 sd_sname(uchar_t status) 22673 { 22674 switch (status & STATUS_MASK) { 22675 case STATUS_GOOD: 22676 return ("good status"); 22677 case STATUS_CHECK: 22678 return ("check condition"); 22679 case STATUS_MET: 22680 return ("condition met"); 22681 case STATUS_BUSY: 22682 return ("busy"); 22683 case STATUS_INTERMEDIATE: 22684 return ("intermediate"); 22685 case STATUS_INTERMEDIATE_MET: 22686 return ("intermediate - condition met"); 22687 case STATUS_RESERVATION_CONFLICT: 22688 return ("reservation_conflict"); 22689 case STATUS_TERMINATED: 22690 return ("command terminated"); 22691 case STATUS_QFULL: 22692 return ("queue full"); 22693 default: 22694 return ("<unknown status>"); 22695 } 22696 } 22697 22698 22699 /* 22700 * Function: sd_mhd_resvd_recover() 22701 * 22702 * Description: This function adds a reservation entry to the 22703 * sd_resv_reclaim_request list and signals the reservation 22704 * reclaim thread that there is work pending. If the reservation 22705 * reclaim thread has not been previously created this function 22706 * will kick it off. 22707 * 22708 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22709 * among multiple watches that share this callback function 22710 * 22711 * Context: This routine is called by timeout() and is run in interrupt 22712 * context. It must not sleep or call other functions which may 22713 * sleep. 22714 */ 22715 22716 static void 22717 sd_mhd_resvd_recover(void *arg) 22718 { 22719 dev_t dev = (dev_t)arg; 22720 struct sd_lun *un; 22721 struct sd_thr_request *sd_treq = NULL; 22722 struct sd_thr_request *sd_cur = NULL; 22723 struct sd_thr_request *sd_prev = NULL; 22724 int already_there = 0; 22725 22726 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22727 return; 22728 } 22729 22730 mutex_enter(SD_MUTEX(un)); 22731 un->un_resvd_timeid = NULL; 22732 if (un->un_resvd_status & SD_WANT_RESERVE) { 22733 /* 22734 * There was a reset so don't issue the reserve, allow the 22735 * sd_mhd_watch_cb callback function to notice this and 22736 * reschedule the timeout for reservation. 22737 */ 22738 mutex_exit(SD_MUTEX(un)); 22739 return; 22740 } 22741 mutex_exit(SD_MUTEX(un)); 22742 22743 /* 22744 * Add this device to the sd_resv_reclaim_request list and the 22745 * sd_resv_reclaim_thread should take care of the rest. 22746 * 22747 * Note: We can't sleep in this context so if the memory allocation 22748 * fails allow the sd_mhd_watch_cb callback function to notice this and 22749 * reschedule the timeout for reservation. 
(4378460)
22750 	 */
22751 	sd_treq = (struct sd_thr_request *)
22752 	    kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
22753 	if (sd_treq == NULL) {
22754 		return;
22755 	}
22756 
22757 	sd_treq->sd_thr_req_next = NULL;
22758 	sd_treq->dev = dev;
22759 	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
22760 	if (sd_tr.srq_thr_req_head == NULL) {
22761 		sd_tr.srq_thr_req_head = sd_treq;
22762 	} else {
22763 		sd_cur = sd_prev = sd_tr.srq_thr_req_head;
22764 		for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
22765 			if (sd_cur->dev == dev) {
22766 				/*
22767 				 * already in Queue so don't log
22768 				 * another request for the device
22769 				 */
22770 				already_there = 1;
22771 				break;
22772 			}
22773 			sd_prev = sd_cur;
22774 		}
22775 		if (!already_there) {
22776 			SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
22777 			    "logging request for %lx\n", dev);
22778 			sd_prev->sd_thr_req_next = sd_treq;
22779 		} else {
22780 			kmem_free(sd_treq, sizeof (struct sd_thr_request));
22781 		}
22782 	}
22783 
22784 	/*
22785 	 * Create a kernel thread to do the reservation reclaim if one is not
22786 	 * already running, then free up this (timeout) thread; we cannot
22787 	 * block here while the reservation reclaim is carried out.
22788 	 */
22789 	if (sd_tr.srq_resv_reclaim_thread == NULL)
22790 		sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
22791 		    sd_resv_reclaim_thread, NULL,
22792 		    0, &p0, TS_RUN, v.v_maxsyspri - 2);
22793 
22794 	/* Tell the reservation reclaim thread that it has work to do */
22795 	cv_signal(&sd_tr.srq_resv_reclaim_cv);
22796 	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
22797 }
22798 
22799 /*
22800  * Function: sd_resv_reclaim_thread()
22801  *
22802  * Description: This function implements the reservation reclaim operations.
22803  *
22804  * Arguments: none. The thread services the global sd_tr request queue
22805  *		populated by sd_mhd_resvd_recover().
22806  */
22807 
22808 static void
22809 sd_resv_reclaim_thread()
22810 {
22811 	struct sd_lun		*un;
22812 	struct sd_thr_request	*sd_mhreq;
22813 
22814 	/* Wait for work */
22815 	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
22816 	if (sd_tr.srq_thr_req_head == NULL) {
22817 		cv_wait(&sd_tr.srq_resv_reclaim_cv,
22818 		    &sd_tr.srq_resv_reclaim_mutex);
22819 	}
22820 
22821 	/* Loop while we have work */
22822 	while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
22823 		un = ddi_get_soft_state(sd_state,
22824 		    SDUNIT(sd_tr.srq_thr_cur_req->dev));
22825 		if (un == NULL) {
22826 			/*
22827 			 * softstate structure is NULL so just
22828 			 * dequeue the request and continue
22829 			 */
22830 			sd_tr.srq_thr_req_head =
22831 			    sd_tr.srq_thr_cur_req->sd_thr_req_next;
22832 			kmem_free(sd_tr.srq_thr_cur_req,
22833 			    sizeof (struct sd_thr_request));
22834 			continue;
22835 		}
22836 
22837 		/* dequeue the request */
22838 		sd_mhreq = sd_tr.srq_thr_cur_req;
22839 		sd_tr.srq_thr_req_head =
22840 		    sd_tr.srq_thr_cur_req->sd_thr_req_next;
22841 		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
22842 
22843 		/*
22844 		 * Reclaim reservation only if SD_RESERVE is still set. There
22845 		 * may have been a call to MHIOCRELEASE before we got here.
22846 		 */
22847 		mutex_enter(SD_MUTEX(un));
22848 		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
22849 			/*
22850 			 * Note: The SD_LOST_RESERVE flag is cleared before
22851 			 * reclaiming the reservation.
If this is done after the 22852 * call to sd_reserve_release a reservation loss in the 22853 * window between pkt completion of reserve cmd and 22854 * mutex_enter below may not be recognized 22855 */ 22856 un->un_resvd_status &= ~SD_LOST_RESERVE; 22857 mutex_exit(SD_MUTEX(un)); 22858 22859 if (sd_reserve_release(sd_mhreq->dev, 22860 SD_RESERVE) == 0) { 22861 mutex_enter(SD_MUTEX(un)); 22862 un->un_resvd_status |= SD_RESERVE; 22863 mutex_exit(SD_MUTEX(un)); 22864 SD_INFO(SD_LOG_IOCTL_MHD, un, 22865 "sd_resv_reclaim_thread: " 22866 "Reservation Recovered\n"); 22867 } else { 22868 mutex_enter(SD_MUTEX(un)); 22869 un->un_resvd_status |= SD_LOST_RESERVE; 22870 mutex_exit(SD_MUTEX(un)); 22871 SD_INFO(SD_LOG_IOCTL_MHD, un, 22872 "sd_resv_reclaim_thread: Failed " 22873 "Reservation Recovery\n"); 22874 } 22875 } else { 22876 mutex_exit(SD_MUTEX(un)); 22877 } 22878 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22879 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 22880 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22881 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 22882 /* 22883 * wakeup the destroy thread if anyone is waiting on 22884 * us to complete. 22885 */ 22886 cv_signal(&sd_tr.srq_inprocess_cv); 22887 SD_TRACE(SD_LOG_IOCTL_MHD, un, 22888 "sd_resv_reclaim_thread: cv_signalling current request \n"); 22889 } 22890 22891 /* 22892 * cleanup the sd_tr structure now that this thread will not exist 22893 */ 22894 ASSERT(sd_tr.srq_thr_req_head == NULL); 22895 ASSERT(sd_tr.srq_thr_cur_req == NULL); 22896 sd_tr.srq_resv_reclaim_thread = NULL; 22897 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22898 thread_exit(); 22899 } 22900 22901 22902 /* 22903 * Function: sd_rmv_resv_reclaim_req() 22904 * 22905 * Description: This function removes any pending reservation reclaim requests 22906 * for the specified device. 22907 * 22908 * Arguments: dev - the device 'dev_t' 22909 */ 22910 22911 static void 22912 sd_rmv_resv_reclaim_req(dev_t dev) 22913 { 22914 struct sd_thr_request *sd_mhreq; 22915 struct sd_thr_request *sd_prev; 22916 22917 /* Remove a reservation reclaim request from the list */ 22918 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 22919 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 22920 /* 22921 * We are attempting to reinstate reservation for 22922 * this device. We wait for sd_reserve_release() 22923 * to return before we return. 22924 */ 22925 cv_wait(&sd_tr.srq_inprocess_cv, 22926 &sd_tr.srq_resv_reclaim_mutex); 22927 } else { 22928 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 22929 if (sd_mhreq && sd_mhreq->dev == dev) { 22930 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 22931 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22932 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22933 return; 22934 } 22935 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 22936 if (sd_mhreq && sd_mhreq->dev == dev) { 22937 break; 22938 } 22939 sd_prev = sd_mhreq; 22940 } 22941 if (sd_mhreq != NULL) { 22942 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 22943 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 22944 } 22945 } 22946 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 22947 } 22948 22949 22950 /* 22951 * Function: sd_mhd_reset_notify_cb() 22952 * 22953 * Description: This is a call back function for scsi_reset_notify. This 22954 * function updates the softstate reserved status and logs the 22955 * reset. The driver scsi watch facility callback function 22956 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 22957 * will reclaim the reservation. 
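 *		Setting SD_LOST_RESERVE together with SD_WANT_RESERVE
 *		causes sd_mhd_watch_cb() to first cancel any pending
 *		recovery timeout and then schedule sd_mhd_resvd_recover(),
 *		which queues the request for the reclaim thread.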
22958  *
22959  * Arguments: arg  - driver soft state (unit) structure
22960  */
22961 
22962 static void
22963 sd_mhd_reset_notify_cb(caddr_t arg)
22964 {
22965 	struct sd_lun *un = (struct sd_lun *)arg;
22966 
22967 	mutex_enter(SD_MUTEX(un));
22968 	if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
22969 		un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
22970 		SD_INFO(SD_LOG_IOCTL_MHD, un,
22971 		    "sd_mhd_reset_notify_cb: Lost Reservation\n");
22972 	}
22973 	mutex_exit(SD_MUTEX(un));
22974 }
22975 
22976 
22977 /*
22978  * Function: sd_take_ownership()
22979  *
22980  * Description: This routine implements an algorithm to achieve a stable
22981  *	reservation on disks which don't implement priority reserve,
22982  *	and makes sure that re-reservation attempts by other hosts fail.
22983  *	This algorithm consists of a loop that keeps issuing the RESERVE
22984  *	for some period of time (min_ownership_delay, default 6 seconds).
22985  *	During that loop, it looks to see if there has been a bus device
22986  *	reset or bus reset (both of which cause an existing reservation
22987  *	to be lost). If the reservation is lost, RESERVE is reissued until
22988  *	a period of min_ownership_delay with no resets has gone by, or
22989  *	until max_ownership_delay has expired. This loop ensures that
22990  *	the host really did manage to reserve the device, in spite of
22991  *	resets. The looping for min_ownership_delay (default six
22992  *	seconds) is important to early generation clustering products,
22993  *	Solstice HA 1.x and Sun Cluster 2.x. Those products use an
22994  *	MHIOCENFAILFAST periodic timer of two seconds. By having
22995  *	MHIOCTKOWN issue Reserves in a loop for six seconds, and having
22996  *	MHIOCENFAILFAST poll every two seconds, the idea is that by the
22997  *	time the MHIOCTKOWN ioctl returns, the other host (if any) will
22998  *	have already noticed, via the MHIOCENFAILFAST polling, that it
22999  *	no longer "owns" the disk and will have panicked itself. Thus,
23000  *	the host issuing the MHIOCTKOWN is assured (with timing
23001  *	dependencies) that by the time it actually starts to use the
23002  *	disk for real work, the old owner is no longer accessing it.
23003  *
23004  *	min_ownership_delay is the minimum amount of time for which the
23005  *	disk must be reserved continuously devoid of resets before the
23006  *	MHIOCTKOWN ioctl will return success.
23007  *
23008  *	max_ownership_delay indicates the amount of time by which the
23009  *	take ownership should succeed or time out with an error.
23010  *
23011  * Arguments: dev	- the device 'dev_t'
23012  *		*p	- struct containing timing info.
23013  *
23014  * Return Code: 0 for success or error code
23015  */
23016 
23017 static int
23018 sd_take_ownership(dev_t dev, struct mhioctkown *p)
23019 {
23020 	struct sd_lun	*un;
23021 	int		rval;
23022 	int		err;
23023 	int		reservation_count = 0;
23024 	int		min_ownership_delay =  6000000; /* in usec */
23025 	int		max_ownership_delay = 30000000; /* in usec */
23026 	clock_t		start_time;	/* starting time of this algorithm */
23027 	clock_t		end_time;	/* time limit for giving up */
23028 	clock_t		ownership_time;	/* time limit for stable ownership */
23029 	clock_t		current_time;
23030 	clock_t		previous_current_time;
23031 
23032 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23033 		return (ENXIO);
23034 	}
23035 
23036 	/*
23037 	 * Attempt a device reservation. A priority reservation is requested.
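	 * (SD_PRIORITY_RESERVE directs sd_reserve_release(), shown below, to
	 * break a reservation held by another host with a LUN, target or bus
	 * reset and then reissue the RESERVE.)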
23038 */ 23039 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 23040 != SD_SUCCESS) { 23041 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23042 "sd_take_ownership: return(1)=%d\n", rval); 23043 return (rval); 23044 } 23045 23046 /* Update the softstate reserved status to indicate the reservation */ 23047 mutex_enter(SD_MUTEX(un)); 23048 un->un_resvd_status |= SD_RESERVE; 23049 un->un_resvd_status &= 23050 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 23051 mutex_exit(SD_MUTEX(un)); 23052 23053 if (p != NULL) { 23054 if (p->min_ownership_delay != 0) { 23055 min_ownership_delay = p->min_ownership_delay * 1000; 23056 } 23057 if (p->max_ownership_delay != 0) { 23058 max_ownership_delay = p->max_ownership_delay * 1000; 23059 } 23060 } 23061 SD_INFO(SD_LOG_IOCTL_MHD, un, 23062 "sd_take_ownership: min, max delays: %d, %d\n", 23063 min_ownership_delay, max_ownership_delay); 23064 23065 start_time = ddi_get_lbolt(); 23066 current_time = start_time; 23067 ownership_time = current_time + drv_usectohz(min_ownership_delay); 23068 end_time = start_time + drv_usectohz(max_ownership_delay); 23069 23070 while (current_time - end_time < 0) { 23071 delay(drv_usectohz(500000)); 23072 23073 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 23074 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 23075 mutex_enter(SD_MUTEX(un)); 23076 rval = (un->un_resvd_status & 23077 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 23078 mutex_exit(SD_MUTEX(un)); 23079 break; 23080 } 23081 } 23082 previous_current_time = current_time; 23083 current_time = ddi_get_lbolt(); 23084 mutex_enter(SD_MUTEX(un)); 23085 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 23086 ownership_time = ddi_get_lbolt() + 23087 drv_usectohz(min_ownership_delay); 23088 reservation_count = 0; 23089 } else { 23090 reservation_count++; 23091 } 23092 un->un_resvd_status |= SD_RESERVE; 23093 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 23094 mutex_exit(SD_MUTEX(un)); 23095 23096 SD_INFO(SD_LOG_IOCTL_MHD, un, 23097 "sd_take_ownership: ticks for loop iteration=%ld, " 23098 "reservation=%s\n", (current_time - previous_current_time), 23099 reservation_count ? 
"ok" : "reclaimed"); 23100 23101 if (current_time - ownership_time >= 0 && 23102 reservation_count >= 4) { 23103 rval = 0; /* Achieved a stable ownership */ 23104 break; 23105 } 23106 if (current_time - end_time >= 0) { 23107 rval = EACCES; /* No ownership in max possible time */ 23108 break; 23109 } 23110 } 23111 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23112 "sd_take_ownership: return(2)=%d\n", rval); 23113 return (rval); 23114 } 23115 23116 23117 /* 23118 * Function: sd_reserve_release() 23119 * 23120 * Description: This function builds and sends scsi RESERVE, RELEASE, and 23121 * PRIORITY RESERVE commands based on a user specified command type 23122 * 23123 * Arguments: dev - the device 'dev_t' 23124 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 23125 * SD_RESERVE, SD_RELEASE 23126 * 23127 * Return Code: 0 or Error Code 23128 */ 23129 23130 static int 23131 sd_reserve_release(dev_t dev, int cmd) 23132 { 23133 struct uscsi_cmd *com = NULL; 23134 struct sd_lun *un = NULL; 23135 char cdb[CDB_GROUP0]; 23136 int rval; 23137 23138 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 23139 (cmd == SD_PRIORITY_RESERVE)); 23140 23141 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23142 return (ENXIO); 23143 } 23144 23145 /* instantiate and initialize the command and cdb */ 23146 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 23147 bzero(cdb, CDB_GROUP0); 23148 com->uscsi_flags = USCSI_SILENT; 23149 com->uscsi_timeout = un->un_reserve_release_time; 23150 com->uscsi_cdblen = CDB_GROUP0; 23151 com->uscsi_cdb = cdb; 23152 if (cmd == SD_RELEASE) { 23153 cdb[0] = SCMD_RELEASE; 23154 } else { 23155 cdb[0] = SCMD_RESERVE; 23156 } 23157 23158 /* Send the command. */ 23159 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 23160 SD_PATH_STANDARD); 23161 23162 /* 23163 * "break" a reservation that is held by another host, by issuing a 23164 * reset if priority reserve is desired, and we could not get the 23165 * device. 23166 */ 23167 if ((cmd == SD_PRIORITY_RESERVE) && 23168 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 23169 /* 23170 * First try to reset the LUN. If we cannot, then try a target 23171 * reset, followed by a bus reset if the target reset fails. 23172 */ 23173 int reset_retval = 0; 23174 if (un->un_f_lun_reset_enabled == TRUE) { 23175 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 23176 } 23177 if (reset_retval == 0) { 23178 /* The LUN reset either failed or was not issued */ 23179 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 23180 } 23181 if ((reset_retval == 0) && 23182 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 23183 rval = EIO; 23184 kmem_free(com, sizeof (*com)); 23185 return (rval); 23186 } 23187 23188 bzero(com, sizeof (struct uscsi_cmd)); 23189 com->uscsi_flags = USCSI_SILENT; 23190 com->uscsi_cdb = cdb; 23191 com->uscsi_cdblen = CDB_GROUP0; 23192 com->uscsi_timeout = 5; 23193 23194 /* 23195 * Reissue the last reserve command, this time without request 23196 * sense. Assume that it is just a regular reserve command. 23197 */ 23198 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 23199 SD_PATH_STANDARD); 23200 } 23201 23202 /* Return an error if still getting a reservation conflict. 
*/
23203 	if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
23204 		rval = EACCES;
23205 	}
23206 
23207 	kmem_free(com, sizeof (*com));
23208 	return (rval);
23209 }
23210 
23211 
23212 #define	SD_NDUMP_RETRIES	12
23213 /*
23214  * System Crash Dump routine
23215  */
23216 
23217 static int
23218 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
23219 {
23220 	int		instance;
23221 	int		partition;
23222 	int		i;
23223 	int		err;
23224 	struct sd_lun	*un;
23225 	struct scsi_pkt *wr_pktp;
23226 	struct buf	*wr_bp;
23227 	struct buf	wr_buf;
23228 	daddr_t		tgt_byte_offset; /* rmw - byte offset for target */
23229 	daddr_t		tgt_blkno;	/* rmw - blkno for target */
23230 	size_t		tgt_byte_count; /* rmw - # of bytes to xfer */
23231 	size_t		tgt_nblk;	/* rmw - # of tgt blks to xfer */
23232 	size_t		io_start_offset;
23233 	int		doing_rmw = FALSE;
23234 	int		rval;
23235 	ssize_t		dma_resid;
23236 	daddr_t		oblkno;
23237 	diskaddr_t	nblks = 0;
23238 	diskaddr_t	start_block;
23239 
23240 	instance = SDUNIT(dev);
23241 	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
23242 	    !SD_IS_VALID_LABEL(un) || ISCD(un)) {
23243 		return (ENXIO);
23244 	}
23245 
23246 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))
23247 
23248 	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");
23249 
23250 	partition = SDPART(dev);
23251 	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);
23252 
23253 	/* Validate the blocks to dump against the partition size. */
23254 
23255 	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
23256 	    &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);
23257 
23258 	if ((blkno + nblk) > nblks) {
23259 		SD_TRACE(SD_LOG_DUMP, un,
23260 		    "sddump: dump range larger than partition: "
23261 		    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
23262 		    blkno, nblk, nblks);
23263 		return (EINVAL);
23264 	}
23265 
23266 	mutex_enter(&un->un_pm_mutex);
23267 	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
23268 		struct scsi_pkt *start_pktp;
23269 
23270 		mutex_exit(&un->un_pm_mutex);
23271 
23272 		/*
23273 		 * use pm framework to power on HBA 1st
23274 		 */
23275 		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
23276 
23277 		/*
23278 		 * Dump no longer uses sdpower to power on a device; it is
23279 		 * done in-line here so that it can run in polled mode.
23280 		 */
23281 
23282 		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");
23283 
23284 		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
23285 		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);
23286 
23287 		if (start_pktp == NULL) {
23288 			/* We were not given a SCSI packet, fail. */
23289 			return (EIO);
23290 		}
23291 		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
23292 		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
23293 		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
23294 		start_pktp->pkt_flags = FLAG_NOINTR;
23295 
23296 		mutex_enter(SD_MUTEX(un));
23297 		SD_FILL_SCSI1_LUN(un, start_pktp);
23298 		mutex_exit(SD_MUTEX(un));
23299 		/*
23300 		 * Scsi_poll returns 0 (success) if the command completes and
23301 		 * the status block is STATUS_GOOD.
23302 		 */
23303 		if (sd_scsi_poll(un, start_pktp) != 0) {
23304 			scsi_destroy_pkt(start_pktp);
23305 			return (EIO);
23306 		}
23307 		scsi_destroy_pkt(start_pktp);
23308 		(void) sd_ddi_pm_resume(un);
23309 	} else {
23310 		mutex_exit(&un->un_pm_mutex);
23311 	}
23312 
23313 	mutex_enter(SD_MUTEX(un));
23314 	un->un_throttle = 0;
23315 
23316 	/*
23317 	 * The first time through, reset the specific target device.
23318 	 * However, when cpr calls sddump we know that sd is in
23319 	 * a good state, so no bus reset is required.
23320 	 * Clear sense data via Request Sense cmd.
23321 * In sddump we don't care about allow_bus_device_reset anymore 23322 */ 23323 23324 if ((un->un_state != SD_STATE_SUSPENDED) && 23325 (un->un_state != SD_STATE_DUMPING)) { 23326 23327 New_state(un, SD_STATE_DUMPING); 23328 23329 if (un->un_f_is_fibre == FALSE) { 23330 mutex_exit(SD_MUTEX(un)); 23331 /* 23332 * Attempt a bus reset for parallel scsi. 23333 * 23334 * Note: A bus reset is required because on some host 23335 * systems (i.e. E420R) a bus device reset is 23336 * insufficient to reset the state of the target. 23337 * 23338 * Note: Don't issue the reset for fibre-channel, 23339 * because this tends to hang the bus (loop) for 23340 * too long while everyone is logging out and in 23341 * and the deadman timer for dumping will fire 23342 * before the dump is complete. 23343 */ 23344 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 23345 mutex_enter(SD_MUTEX(un)); 23346 Restore_state(un); 23347 mutex_exit(SD_MUTEX(un)); 23348 return (EIO); 23349 } 23350 23351 /* Delay to give the device some recovery time. */ 23352 drv_usecwait(10000); 23353 23354 if (sd_send_polled_RQS(un) == SD_FAILURE) { 23355 SD_INFO(SD_LOG_DUMP, un, 23356 "sddump: sd_send_polled_RQS failed\n"); 23357 } 23358 mutex_enter(SD_MUTEX(un)); 23359 } 23360 } 23361 23362 /* 23363 * Convert the partition-relative block number to a 23364 * disk physical block number. 23365 */ 23366 blkno += start_block; 23367 23368 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 23369 23370 23371 /* 23372 * Check if the device has a non-512 block size. 23373 */ 23374 wr_bp = NULL; 23375 if (NOT_DEVBSIZE(un)) { 23376 tgt_byte_offset = blkno * un->un_sys_blocksize; 23377 tgt_byte_count = nblk * un->un_sys_blocksize; 23378 if ((tgt_byte_offset % un->un_tgt_blocksize) || 23379 (tgt_byte_count % un->un_tgt_blocksize)) { 23380 doing_rmw = TRUE; 23381 /* 23382 * Calculate the block number and number of block 23383 * in terms of the media block size. 23384 */ 23385 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 23386 tgt_nblk = 23387 ((tgt_byte_offset + tgt_byte_count + 23388 (un->un_tgt_blocksize - 1)) / 23389 un->un_tgt_blocksize) - tgt_blkno; 23390 23391 /* 23392 * Invoke the routine which is going to do read part 23393 * of read-modify-write. 23394 * Note that this routine returns a pointer to 23395 * a valid bp in wr_bp. 23396 */ 23397 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 23398 &wr_bp); 23399 if (err) { 23400 mutex_exit(SD_MUTEX(un)); 23401 return (err); 23402 } 23403 /* 23404 * Offset is being calculated as - 23405 * (original block # * system block size) - 23406 * (new block # * target block size) 23407 */ 23408 io_start_offset = 23409 ((uint64_t)(blkno * un->un_sys_blocksize)) - 23410 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 23411 23412 ASSERT((io_start_offset >= 0) && 23413 (io_start_offset < un->un_tgt_blocksize)); 23414 /* 23415 * Do the modify portion of read modify write. 
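			 * As a worked example (illustrative numbers only):
			 * with un_sys_blocksize = 512, un_tgt_blocksize =
			 * 2048, blkno = 3 and nblk = 2, we get
			 * tgt_byte_offset = 1536, tgt_byte_count = 1024,
			 * tgt_blkno = 0, tgt_nblk = 2 and io_start_offset =
			 * 1536; the two 512-byte system blocks are copied
			 * 1536 bytes into the 4K read buffer, leaving the
			 * surrounding target-block data intact.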
23416 */ 23417 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 23418 (size_t)nblk * un->un_sys_blocksize); 23419 } else { 23420 doing_rmw = FALSE; 23421 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 23422 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 23423 } 23424 23425 /* Convert blkno and nblk to target blocks */ 23426 blkno = tgt_blkno; 23427 nblk = tgt_nblk; 23428 } else { 23429 wr_bp = &wr_buf; 23430 bzero(wr_bp, sizeof (struct buf)); 23431 wr_bp->b_flags = B_BUSY; 23432 wr_bp->b_un.b_addr = addr; 23433 wr_bp->b_bcount = nblk << DEV_BSHIFT; 23434 wr_bp->b_resid = 0; 23435 } 23436 23437 mutex_exit(SD_MUTEX(un)); 23438 23439 /* 23440 * Obtain a SCSI packet for the write command. 23441 * It should be safe to call the allocator here without 23442 * worrying about being locked for DVMA mapping because 23443 * the address we're passed is already a DVMA mapping 23444 * 23445 * We are also not going to worry about semaphore ownership 23446 * in the dump buffer. Dumping is single threaded at present. 23447 */ 23448 23449 wr_pktp = NULL; 23450 23451 dma_resid = wr_bp->b_bcount; 23452 oblkno = blkno; 23453 23454 while (dma_resid != 0) { 23455 23456 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23457 wr_bp->b_flags &= ~B_ERROR; 23458 23459 if (un->un_partial_dma_supported == 1) { 23460 blkno = oblkno + 23461 ((wr_bp->b_bcount - dma_resid) / 23462 un->un_tgt_blocksize); 23463 nblk = dma_resid / un->un_tgt_blocksize; 23464 23465 if (wr_pktp) { 23466 /* 23467 * Partial DMA transfers after initial transfer 23468 */ 23469 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 23470 blkno, nblk); 23471 } else { 23472 /* Initial transfer */ 23473 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23474 un->un_pkt_flags, NULL_FUNC, NULL, 23475 blkno, nblk); 23476 } 23477 } else { 23478 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 23479 0, NULL_FUNC, NULL, blkno, nblk); 23480 } 23481 23482 if (rval == 0) { 23483 /* We were given a SCSI packet, continue. 
*/ 23484 break; 23485 } 23486 23487 if (i == 0) { 23488 if (wr_bp->b_flags & B_ERROR) { 23489 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23490 "no resources for dumping; " 23491 "error code: 0x%x, retrying", 23492 geterror(wr_bp)); 23493 } else { 23494 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23495 "no resources for dumping; retrying"); 23496 } 23497 } else if (i != (SD_NDUMP_RETRIES - 1)) { 23498 if (wr_bp->b_flags & B_ERROR) { 23499 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23500 "no resources for dumping; error code: " 23501 "0x%x, retrying\n", geterror(wr_bp)); 23502 } 23503 } else { 23504 if (wr_bp->b_flags & B_ERROR) { 23505 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23506 "no resources for dumping; " 23507 "error code: 0x%x, retries failed, " 23508 "giving up.\n", geterror(wr_bp)); 23509 } else { 23510 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 23511 "no resources for dumping; " 23512 "retries failed, giving up.\n"); 23513 } 23514 mutex_enter(SD_MUTEX(un)); 23515 Restore_state(un); 23516 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 23517 mutex_exit(SD_MUTEX(un)); 23518 scsi_free_consistent_buf(wr_bp); 23519 } else { 23520 mutex_exit(SD_MUTEX(un)); 23521 } 23522 return (EIO); 23523 } 23524 drv_usecwait(10000); 23525 } 23526 23527 if (un->un_partial_dma_supported == 1) { 23528 /* 23529 * save the resid from PARTIAL_DMA 23530 */ 23531 dma_resid = wr_pktp->pkt_resid; 23532 if (dma_resid != 0) 23533 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 23534 wr_pktp->pkt_resid = 0; 23535 } else { 23536 dma_resid = 0; 23537 } 23538 23539 /* SunBug 1222170 */ 23540 wr_pktp->pkt_flags = FLAG_NOINTR; 23541 23542 err = EIO; 23543 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 23544 23545 /* 23546 * Scsi_poll returns 0 (success) if the command completes and 23547 * the status block is STATUS_GOOD. We should only check 23548 * errors if this condition is not true. Even then we should 23549 * send our own request sense packet only if we have a check 23550 * condition and auto request sense has not been performed by 23551 * the hba. 23552 */ 23553 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 23554 23555 if ((sd_scsi_poll(un, wr_pktp) == 0) && 23556 (wr_pktp->pkt_resid == 0)) { 23557 err = SD_SUCCESS; 23558 break; 23559 } 23560 23561 /* 23562 * Check CMD_DEV_GONE 1st, give up if device is gone. 
23563 */ 23564 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 23565 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 23566 "Error while dumping state...Device is gone\n"); 23567 break; 23568 } 23569 23570 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 23571 SD_INFO(SD_LOG_DUMP, un, 23572 "sddump: write failed with CHECK, try # %d\n", i); 23573 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 23574 (void) sd_send_polled_RQS(un); 23575 } 23576 23577 continue; 23578 } 23579 23580 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 23581 int reset_retval = 0; 23582 23583 SD_INFO(SD_LOG_DUMP, un, 23584 "sddump: write failed with BUSY, try # %d\n", i); 23585 23586 if (un->un_f_lun_reset_enabled == TRUE) { 23587 reset_retval = scsi_reset(SD_ADDRESS(un), 23588 RESET_LUN); 23589 } 23590 if (reset_retval == 0) { 23591 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 23592 } 23593 (void) sd_send_polled_RQS(un); 23594 23595 } else { 23596 SD_INFO(SD_LOG_DUMP, un, 23597 "sddump: write failed with 0x%x, try # %d\n", 23598 SD_GET_PKT_STATUS(wr_pktp), i); 23599 mutex_enter(SD_MUTEX(un)); 23600 sd_reset_target(un, wr_pktp); 23601 mutex_exit(SD_MUTEX(un)); 23602 } 23603 23604 /* 23605 * If we are not getting anywhere with lun/target resets, 23606 * let's reset the bus. 23607 */ 23608 if (i == SD_NDUMP_RETRIES/2) { 23609 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 23610 (void) sd_send_polled_RQS(un); 23611 } 23612 } 23613 } 23614 23615 scsi_destroy_pkt(wr_pktp); 23616 mutex_enter(SD_MUTEX(un)); 23617 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 23618 mutex_exit(SD_MUTEX(un)); 23619 scsi_free_consistent_buf(wr_bp); 23620 } else { 23621 mutex_exit(SD_MUTEX(un)); 23622 } 23623 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 23624 return (err); 23625 } 23626 23627 /* 23628 * Function: sd_scsi_poll() 23629 * 23630 * Description: This is a wrapper for the scsi_poll call. 23631 * 23632 * Arguments: sd_lun - The unit structure 23633 * scsi_pkt - The scsi packet being sent to the device. 23634 * 23635 * Return Code: 0 - Command completed successfully with good status 23636 * -1 - Command failed. This could indicate a check condition 23637 * or other status value requiring recovery action. 23638 * 23639 * NOTE: This code is only called off sddump(). 23640 */ 23641 23642 static int 23643 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 23644 { 23645 int status; 23646 23647 ASSERT(un != NULL); 23648 ASSERT(!mutex_owned(SD_MUTEX(un))); 23649 ASSERT(pktp != NULL); 23650 23651 status = SD_SUCCESS; 23652 23653 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 23654 pktp->pkt_flags |= un->un_tagflags; 23655 pktp->pkt_flags &= ~FLAG_NODISCON; 23656 } 23657 23658 status = sd_ddi_scsi_poll(pktp); 23659 /* 23660 * Scsi_poll returns 0 (success) if the command completes and the 23661 * status block is STATUS_GOOD. We should only check errors if this 23662 * condition is not true. Even then we should send our own request 23663 * sense packet only if we have a check condition and auto 23664 * request sense has not been performed by the hba. 23665 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 23666 */ 23667 if ((status != SD_SUCCESS) && 23668 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 23669 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 23670 (pktp->pkt_reason != CMD_DEV_GONE)) 23671 (void) sd_send_polled_RQS(un); 23672 23673 return (status); 23674 } 23675 23676 /* 23677 * Function: sd_send_polled_RQS() 23678 * 23679 * Description: This sends the request sense command to a device. 
23680 * 23681 * Arguments: sd_lun - The unit structure 23682 * 23683 * Return Code: 0 - Command completed successfully with good status 23684 * -1 - Command failed. 23685 * 23686 */ 23687 23688 static int 23689 sd_send_polled_RQS(struct sd_lun *un) 23690 { 23691 int ret_val; 23692 struct scsi_pkt *rqs_pktp; 23693 struct buf *rqs_bp; 23694 23695 ASSERT(un != NULL); 23696 ASSERT(!mutex_owned(SD_MUTEX(un))); 23697 23698 ret_val = SD_SUCCESS; 23699 23700 rqs_pktp = un->un_rqs_pktp; 23701 rqs_bp = un->un_rqs_bp; 23702 23703 mutex_enter(SD_MUTEX(un)); 23704 23705 if (un->un_sense_isbusy) { 23706 ret_val = SD_FAILURE; 23707 mutex_exit(SD_MUTEX(un)); 23708 return (ret_val); 23709 } 23710 23711 /* 23712 * If the request sense buffer (and packet) is not in use, 23713 * let's set the un_sense_isbusy and send our packet 23714 */ 23715 un->un_sense_isbusy = 1; 23716 rqs_pktp->pkt_resid = 0; 23717 rqs_pktp->pkt_reason = 0; 23718 rqs_pktp->pkt_flags |= FLAG_NOINTR; 23719 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 23720 23721 mutex_exit(SD_MUTEX(un)); 23722 23723 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 23724 " 0x%p\n", rqs_bp->b_un.b_addr); 23725 23726 /* 23727 * Can't send this to sd_scsi_poll, we wrap ourselves around the 23728 * axle - it has a call into us! 23729 */ 23730 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 23731 SD_INFO(SD_LOG_COMMON, un, 23732 "sd_send_polled_RQS: RQS failed\n"); 23733 } 23734 23735 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 23736 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 23737 23738 mutex_enter(SD_MUTEX(un)); 23739 un->un_sense_isbusy = 0; 23740 mutex_exit(SD_MUTEX(un)); 23741 23742 return (ret_val); 23743 } 23744 23745 /* 23746 * Defines needed for localized version of the scsi_poll routine. 23747 */ 23748 #define CSEC 10000 /* usecs */ 23749 #define SEC_TO_CSEC (1000000/CSEC) 23750 23751 /* 23752 * Function: sd_ddi_scsi_poll() 23753 * 23754 * Description: Localized version of the scsi_poll routine. The purpose is to 23755 * send a scsi_pkt to a device as a polled command. This version 23756 * is to ensure more robust handling of transport errors. 23757 * Specifically, this routine cures the not-ready to coming-ready 23758 * transition seen on power up and reset of Sonoma devices. This 23759 * can take up to 45 seconds for power-on and 20 seconds for reset 23760 * of a Sonoma LUN. 23761 * 23762 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 23763 * 23764 * Return Code: 0 - Command completed successfully with good status 23765 * -1 - Command failed. 23766 * 23767 * NOTE: This code is almost identical to scsi_poll; however, before 6668774 can 23768 * be fixed (removing this code), we need to determine how to handle the 23769 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 23770 * 23771 * NOTE: This code is only called off sddump(). 23772 */ 23773 static int 23774 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 23775 { 23776 int rval = -1; 23777 int savef; 23778 long savet; 23779 void (*savec)(); 23780 int timeout; 23781 int busy_count; 23782 int poll_delay; 23783 int rc; 23784 uint8_t *sensep; 23785 struct scsi_arq_status *arqstat; 23786 extern int do_polled_io; 23787 23788 ASSERT(pkt->pkt_scbp); 23789 23790 /* 23791 * save old flags..
23792 */ 23793 savef = pkt->pkt_flags; 23794 savec = pkt->pkt_comp; 23795 savet = pkt->pkt_time; 23796 23797 pkt->pkt_flags |= FLAG_NOINTR; 23798 23799 /* 23800 * XXX there is nothing in the SCSA spec that states that we should not 23801 * do a callback for polled cmds; however, removing this will break sd 23802 * and probably other target drivers 23803 */ 23804 pkt->pkt_comp = NULL; 23805 23806 /* 23807 * we don't like a polled command without timeout. 23808 * 60 seconds seems long enough. 23809 */ 23810 if (pkt->pkt_time == 0) 23811 pkt->pkt_time = SCSI_POLL_TIMEOUT; 23812 23813 /* 23814 * Send polled cmd. 23815 * 23816 * We do some error recovery for various errors. Tran_busy, 23817 * queue full, and non-dispatched commands are retried every 10 msec. 23818 * as they are typically transient failures. Busy status and Not 23819 * Ready are retried every second as this status takes a while to 23820 * change. 23821 */ 23822 timeout = pkt->pkt_time * SEC_TO_CSEC; 23823 23824 for (busy_count = 0; busy_count < timeout; busy_count++) { 23825 /* 23826 * Initialize pkt status variables. 23827 */ 23828 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 23829 23830 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 23831 if (rc != TRAN_BUSY) { 23832 /* Transport failed - give up. */ 23833 break; 23834 } else { 23835 /* Transport busy - try again. */ 23836 poll_delay = 1 * CSEC; /* 10 msec. */ 23837 } 23838 } else { 23839 /* 23840 * Transport accepted - check pkt status. 23841 */ 23842 rc = (*pkt->pkt_scbp) & STATUS_MASK; 23843 if ((pkt->pkt_reason == CMD_CMPLT) && 23844 (rc == STATUS_CHECK) && 23845 (pkt->pkt_state & STATE_ARQ_DONE)) { 23846 arqstat = 23847 (struct scsi_arq_status *)(pkt->pkt_scbp); 23848 sensep = (uint8_t *)&arqstat->sts_sensedata; 23849 } else { 23850 sensep = NULL; 23851 } 23852 23853 if ((pkt->pkt_reason == CMD_CMPLT) && 23854 (rc == STATUS_GOOD)) { 23855 /* No error - we're done */ 23856 rval = 0; 23857 break; 23858 23859 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 23860 /* Lost connection - give up */ 23861 break; 23862 23863 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 23864 (pkt->pkt_state == 0)) { 23865 /* Pkt not dispatched - try again. */ 23866 poll_delay = 1 * CSEC; /* 10 msec. */ 23867 23868 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23869 (rc == STATUS_QFULL)) { 23870 /* Queue full - try again. */ 23871 poll_delay = 1 * CSEC; /* 10 msec. */ 23872 23873 } else if ((pkt->pkt_reason == CMD_CMPLT) && 23874 (rc == STATUS_BUSY)) { 23875 /* Busy - try again. */ 23876 poll_delay = 100 * CSEC; /* 1 sec. */ 23877 busy_count += (SEC_TO_CSEC - 1); 23878 23879 } else if ((sensep != NULL) && 23880 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 23881 /* 23882 * Unit Attention - try again. 23883 * Pretend it took 1 sec. 23884 * NOTE: 'continue' avoids poll_delay 23885 */ 23886 busy_count += (SEC_TO_CSEC - 1); 23887 continue; 23888 23889 } else if ((sensep != NULL) && 23890 (scsi_sense_key(sensep) == KEY_NOT_READY) && 23891 (scsi_sense_asc(sensep) == 0x04) && 23892 (scsi_sense_ascq(sensep) == 0x01)) { 23893 /* 23894 * Not ready -> ready - try again. 23895 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 23896 * ...same as STATUS_BUSY 23897 */ 23898 poll_delay = 100 * CSEC; /* 1 sec. */ 23899 busy_count += (SEC_TO_CSEC - 1); 23900 23901 } else { 23902 /* BAD status - give up. 
*/ 23903 break; 23904 } 23905 } 23906 23907 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 23908 !do_polled_io) { 23909 delay(drv_usectohz(poll_delay)); 23910 } else { 23911 /* we busy wait during cpr_dump or interrupt threads */ 23912 drv_usecwait(poll_delay); 23913 } 23914 } 23915 23916 pkt->pkt_flags = savef; 23917 pkt->pkt_comp = savec; 23918 pkt->pkt_time = savet; 23919 23920 /* return on error */ 23921 if (rval) 23922 return (rval); 23923 23924 /* 23925 * This is not a performance critical code path. 23926 * 23927 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 23928 * issues associated with looking at DMA memory prior to 23929 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 23930 */ 23931 scsi_sync_pkt(pkt); 23932 return (0); 23933 } 23934 23935 23936 23937 /* 23938 * Function: sd_persistent_reservation_in_read_keys 23939 * 23940 * Description: This routine is the driver entry point for handling CD-ROM 23941 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 23942 * by sending the SCSI-3 PRIN commands to the device. 23943 * Processes the read keys command response by copying the 23944 * reservation key information into the user provided buffer. 23945 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 23946 * 23947 * Arguments: un - Pointer to soft state struct for the target. 23948 * usrp - user provided pointer to multihost Persistent In Read 23949 * Keys structure (mhioc_inkeys_t) 23950 * flag - this argument is a pass through to ddi_copyxxx() 23951 * directly from the mode argument of ioctl(). 23952 * 23953 * Return Code: 0 - Success 23954 * EACCES 23955 * ENOTSUP 23956 * errno return code from sd_send_scsi_cmd() 23957 * 23958 * Context: Can sleep. Does not return until command is completed. 23959 */ 23960 23961 static int 23962 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 23963 mhioc_inkeys_t *usrp, int flag) 23964 { 23965 #ifdef _MULTI_DATAMODEL 23966 struct mhioc_key_list32 li32; 23967 #endif 23968 sd_prin_readkeys_t *in; 23969 mhioc_inkeys_t *ptr; 23970 mhioc_key_list_t li; 23971 uchar_t *data_bufp; 23972 int data_len; 23973 int rval; 23974 size_t copysz; 23975 23976 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 23977 return (EINVAL); 23978 } 23979 bzero(&li, sizeof (mhioc_key_list_t)); 23980 23981 /* 23982 * Get the listsize from user 23983 */ 23984 #ifdef _MULTI_DATAMODEL 23985 23986 switch (ddi_model_convert_from(flag & FMODELS)) { 23987 case DDI_MODEL_ILP32: 23988 copysz = sizeof (struct mhioc_key_list32); 23989 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 23990 SD_ERROR(SD_LOG_IOCTL_MHD, un, 23991 "sd_persistent_reservation_in_read_keys: " 23992 "failed ddi_copyin: mhioc_key_list32_t\n"); 23993 rval = EFAULT; 23994 goto done; 23995 } 23996 li.listsize = li32.listsize; 23997 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 23998 break; 23999 24000 case DDI_MODEL_NONE: 24001 copysz = sizeof (mhioc_key_list_t); 24002 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 24003 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24004 "sd_persistent_reservation_in_read_keys: " 24005 "failed ddi_copyin: mhioc_key_list_t\n"); 24006 rval = EFAULT; 24007 goto done; 24008 } 24009 break; 24010 } 24011 24012 #else /* ! 
_MULTI_DATAMODEL */ 24013 copysz = sizeof (mhioc_key_list_t); 24014 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 24015 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24016 "sd_persistent_reservation_in_read_keys: " 24017 "failed ddi_copyin: mhioc_key_list_t\n"); 24018 rval = EFAULT; 24019 goto done; 24020 } 24021 #endif 24022 24023 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 24024 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 24025 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 24026 24027 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 24028 data_len, data_bufp)) != 0) { 24029 goto done; 24030 } 24031 in = (sd_prin_readkeys_t *)data_bufp; 24032 ptr->generation = BE_32(in->generation); 24033 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 24034 24035 /* 24036 * Return the min(listsize, listlen) keys 24037 */ 24038 #ifdef _MULTI_DATAMODEL 24039 24040 switch (ddi_model_convert_from(flag & FMODELS)) { 24041 case DDI_MODEL_ILP32: 24042 li32.listlen = li.listlen; 24043 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 24044 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24045 "sd_persistent_reservation_in_read_keys: " 24046 "failed ddi_copyout: mhioc_key_list32_t\n"); 24047 rval = EFAULT; 24048 goto done; 24049 } 24050 break; 24051 24052 case DDI_MODEL_NONE: 24053 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 24054 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24055 "sd_persistent_reservation_in_read_keys: " 24056 "failed ddi_copyout: mhioc_key_list_t\n"); 24057 rval = EFAULT; 24058 goto done; 24059 } 24060 break; 24061 } 24062 24063 #else /* ! _MULTI_DATAMODEL */ 24064 24065 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 24066 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24067 "sd_persistent_reservation_in_read_keys: " 24068 "failed ddi_copyout: mhioc_key_list_t\n"); 24069 rval = EFAULT; 24070 goto done; 24071 } 24072 24073 #endif /* _MULTI_DATAMODEL */ 24074 24075 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 24076 li.listsize * MHIOC_RESV_KEY_SIZE); 24077 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 24078 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24079 "sd_persistent_reservation_in_read_keys: " 24080 "failed ddi_copyout: keylist\n"); 24081 rval = EFAULT; 24082 } 24083 done: 24084 kmem_free(data_bufp, data_len); 24085 return (rval); 24086 } 24087 24088 24089 /* 24090 * Function: sd_persistent_reservation_in_read_resv 24091 * 24092 * Description: This routine is the driver entry point for handling CD-ROM 24093 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 24094 * by sending the SCSI-3 PRIN commands to the device. 24095 * Process the read persistent reservations command response by 24096 * copying the reservation information into the user provided 24097 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 24098 * 24099 * Arguments: un - Pointer to soft state struct for the target. 24100 * usrp - user provided pointer to multihost Persistent In Read 24101 * Reservations structure (mhioc_inresvs_t) 24102 * flag - this argument is a pass through to ddi_copyxxx() 24103 * directly from the mode argument of ioctl(). 24104 * 24105 * Return Code: 0 - Success 24106 * EACCES 24107 * ENOTSUP 24108 * errno return code from sd_send_scsi_cmd() 24109 * 24110 * Context: Can sleep. Does not return until command is completed.
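 *
 * For illustration only, a user-level consumer of this ioctl might
 * look like the following sketch (hypothetical descriptor fd on the
 * device node; error handling elided; the mhioc structure members
 * are those referenced by the code below, declared in <sys/mhd.h>):
 *
 *	mhioc_resv_desc_t descs[4];
 *	mhioc_resv_desc_list_t dl;
 *	mhioc_inresvs_t inresv;
 *
 *	dl.listsize = 4;
 *	dl.list = descs;
 *	inresv.li = &dl;
 *	(void) ioctl(fd, MHIOCGRP_INRESV, &inresv);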
24111 */ 24112 24113 static int 24114 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 24115 mhioc_inresvs_t *usrp, int flag) 24116 { 24117 #ifdef _MULTI_DATAMODEL 24118 struct mhioc_resv_desc_list32 resvlist32; 24119 #endif 24120 sd_prin_readresv_t *in; 24121 mhioc_inresvs_t *ptr; 24122 sd_readresv_desc_t *readresv_ptr; 24123 mhioc_resv_desc_list_t resvlist; 24124 mhioc_resv_desc_t resvdesc; 24125 uchar_t *data_bufp; 24126 int data_len; 24127 int rval; 24128 int i; 24129 size_t copysz; 24130 mhioc_resv_desc_t *bufp; 24131 24132 if ((ptr = usrp) == NULL) { 24133 return (EINVAL); 24134 } 24135 24136 /* 24137 * Get the listsize from user 24138 */ 24139 #ifdef _MULTI_DATAMODEL 24140 switch (ddi_model_convert_from(flag & FMODELS)) { 24141 case DDI_MODEL_ILP32: 24142 copysz = sizeof (struct mhioc_resv_desc_list32); 24143 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 24144 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24145 "sd_persistent_reservation_in_read_resv: " 24146 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 24147 rval = EFAULT; 24148 goto done; 24149 } 24150 resvlist.listsize = resvlist32.listsize; 24151 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 24152 break; 24153 24154 case DDI_MODEL_NONE: 24155 copysz = sizeof (mhioc_resv_desc_list_t); 24156 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 24157 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24158 "sd_persistent_reservation_in_read_resv: " 24159 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 24160 rval = EFAULT; 24161 goto done; 24162 } 24163 break; 24164 } 24165 #else /* ! _MULTI_DATAMODEL */ 24166 copysz = sizeof (mhioc_resv_desc_list_t); 24167 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 24168 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24169 "sd_persistent_reservation_in_read_resv: " 24170 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 24171 rval = EFAULT; 24172 goto done; 24173 } 24174 #endif /* ! _MULTI_DATAMODEL */ 24175 24176 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 24177 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 24178 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 24179 24180 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 24181 data_len, data_bufp)) != 0) { 24182 goto done; 24183 } 24184 in = (sd_prin_readresv_t *)data_bufp; 24185 ptr->generation = BE_32(in->generation); 24186 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 24187 24188 /* 24189 * Return the min(listsize, listlen) keys 24190 */ 24191 #ifdef _MULTI_DATAMODEL 24192 24193 switch (ddi_model_convert_from(flag & FMODELS)) { 24194 case DDI_MODEL_ILP32: 24195 resvlist32.listlen = resvlist.listlen; 24196 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 24197 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24198 "sd_persistent_reservation_in_read_resv: " 24199 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 24200 rval = EFAULT; 24201 goto done; 24202 } 24203 break; 24204 24205 case DDI_MODEL_NONE: 24206 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 24207 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24208 "sd_persistent_reservation_in_read_resv: " 24209 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 24210 rval = EFAULT; 24211 goto done; 24212 } 24213 break; 24214 } 24215 24216 #else /* ! _MULTI_DATAMODEL */ 24217 24218 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 24219 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24220 "sd_persistent_reservation_in_read_resv: " 24221 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 24222 rval = EFAULT; 24223 goto done; 24224 } 24225 24226 #endif /* !
_MULTI_DATAMODEL */ 24227 24228 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 24229 bufp = resvlist.list; 24230 copysz = sizeof (mhioc_resv_desc_t); 24231 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 24232 i++, readresv_ptr++, bufp++) { 24233 24234 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 24235 MHIOC_RESV_KEY_SIZE); 24236 resvdesc.type = readresv_ptr->type; 24237 resvdesc.scope = readresv_ptr->scope; 24238 resvdesc.scope_specific_addr = 24239 BE_32(readresv_ptr->scope_specific_addr); 24240 24241 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 24242 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24243 "sd_persistent_reservation_in_read_resv: " 24244 "failed ddi_copyout: resvlist\n"); 24245 rval = EFAULT; 24246 goto done; 24247 } 24248 } 24249 done: 24250 kmem_free(data_bufp, data_len); 24251 return (rval); 24252 } 24253 24254 24255 /* 24256 * Function: sr_change_blkmode() 24257 * 24258 * Description: This routine is the driver entry point for handling CD-ROM 24259 * block mode ioctl requests. Support for returning and changing 24260 * the current block size in use by the device is implemented. The 24261 * LBA size is changed via a MODE SELECT Block Descriptor. 24262 * 24263 * This routine issues a mode sense with an allocation length of 24264 * 12 bytes for the mode page header and a single block descriptor. 24265 * 24266 * Arguments: dev - the device 'dev_t' 24267 * cmd - the request type; one of CDROMGBLKMODE (get) or 24268 * CDROMSBLKMODE (set) 24269 * data - current block size or requested block size 24270 * flag - this argument is a pass through to ddi_copyxxx() directly 24271 * from the mode argument of ioctl(). 24272 * 24273 * Return Code: the code returned by sd_send_scsi_cmd() 24274 * EINVAL if invalid arguments are provided 24275 * EFAULT if ddi_copyxxx() fails 24276 * ENXIO if fail ddi_get_soft_state 24277 * EIO if invalid mode sense block descriptor length 24278 * 24279 */ 24280 24281 static int 24282 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 24283 { 24284 struct sd_lun *un = NULL; 24285 struct mode_header *sense_mhp, *select_mhp; 24286 struct block_descriptor *sense_desc, *select_desc; 24287 int current_bsize; 24288 int rval = EINVAL; 24289 uchar_t *sense = NULL; 24290 uchar_t *select = NULL; 24291 24292 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 24293 24294 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24295 return (ENXIO); 24296 } 24297 24298 /* 24299 * The block length is changed via the Mode Select block descriptor; the 24300 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 24301 * required as part of this routine. Therefore the mode sense allocation 24302 * length is specified to be the length of a mode page header and a 24303 * block descriptor.
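 *
 * As a sketch of the layout assumed here (12 bytes total, matching
 * BUFLEN_CHG_BLK_MODE), the mode sense data is consumed as:
 *
 *	struct mode_header	4 bytes; bdesc_length is checked below
 *	struct block_descriptor	8 bytes; blksize_hi/_mid/_lo hold the
 *				current LBA size, reassembled as
 *
 *	bsize = (blksize_hi << 16) | (blksize_mid << 8) | blksize_lo;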
24304 */ 24305 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 24306 24307 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 24308 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 24309 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24310 "sr_change_blkmode: Mode Sense Failed\n"); 24311 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24312 return (rval); 24313 } 24314 24315 /* Check the block descriptor len to handle only 1 block descriptor */ 24316 sense_mhp = (struct mode_header *)sense; 24317 if ((sense_mhp->bdesc_length == 0) || 24318 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 24319 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24320 "sr_change_blkmode: Mode Sense returned invalid block" 24321 " descriptor length\n"); 24322 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24323 return (EIO); 24324 } 24325 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 24326 current_bsize = ((sense_desc->blksize_hi << 16) | 24327 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 24328 24329 /* Process command */ 24330 switch (cmd) { 24331 case CDROMGBLKMODE: 24332 /* Return the block size obtained during the mode sense */ 24333 if (ddi_copyout(&current_bsize, (void *)data, 24334 sizeof (int), flag) != 0) 24335 rval = EFAULT; 24336 break; 24337 case CDROMSBLKMODE: 24338 /* Validate the requested block size */ 24339 switch (data) { 24340 case CDROM_BLK_512: 24341 case CDROM_BLK_1024: 24342 case CDROM_BLK_2048: 24343 case CDROM_BLK_2056: 24344 case CDROM_BLK_2336: 24345 case CDROM_BLK_2340: 24346 case CDROM_BLK_2352: 24347 case CDROM_BLK_2368: 24348 case CDROM_BLK_2448: 24349 case CDROM_BLK_2646: 24350 case CDROM_BLK_2647: 24351 break; 24352 default: 24353 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24354 "sr_change_blkmode: " 24355 "Block Size '%ld' Not Supported\n", data); 24356 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24357 return (EINVAL); 24358 } 24359 24360 /* 24361 * The current block size matches the requested block size so 24362 * there is no need to send the mode select to change the size 24363 */ 24364 if (current_bsize == data) { 24365 break; 24366 } 24367 24368 /* Build the select data for the requested block size */ 24369 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 24370 select_mhp = (struct mode_header *)select; 24371 select_desc = 24372 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 24373 /* 24374 * The LBA size is changed via the block descriptor, so the 24375 * descriptor is built according to the user data 24376 */ 24377 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 24378 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 24379 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 24380 select_desc->blksize_lo = (char)((data) & 0x000000ff); 24381 24382 /* Send the mode select for the requested block size */ 24383 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 24384 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 24385 SD_PATH_STANDARD)) != 0) { 24386 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24387 "sr_change_blkmode: Mode Select Failed\n"); 24388 /* 24389 * The mode select failed for the requested block size, 24390 * so reset the data for the original block size and 24391 * send it to the target. The error is indicated by the 24392 * return value for the failed mode select.
24393 */ 24394 select_desc->blksize_hi = sense_desc->blksize_hi; 24395 select_desc->blksize_mid = sense_desc->blksize_mid; 24396 select_desc->blksize_lo = sense_desc->blksize_lo; 24397 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 24398 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 24399 SD_PATH_STANDARD); 24400 } else { 24401 ASSERT(!mutex_owned(SD_MUTEX(un))); 24402 mutex_enter(SD_MUTEX(un)); 24403 sd_update_block_info(un, (uint32_t)data, 0); 24404 mutex_exit(SD_MUTEX(un)); 24405 } 24406 break; 24407 default: 24408 /* should not reach here, but check anyway */ 24409 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24410 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 24411 rval = EINVAL; 24412 break; 24413 } 24414 24415 if (select) { 24416 kmem_free(select, BUFLEN_CHG_BLK_MODE); 24417 } 24418 if (sense) { 24419 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 24420 } 24421 return (rval); 24422 } 24423 24424 24425 /* 24426 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 24427 * implement driver support for getting and setting the CD speed. The command 24428 * set used will be based on the device type. If the device has not been 24429 * identified as MMC the Toshiba vendor specific mode page will be used. If 24430 * the device is MMC but does not support the Real Time Streaming feature 24431 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 24432 * be used to read the speed. 24433 */ 24434 24435 /* 24436 * Function: sr_change_speed() 24437 * 24438 * Description: This routine is the driver entry point for handling CD-ROM 24439 * drive speed ioctl requests for devices supporting the Toshiba 24440 * vendor specific drive speed mode page. Support for returning 24441 * and changing the current drive speed in use by the device is 24442 * implemented. 24443 * 24444 * Arguments: dev - the device 'dev_t' 24445 * cmd - the request type; one of CDROMGDRVSPEED (get) or 24446 * CDROMSDRVSPEED (set) 24447 * data - current drive speed or requested drive speed 24448 * flag - this argument is a pass through to ddi_copyxxx() directly 24449 * from the mode argument of ioctl(). 24450 * 24451 * Return Code: the code returned by sd_send_scsi_cmd() 24452 * EINVAL if invalid arguments are provided 24453 * EFAULT if ddi_copyxxx() fails 24454 * ENXIO if fail ddi_get_soft_state 24455 * EIO if invalid mode sense block descriptor length 24456 */ 24457 24458 static int 24459 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 24460 { 24461 struct sd_lun *un = NULL; 24462 struct mode_header *sense_mhp, *select_mhp; 24463 struct mode_speed *sense_page, *select_page; 24464 int current_speed; 24465 int rval = EINVAL; 24466 int bd_len; 24467 uchar_t *sense = NULL; 24468 uchar_t *select = NULL; 24469 24470 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 24471 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24472 return (ENXIO); 24473 } 24474 24475 /* 24476 * Note: The drive speed is being modified here according to a Toshiba 24477 * vendor specific mode page (0x31). 
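 *
 * For illustration only, a user-level caller would exercise this
 * path with something like the following sketch (hypothetical
 * descriptor fd on the CD-ROM device node; CDROMSDRVSPEED passes
 * the speed code by value):
 *
 *	if (ioctl(fd, CDROMSDRVSPEED, CDROM_DOUBLE_SPEED) < 0)
 *		perror("CDROMSDRVSPEED");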
24478 */ 24479 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 24480 24481 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 24482 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 24483 SD_PATH_STANDARD)) != 0) { 24484 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24485 "sr_change_speed: Mode Sense Failed\n"); 24486 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24487 return (rval); 24488 } 24489 sense_mhp = (struct mode_header *)sense; 24490 24491 /* Check the block descriptor len to handle only 1 block descriptor */ 24492 bd_len = sense_mhp->bdesc_length; 24493 if (bd_len > MODE_BLK_DESC_LENGTH) { 24494 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24495 "sr_change_speed: Mode Sense returned invalid block " 24496 "descriptor length\n"); 24497 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24498 return (EIO); 24499 } 24500 24501 sense_page = (struct mode_speed *) 24502 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 24503 current_speed = sense_page->speed; 24504 24505 /* Process command */ 24506 switch (cmd) { 24507 case CDROMGDRVSPEED: 24508 /* Return the drive speed obtained during the mode sense */ 24509 if (current_speed == 0x2) { 24510 current_speed = CDROM_TWELVE_SPEED; 24511 } 24512 if (ddi_copyout(&current_speed, (void *)data, 24513 sizeof (int), flag) != 0) { 24514 rval = EFAULT; 24515 } 24516 break; 24517 case CDROMSDRVSPEED: 24518 /* Validate the requested drive speed */ 24519 switch ((uchar_t)data) { 24520 case CDROM_TWELVE_SPEED: 24521 data = 0x2; 24522 /*FALLTHROUGH*/ 24523 case CDROM_NORMAL_SPEED: 24524 case CDROM_DOUBLE_SPEED: 24525 case CDROM_QUAD_SPEED: 24526 case CDROM_MAXIMUM_SPEED: 24527 break; 24528 default: 24529 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24530 "sr_change_speed: " 24531 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 24532 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24533 return (EINVAL); 24534 } 24535 24536 /* 24537 * The current drive speed matches the requested drive speed so 24538 * there is no need to send the mode select to change the speed 24539 */ 24540 if (current_speed == data) { 24541 break; 24542 } 24543 24544 /* Build the select data for the requested drive speed */ 24545 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 24546 select_mhp = (struct mode_header *)select; 24547 select_mhp->bdesc_length = 0; 24548 select_page = 24549 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 24550 24551 24552 select_page->mode_page.code = CDROM_MODE_SPEED; 24553 select_page->mode_page.length = 2; 24554 select_page->speed = (uchar_t)data; 24555 24556 /* Send the mode select for the requested drive speed */ 24557 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 24558 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 24559 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 24560 /* 24561 * The mode select failed for the requested drive speed, 24562 * so reset the data for the original drive speed and 24563 * send it to the target. The error is indicated by the 24564 * return value for the failed mode select.
24565 */ 24566 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24567 "sr_drive_speed: Mode Select Failed\n"); 24568 select_page->speed = sense_page->speed; 24569 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 24570 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 24571 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 24572 } 24573 break; 24574 default: 24575 /* should not reach here, but check anyway */ 24576 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24577 "sr_change_speed: Command '%x' Not Supported\n", cmd); 24578 rval = EINVAL; 24579 break; 24580 } 24581 24582 if (select) { 24583 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 24584 } 24585 if (sense) { 24586 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 24587 } 24588 24589 return (rval); 24590 } 24591 24592 24593 /* 24594 * Function: sr_atapi_change_speed() 24595 * 24596 * Description: This routine is the driver entry point for handling CD-ROM 24597 * drive speed ioctl requests for MMC devices that do not support 24598 * the Real Time Streaming feature (0x107). 24599 * 24600 * Note: This routine will use the SET SPEED command which may not 24601 * be supported by all devices. 24602 * 24603 * Arguments: dev- the device 'dev_t' 24604 * cmd- the request type; one of CDROMGDRVSPEED (get) or 24605 * CDROMSDRVSPEED (set) 24606 * data- current drive speed or requested drive speed 24607 * flag- this argument is a pass through to ddi_copyxxx() directly 24608 * from the mode argument of ioctl(). 24609 * 24610 * Return Code: the code returned by sd_send_scsi_cmd() 24611 * EINVAL if invalid arguments are provided 24612 * EFAULT if ddi_copyxxx() fails 24613 * ENXIO if fail ddi_get_soft_state 24614 * EIO if invalid mode sense block descriptor length 24615 */ 24616 24617 static int 24618 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 24619 { 24620 struct sd_lun *un; 24621 struct uscsi_cmd *com = NULL; 24622 struct mode_header_grp2 *sense_mhp; 24623 uchar_t *sense_page; 24624 uchar_t *sense = NULL; 24625 char cdb[CDB_GROUP5]; 24626 int bd_len; 24627 int current_speed = 0; 24628 int max_speed = 0; 24629 int rval; 24630 24631 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 24632 24633 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24634 return (ENXIO); 24635 } 24636 24637 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 24638 24639 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 24640 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 24641 SD_PATH_STANDARD)) != 0) { 24642 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24643 "sr_atapi_change_speed: Mode Sense Failed\n"); 24644 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24645 return (rval); 24646 } 24647 24648 /* Check the block descriptor len to handle only 1 block descriptor */ 24649 sense_mhp = (struct mode_header_grp2 *)sense; 24650 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 24651 if (bd_len > MODE_BLK_DESC_LENGTH) { 24652 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24653 "sr_atapi_change_speed: Mode Sense returned invalid " 24654 "block descriptor length\n"); 24655 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24656 return (EIO); 24657 } 24658 24659 /* Calculate the current and maximum drive speeds */ 24660 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 24661 current_speed = (sense_page[14] << 8) | sense_page[15]; 24662 max_speed = (sense_page[8] << 8) | sense_page[9]; 24663 24664 /* Process the command */ 24665 switch (cmd) { 24666 case CDROMGDRVSPEED: 24667 current_speed /= SD_SPEED_1X; 24668 if 
(ddi_copyout(&current_speed, (void *)data, 24669 sizeof (int), flag) != 0) 24670 rval = EFAULT; 24671 break; 24672 case CDROMSDRVSPEED: 24673 /* Convert the speed code to KB/sec */ 24674 switch ((uchar_t)data) { 24675 case CDROM_NORMAL_SPEED: 24676 current_speed = SD_SPEED_1X; 24677 break; 24678 case CDROM_DOUBLE_SPEED: 24679 current_speed = 2 * SD_SPEED_1X; 24680 break; 24681 case CDROM_QUAD_SPEED: 24682 current_speed = 4 * SD_SPEED_1X; 24683 break; 24684 case CDROM_TWELVE_SPEED: 24685 current_speed = 12 * SD_SPEED_1X; 24686 break; 24687 case CDROM_MAXIMUM_SPEED: 24688 current_speed = 0xffff; 24689 break; 24690 default: 24691 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24692 "sr_atapi_change_speed: invalid drive speed %d\n", 24693 (uchar_t)data); 24694 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24695 return (EINVAL); 24696 } 24697 24698 /* Check the request against the drive's max speed. */ 24699 if (current_speed != 0xffff) { 24700 if (current_speed > max_speed) { 24701 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24702 return (EINVAL); 24703 } 24704 } 24705 24706 /* 24707 * Build and send the SET SPEED command 24708 * 24709 * Note: The SET SPEED (0xBB) command used in this routine is 24710 * obsolete per the SCSI MMC spec but still supported in the 24711 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI; 24712 * therefore the command is still implemented in this routine. 24713 */ 24714 bzero(cdb, sizeof (cdb)); 24715 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 24716 cdb[2] = (uchar_t)(current_speed >> 8); 24717 cdb[3] = (uchar_t)current_speed; 24718 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24719 com->uscsi_cdb = (caddr_t)cdb; 24720 com->uscsi_cdblen = CDB_GROUP5; 24721 com->uscsi_bufaddr = NULL; 24722 com->uscsi_buflen = 0; 24723 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24724 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 24725 break; 24726 default: 24727 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24728 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 24729 rval = EINVAL; 24730 } 24731 24732 if (sense) { 24733 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 24734 } 24735 if (com) { 24736 kmem_free(com, sizeof (*com)); 24737 } 24738 return (rval); 24739 } 24740 24741 24742 /* 24743 * Function: sr_pause_resume() 24744 * 24745 * Description: This routine is the driver entry point for handling CD-ROM 24746 * pause/resume ioctl requests. This only affects the audio play 24747 * operation. 24748 * 24749 * Arguments: dev - the device 'dev_t' 24750 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 24751 * for setting the resume bit of the cdb.
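 *
 * As a sketch of the CDB built below, PAUSE/RESUME (0x4B) varies
 * only in byte 8, the resume bit:
 *
 *	cdb[0] = SCMD_PAUSE_RESUME;
 *	cdb[8] = (cmd == CDROMRESUME) ? 1 : 0;	resume audio when set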
24752 * 24753 * Return Code: the code returned by sd_send_scsi_cmd() 24754 * EINVAL if invalid mode specified 24755 * 24756 */ 24757 24758 static int 24759 sr_pause_resume(dev_t dev, int cmd) 24760 { 24761 struct sd_lun *un; 24762 struct uscsi_cmd *com; 24763 char cdb[CDB_GROUP1]; 24764 int rval; 24765 24766 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24767 return (ENXIO); 24768 } 24769 24770 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24771 bzero(cdb, CDB_GROUP1); 24772 cdb[0] = SCMD_PAUSE_RESUME; 24773 switch (cmd) { 24774 case CDROMRESUME: 24775 cdb[8] = 1; 24776 break; 24777 case CDROMPAUSE: 24778 cdb[8] = 0; 24779 break; 24780 default: 24781 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 24782 " Command '%x' Not Supported\n", cmd); 24783 rval = EINVAL; 24784 goto done; 24785 } 24786 24787 com->uscsi_cdb = cdb; 24788 com->uscsi_cdblen = CDB_GROUP1; 24789 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24790 24791 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24792 SD_PATH_STANDARD); 24793 24794 done: 24795 kmem_free(com, sizeof (*com)); 24796 return (rval); 24797 } 24798 24799 24800 /* 24801 * Function: sr_play_msf() 24802 * 24803 * Description: This routine is the driver entry point for handling CD-ROM 24804 * ioctl requests to output the audio signals at the specified 24805 * starting address and continue the audio play until the specified 24806 * ending address (CDROMPLAYMSF) The address is in Minute Second 24807 * Frame (MSF) format. 24808 * 24809 * Arguments: dev - the device 'dev_t' 24810 * data - pointer to user provided audio msf structure, 24811 * specifying start/end addresses. 24812 * flag - this argument is a pass through to ddi_copyxxx() 24813 * directly from the mode argument of ioctl(). 
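 *
 * As a sketch of the address handling below: the start and end MSF
 * values occupy CDB bytes 3-8, and drives flagged with
 * un_f_cfg_playmsf_bcd expect each byte BCD encoded. For a start
 * time of 2 minutes, 30 seconds, frame 0:
 *
 *	cdb[3] = BYTE_TO_BCD(2);	0x02
 *	cdb[4] = BYTE_TO_BCD(30);	0x30 (decimal 30 as BCD)
 *	cdb[5] = BYTE_TO_BCD(0);	0x00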
24814 * 24815 * Return Code: the code returned by sd_send_scsi_cmd() 24816 * EFAULT if ddi_copyxxx() fails 24817 * ENXIO if fail ddi_get_soft_state 24818 * EINVAL if data pointer is NULL 24819 */ 24820 24821 static int 24822 sr_play_msf(dev_t dev, caddr_t data, int flag) 24823 { 24824 struct sd_lun *un; 24825 struct uscsi_cmd *com; 24826 struct cdrom_msf msf_struct; 24827 struct cdrom_msf *msf = &msf_struct; 24828 char cdb[CDB_GROUP1]; 24829 int rval; 24830 24831 if (data == NULL) { 24832 return (EINVAL); 24833 } 24834 24835 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24836 return (ENXIO); 24837 } 24838 24839 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 24840 return (EFAULT); 24841 } 24842 24843 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24844 bzero(cdb, CDB_GROUP1); 24845 cdb[0] = SCMD_PLAYAUDIO_MSF; 24846 if (un->un_f_cfg_playmsf_bcd == TRUE) { 24847 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 24848 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 24849 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 24850 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 24851 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 24852 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 24853 } else { 24854 cdb[3] = msf->cdmsf_min0; 24855 cdb[4] = msf->cdmsf_sec0; 24856 cdb[5] = msf->cdmsf_frame0; 24857 cdb[6] = msf->cdmsf_min1; 24858 cdb[7] = msf->cdmsf_sec1; 24859 cdb[8] = msf->cdmsf_frame1; 24860 } 24861 com->uscsi_cdb = cdb; 24862 com->uscsi_cdblen = CDB_GROUP1; 24863 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24864 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24865 SD_PATH_STANDARD); 24866 kmem_free(com, sizeof (*com)); 24867 return (rval); 24868 } 24869 24870 24871 /* 24872 * Function: sr_play_trkind() 24873 * 24874 * Description: This routine is the driver entry point for handling CD-ROM 24875 * ioctl requests to output the audio signals at the specified 24876 * starting address and continue the audio play until the specified 24877 * ending address (CDROMPLAYTRKIND). The address is in Track Index 24878 * format. 24879 * 24880 * Arguments: dev - the device 'dev_t' 24881 * data - pointer to user provided audio track/index structure, 24882 * specifying start/end addresses. 24883 * flag - this argument is a pass through to ddi_copyxxx() 24884 * directly from the mode argument of ioctl(). 
24885 * 24886 * Return Code: the code returned by sd_send_scsi_cmd() 24887 * EFAULT if ddi_copyxxx() fails 24888 * ENXIO if fail ddi_get_soft_state 24889 * EINVAL if data pointer is NULL 24890 */ 24891 24892 static int 24893 sr_play_trkind(dev_t dev, caddr_t data, int flag) 24894 { 24895 struct cdrom_ti ti_struct; 24896 struct cdrom_ti *ti = &ti_struct; 24897 struct uscsi_cmd *com = NULL; 24898 char cdb[CDB_GROUP1]; 24899 int rval; 24900 24901 if (data == NULL) { 24902 return (EINVAL); 24903 } 24904 24905 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 24906 return (EFAULT); 24907 } 24908 24909 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24910 bzero(cdb, CDB_GROUP1); 24911 cdb[0] = SCMD_PLAYAUDIO_TI; 24912 cdb[4] = ti->cdti_trk0; 24913 cdb[5] = ti->cdti_ind0; 24914 cdb[7] = ti->cdti_trk1; 24915 cdb[8] = ti->cdti_ind1; 24916 com->uscsi_cdb = cdb; 24917 com->uscsi_cdblen = CDB_GROUP1; 24918 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 24919 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24920 SD_PATH_STANDARD); 24921 kmem_free(com, sizeof (*com)); 24922 return (rval); 24923 } 24924 24925 24926 /* 24927 * Function: sr_read_all_subcodes() 24928 * 24929 * Description: This routine is the driver entry point for handling CD-ROM 24930 * ioctl requests to return raw subcode data while the target is 24931 * playing audio (CDROMSUBCODE). 24932 * 24933 * Arguments: dev - the device 'dev_t' 24934 * data - pointer to user provided cdrom subcode structure, 24935 * specifying the transfer length and address. 24936 * flag - this argument is a pass through to ddi_copyxxx() 24937 * directly from the mode argument of ioctl(). 24938 * 24939 * Return Code: the code returned by sd_send_scsi_cmd() 24940 * EFAULT if ddi_copyxxx() fails 24941 * ENXIO if fail ddi_get_soft_state 24942 * EINVAL if data pointer is NULL 24943 */ 24944 24945 static int 24946 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 24947 { 24948 struct sd_lun *un = NULL; 24949 struct uscsi_cmd *com = NULL; 24950 struct cdrom_subcode *subcode = NULL; 24951 int rval; 24952 size_t buflen; 24953 char cdb[CDB_GROUP5]; 24954 24955 #ifdef _MULTI_DATAMODEL 24956 /* To support ILP32 applications in an LP64 world */ 24957 struct cdrom_subcode32 cdrom_subcode32; 24958 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 24959 #endif 24960 if (data == NULL) { 24961 return (EINVAL); 24962 } 24963 24964 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24965 return (ENXIO); 24966 } 24967 24968 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 24969 24970 #ifdef _MULTI_DATAMODEL 24971 switch (ddi_model_convert_from(flag & FMODELS)) { 24972 case DDI_MODEL_ILP32: 24973 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 24974 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24975 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24976 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24977 return (EFAULT); 24978 } 24979 /* Convert the ILP32 uscsi data from the application to LP64 */ 24980 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 24981 break; 24982 case DDI_MODEL_NONE: 24983 if (ddi_copyin(data, subcode, 24984 sizeof (struct cdrom_subcode), flag)) { 24985 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24986 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24987 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24988 return (EFAULT); 24989 } 24990 break; 24991 } 24992 #else /* ! 
_MULTI_DATAMODEL */ 24993 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 24994 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24995 "sr_read_all_subcodes: ddi_copyin Failed\n"); 24996 kmem_free(subcode, sizeof (struct cdrom_subcode)); 24997 return (EFAULT); 24998 } 24999 #endif /* _MULTI_DATAMODEL */ 25000 25001 /* 25002 * Since MMC-2 expects max 3 bytes for length, check if the 25003 * length input is greater than 3 bytes 25004 */ 25005 if ((subcode->cdsc_length & 0xFF000000) != 0) { 25006 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25007 "sr_read_all_subcodes: " 25008 "cdrom transfer length too large: %d (limit %d)\n", 25009 subcode->cdsc_length, 0xFFFFFF); 25010 kmem_free(subcode, sizeof (struct cdrom_subcode)); 25011 return (EINVAL); 25012 } 25013 25014 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 25015 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25016 bzero(cdb, CDB_GROUP5); 25017 25018 if (un->un_f_mmc_cap == TRUE) { 25019 cdb[0] = (char)SCMD_READ_CD; 25020 cdb[2] = (char)0xff; 25021 cdb[3] = (char)0xff; 25022 cdb[4] = (char)0xff; 25023 cdb[5] = (char)0xff; 25024 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 25025 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 25026 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 25027 cdb[10] = 1; 25028 } else { 25029 /* 25030 * Note: A vendor specific command (0xDF) is being used here to 25031 * request a read of all subcodes. 25032 */ 25033 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 25034 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 25035 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 25036 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 25037 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 25038 } 25039 com->uscsi_cdb = cdb; 25040 com->uscsi_cdblen = CDB_GROUP5; 25041 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 25042 com->uscsi_buflen = buflen; 25043 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25044 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25045 SD_PATH_STANDARD); 25046 kmem_free(subcode, sizeof (struct cdrom_subcode)); 25047 kmem_free(com, sizeof (*com)); 25048 return (rval); 25049 } 25050 25051 25052 /* 25053 * Function: sr_read_subchannel() 25054 * 25055 * Description: This routine is the driver entry point for handling CD-ROM 25056 * ioctl requests to return the Q sub-channel data of the CD 25057 * current position block (CDROMSUBCHNL). The data includes the 25058 * track number, index number, absolute CD-ROM address (LBA or MSF 25059 * format per the user), track relative CD-ROM address (LBA or MSF 25060 * format per the user), control data and audio status. 25061 * 25062 * Arguments: dev - the device 'dev_t' 25063 * data - pointer to user provided cdrom sub-channel structure 25064 * flag - this argument is a pass through to ddi_copyxxx() 25065 * directly from the mode argument of ioctl().
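 *
 * For reference, the 16-byte current-position response is consumed
 * below as (a sketch of the standard READ SUB-CHANNEL data format):
 *
 *	buffer[1]	audio status
 *	buffer[5]	ADR (high nibble) and CTRL (low nibble)
 *	buffer[6]	track number
 *	buffer[7]	index number
 *	buffer[8-11]	absolute address (LBA or MSF)
 *	buffer[12-15]	track relative address (LBA or MSF)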
25066 * 25067 * Return Code: the code returned by sd_send_scsi_cmd() 25068 * EFAULT if ddi_copyxxx() fails 25069 * ENXIO if fail ddi_get_soft_state 25070 * EINVAL if data pointer is NULL 25071 */ 25072 25073 static int 25074 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 25075 { 25076 struct sd_lun *un; 25077 struct uscsi_cmd *com; 25078 struct cdrom_subchnl subchnl_struct; 25079 struct cdrom_subchnl *subchnl = &subchnl_struct; 25080 char cdb[CDB_GROUP1]; 25081 caddr_t buffer; 25082 int rval; 25083 25084 if (data == NULL) { 25085 return (EINVAL); 25086 } 25087 25088 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25089 (un->un_state == SD_STATE_OFFLINE)) { 25090 return (ENXIO); 25091 } 25092 25093 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 25094 return (EFAULT); 25095 } 25096 25097 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 25098 bzero(cdb, CDB_GROUP1); 25099 cdb[0] = SCMD_READ_SUBCHANNEL; 25100 /* Set the MSF bit based on the user requested address format */ 25101 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 25102 /* 25103 * Set the Q bit in byte 2 to indicate that Q sub-channel data should 25104 * be returned 25105 */ 25106 cdb[2] = 0x40; 25107 /* 25108 * Set byte 3 to specify the return data format. A value of 0x01 25109 * indicates that the CD-ROM current position should be returned. 25110 */ 25111 cdb[3] = 0x01; 25112 cdb[8] = 0x10; 25113 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25114 com->uscsi_cdb = cdb; 25115 com->uscsi_cdblen = CDB_GROUP1; 25116 com->uscsi_bufaddr = buffer; 25117 com->uscsi_buflen = 16; 25118 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25119 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25120 SD_PATH_STANDARD); 25121 if (rval != 0) { 25122 kmem_free(buffer, 16); 25123 kmem_free(com, sizeof (*com)); 25124 return (rval); 25125 } 25126 25127 /* Process the returned Q sub-channel data */ 25128 subchnl->cdsc_audiostatus = buffer[1]; 25129 subchnl->cdsc_adr = (buffer[5] & 0xF0); 25130 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 25131 subchnl->cdsc_trk = buffer[6]; 25132 subchnl->cdsc_ind = buffer[7]; 25133 if (subchnl->cdsc_format & CDROM_LBA) { 25134 subchnl->cdsc_absaddr.lba = 25135 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 25136 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 25137 subchnl->cdsc_reladdr.lba = 25138 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 25139 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 25140 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 25141 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 25142 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 25143 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 25144 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 25145 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 25146 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 25147 } else { 25148 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 25149 subchnl->cdsc_absaddr.msf.second = buffer[10]; 25150 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 25151 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 25152 subchnl->cdsc_reladdr.msf.second = buffer[14]; 25153 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 25154 } 25155 kmem_free(buffer, 16); 25156 kmem_free(com, sizeof (*com)); 25157 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 25158 != 0) { 25159 return (EFAULT); 25160 } 25161 return (rval); 25162 } 25163 25164 25165 /* 25166 * Function: sr_read_tocentry() 25167 *
25168 * Description: This routine is the driver entry point for handling CD-ROM 25169 * ioctl requests to read from the Table of Contents (TOC) 25170 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 25171 * fields, the starting address (LBA or MSF format per the user) 25172 * and the data mode if the user specified track is a data track. 25173 * 25174 * Note: The READ HEADER (0x44) command used in this routine is 25175 * obsolete per the SCSI MMC spec but still supported in the 25176 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI; 25177 * therefore the command is still implemented in this routine. 25178 * 25179 * Arguments: dev - the device 'dev_t' 25180 * data - pointer to user provided toc entry structure, 25181 * specifying the track # and the address format 25182 * (LBA or MSF). 25183 * flag - this argument is a pass through to ddi_copyxxx() 25184 * directly from the mode argument of ioctl(). 25185 * 25186 * Return Code: the code returned by sd_send_scsi_cmd() 25187 * EFAULT if ddi_copyxxx() fails 25188 * ENXIO if fail ddi_get_soft_state 25189 * EINVAL if data pointer is NULL 25190 */ 25191 25192 static int 25193 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 25194 { 25195 struct sd_lun *un = NULL; 25196 struct uscsi_cmd *com; 25197 struct cdrom_tocentry toc_entry; 25198 struct cdrom_tocentry *entry = &toc_entry; 25199 caddr_t buffer; 25200 int rval; 25201 char cdb[CDB_GROUP1]; 25202 25203 if (data == NULL) { 25204 return (EINVAL); 25205 } 25206 25207 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25208 (un->un_state == SD_STATE_OFFLINE)) { 25209 return (ENXIO); 25210 } 25211 25212 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 25213 return (EFAULT); 25214 } 25215 25216 /* Validate the requested track and address format */ 25217 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 25218 return (EINVAL); 25219 } 25220 25221 if (entry->cdte_track == 0) { 25222 return (EINVAL); 25223 } 25224 25225 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 25226 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25227 bzero(cdb, CDB_GROUP1); 25228 25229 cdb[0] = SCMD_READ_TOC; 25230 /* Set the MSF bit based on the user requested address format */ 25231 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 25232 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 25233 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 25234 } else { 25235 cdb[6] = entry->cdte_track; 25236 } 25237 25238 /* 25239 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
25240 * (4 byte TOC response header + 8 byte track descriptor) 25241 */ 25242 cdb[8] = 12; 25243 com->uscsi_cdb = cdb; 25244 com->uscsi_cdblen = CDB_GROUP1; 25245 com->uscsi_bufaddr = buffer; 25246 com->uscsi_buflen = 0x0C; 25247 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 25248 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25249 SD_PATH_STANDARD); 25250 if (rval != 0) { 25251 kmem_free(buffer, 12); 25252 kmem_free(com, sizeof (*com)); 25253 return (rval); 25254 } 25255 25256 /* Process the toc entry */ 25257 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 25258 entry->cdte_ctrl = (buffer[5] & 0x0F); 25259 if (entry->cdte_format & CDROM_LBA) { 25260 entry->cdte_addr.lba = 25261 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 25262 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 25263 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 25264 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 25265 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 25266 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 25267 /* 25268 * Send a READ TOC command using the LBA address format to get 25269 * the LBA for the track requested so it can be used in the 25270 * READ HEADER request 25271 * 25272 * Note: The MSF bit of the READ HEADER command specifies the 25273 * output format. The block address specified in that command 25274 * must be in LBA format. 25275 */ 25276 cdb[1] = 0; 25277 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25278 SD_PATH_STANDARD); 25279 if (rval != 0) { 25280 kmem_free(buffer, 12); 25281 kmem_free(com, sizeof (*com)); 25282 return (rval); 25283 } 25284 } else { 25285 entry->cdte_addr.msf.minute = buffer[9]; 25286 entry->cdte_addr.msf.second = buffer[10]; 25287 entry->cdte_addr.msf.frame = buffer[11]; 25288 /* 25289 * Send a READ TOC command using the LBA address format to get 25290 * the LBA for the track requested so it can be used in the 25291 * READ HEADER request 25292 * 25293 * Note: The MSF bit of the READ HEADER command specifies the 25294 * output format. The block address specified in that command 25295 * must be in LBA format. 25296 */ 25297 cdb[1] = 0; 25298 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25299 SD_PATH_STANDARD); 25300 if (rval != 0) { 25301 kmem_free(buffer, 12); 25302 kmem_free(com, sizeof (*com)); 25303 return (rval); 25304 } 25305 } 25306 25307 /* 25308 * Build and send the READ HEADER command to determine the data mode of 25309 * the user specified track. 25310 */ 25311 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 25312 (entry->cdte_track != CDROM_LEADOUT)) { 25313 bzero(cdb, CDB_GROUP1); 25314 cdb[0] = SCMD_READ_HEADER; 25315 cdb[2] = buffer[8]; 25316 cdb[3] = buffer[9]; 25317 cdb[4] = buffer[10]; 25318 cdb[5] = buffer[11]; 25319 cdb[8] = 0x08; 25320 com->uscsi_buflen = 0x08; 25321 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25322 SD_PATH_STANDARD); 25323 if (rval == 0) { 25324 entry->cdte_datamode = buffer[0]; 25325 } else { 25326 /* 25327 * The READ HEADER command failed; since it is 25328 * obsoleted in one spec, it's better to return 25329 * -1 for an invalid track so that we can still 25330 * receive the rest of the TOC data.
25331 */ 25332 entry->cdte_datamode = (uchar_t)-1; 25333 } 25334 } else { 25335 entry->cdte_datamode = (uchar_t)-1; 25336 } 25337 25338 kmem_free(buffer, 12); 25339 kmem_free(com, sizeof (*com)); 25340 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 25341 return (EFAULT); 25342 25343 return (rval); 25344 } 25345 25346 25347 /* 25348 * Function: sr_read_tochdr() 25349 * 25350 * Description: This routine is the driver entry point for handling CD-ROM 25351 * ioctl requests to read the Table of Contents (TOC) header 25352 * (CDROMREADTOHDR). The TOC header consists of the disk starting 25353 * and ending track numbers 25354 * 25355 * Arguments: dev - the device 'dev_t' 25356 * data - pointer to user provided toc header structure, 25357 * specifying the starting and ending track numbers. 25358 * flag - this argument is a pass through to ddi_copyxxx() 25359 * directly from the mode argument of ioctl(). 25360 * 25361 * Return Code: the code returned by sd_send_scsi_cmd() 25362 * EFAULT if ddi_copyxxx() fails 25363 * ENXIO if fail ddi_get_soft_state 25364 * EINVAL if data pointer is NULL 25365 */ 25366 25367 static int 25368 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 25369 { 25370 struct sd_lun *un; 25371 struct uscsi_cmd *com; 25372 struct cdrom_tochdr toc_header; 25373 struct cdrom_tochdr *hdr = &toc_header; 25374 char cdb[CDB_GROUP1]; 25375 int rval; 25376 caddr_t buffer; 25377 25378 if (data == NULL) { 25379 return (EINVAL); 25380 } 25381 25382 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25383 (un->un_state == SD_STATE_OFFLINE)) { 25384 return (ENXIO); 25385 } 25386 25387 buffer = kmem_zalloc(4, KM_SLEEP); 25388 bzero(cdb, CDB_GROUP1); 25389 cdb[0] = SCMD_READ_TOC; 25390 /* 25391 * Specifying a track number of 0x00 in the READ TOC command indicates 25392 * that the TOC header should be returned 25393 */ 25394 cdb[6] = 0x00; 25395 /* 25396 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 25397 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 25398 */ 25399 cdb[8] = 0x04; 25400 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25401 com->uscsi_cdb = cdb; 25402 com->uscsi_cdblen = CDB_GROUP1; 25403 com->uscsi_bufaddr = buffer; 25404 com->uscsi_buflen = 0x04; 25405 com->uscsi_timeout = 300; 25406 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25407 25408 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25409 SD_PATH_STANDARD); 25410 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 25411 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 25412 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 25413 } else { 25414 hdr->cdth_trk0 = buffer[2]; 25415 hdr->cdth_trk1 = buffer[3]; 25416 } 25417 kmem_free(buffer, 4); 25418 kmem_free(com, sizeof (*com)); 25419 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 25420 return (EFAULT); 25421 } 25422 return (rval); 25423 } 25424 25425 25426 /* 25427 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 25428 * sr_read_cdda(), sr_read_cdxa(), routines implement driver support for 25429 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 25430 * digital audio and extended architecture digital audio. These modes are 25431 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 25432 * MMC specs. 
25433 * 25434 * In addition to support for the various data formats these routines also 25435 * include support for devices that implement only the direct access READ 25436 * commands (0x08, 0x28), devices that implement the READ_CD commands 25437 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 25438 * READ CDXA commands (0xD8, 0xDB) 25439 */ 25440 25441 /* 25442 * Function: sr_read_mode1() 25443 * 25444 * Description: This routine is the driver entry point for handling CD-ROM 25445 * ioctl read mode1 requests (CDROMREADMODE1). 25446 * 25447 * Arguments: dev - the device 'dev_t' 25448 * data - pointer to user provided cd read structure specifying 25449 * the lba buffer address and length. 25450 * flag - this argument is a pass through to ddi_copyxxx() 25451 * directly from the mode argument of ioctl(). 25452 * 25453 * Return Code: the code returned by sd_send_scsi_cmd() 25454 * EFAULT if ddi_copyxxx() fails 25455 * ENXIO if fail ddi_get_soft_state 25456 * EINVAL if data pointer is NULL 25457 */ 25458 25459 static int 25460 sr_read_mode1(dev_t dev, caddr_t data, int flag) 25461 { 25462 struct sd_lun *un; 25463 struct cdrom_read mode1_struct; 25464 struct cdrom_read *mode1 = &mode1_struct; 25465 int rval; 25466 #ifdef _MULTI_DATAMODEL 25467 /* To support ILP32 applications in an LP64 world */ 25468 struct cdrom_read32 cdrom_read32; 25469 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25470 #endif /* _MULTI_DATAMODEL */ 25471 25472 if (data == NULL) { 25473 return (EINVAL); 25474 } 25475 25476 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25477 (un->un_state == SD_STATE_OFFLINE)) { 25478 return (ENXIO); 25479 } 25480 25481 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25482 "sd_read_mode1: entry: un:0x%p\n", un); 25483 25484 #ifdef _MULTI_DATAMODEL 25485 switch (ddi_model_convert_from(flag & FMODELS)) { 25486 case DDI_MODEL_ILP32: 25487 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25488 return (EFAULT); 25489 } 25490 /* Convert the ILP32 uscsi data from the application to LP64 */ 25491 cdrom_read32tocdrom_read(cdrd32, mode1); 25492 break; 25493 case DDI_MODEL_NONE: 25494 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25495 return (EFAULT); 25496 } 25497 } 25498 #else /* ! _MULTI_DATAMODEL */ 25499 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 25500 return (EFAULT); 25501 } 25502 #endif /* _MULTI_DATAMODEL */ 25503 25504 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 25505 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 25506 25507 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25508 "sd_read_mode1: exit: un:0x%p\n", un); 25509 25510 return (rval); 25511 } 25512 25513 25514 /* 25515 * Function: sr_read_cd_mode2() 25516 * 25517 * Description: This routine is the driver entry point for handling CD-ROM 25518 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 25519 * support the READ CD (0xBE) command or the 1st generation 25520 * READ CD (0xD4) command. 25521 * 25522 * Arguments: dev - the device 'dev_t' 25523 * data - pointer to user provided cd read structure specifying 25524 * the lba buffer address and length. 25525 * flag - this argument is a pass through to ddi_copyxxx() 25526 * directly from the mode argument of ioctl(). 
25527 * 25528 * Return Code: the code returned by sd_send_scsi_cmd() 25529 * EFAULT if ddi_copyxxx() fails 25530 * ENXIO if fail ddi_get_soft_state 25531 * EINVAL if data pointer is NULL 25532 */ 25533 25534 static int 25535 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 25536 { 25537 struct sd_lun *un; 25538 struct uscsi_cmd *com; 25539 struct cdrom_read mode2_struct; 25540 struct cdrom_read *mode2 = &mode2_struct; 25541 uchar_t cdb[CDB_GROUP5]; 25542 int nblocks; 25543 int rval; 25544 #ifdef _MULTI_DATAMODEL 25545 /* To support ILP32 applications in an LP64 world */ 25546 struct cdrom_read32 cdrom_read32; 25547 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25548 #endif /* _MULTI_DATAMODEL */ 25549 25550 if (data == NULL) { 25551 return (EINVAL); 25552 } 25553 25554 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25555 (un->un_state == SD_STATE_OFFLINE)) { 25556 return (ENXIO); 25557 } 25558 25559 #ifdef _MULTI_DATAMODEL 25560 switch (ddi_model_convert_from(flag & FMODELS)) { 25561 case DDI_MODEL_ILP32: 25562 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25563 return (EFAULT); 25564 } 25565 /* Convert the ILP32 uscsi data from the application to LP64 */ 25566 cdrom_read32tocdrom_read(cdrd32, mode2); 25567 break; 25568 case DDI_MODEL_NONE: 25569 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25570 return (EFAULT); 25571 } 25572 break; 25573 } 25574 25575 #else /* ! _MULTI_DATAMODEL */ 25576 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25577 return (EFAULT); 25578 } 25579 #endif /* _MULTI_DATAMODEL */ 25580 25581 bzero(cdb, sizeof (cdb)); 25582 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 25583 /* Read command supported by 1st generation atapi drives */ 25584 cdb[0] = SCMD_READ_CDD4; 25585 } else { 25586 /* Universal CD Access Command */ 25587 cdb[0] = SCMD_READ_CD; 25588 } 25589 25590 /* 25591 * Set expected sector type to: 2336s byte, Mode 2 Yellow Book 25592 */ 25593 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 25594 25595 /* set the start address */ 25596 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 25597 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 25598 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 25599 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 25600 25601 /* set the transfer length */ 25602 nblocks = mode2->cdread_buflen / 2336; 25603 cdb[6] = (uchar_t)(nblocks >> 16); 25604 cdb[7] = (uchar_t)(nblocks >> 8); 25605 cdb[8] = (uchar_t)nblocks; 25606 25607 /* set the filter bits */ 25608 cdb[9] = CDROM_READ_CD_USERDATA; 25609 25610 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25611 com->uscsi_cdb = (caddr_t)cdb; 25612 com->uscsi_cdblen = sizeof (cdb); 25613 com->uscsi_bufaddr = mode2->cdread_bufaddr; 25614 com->uscsi_buflen = mode2->cdread_buflen; 25615 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 25616 25617 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 25618 SD_PATH_STANDARD); 25619 kmem_free(com, sizeof (*com)); 25620 return (rval); 25621 } 25622 25623 25624 /* 25625 * Function: sr_read_mode2() 25626 * 25627 * Description: This routine is the driver entry point for handling CD-ROM 25628 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 25629 * do not support the READ CD (0xBE) command. 25630 * 25631 * Arguments: dev - the device 'dev_t' 25632 * data - pointer to user provided cd read structure specifying 25633 * the lba buffer address and length. 25634 * flag - this argument is a pass through to ddi_copyxxx() 25635 * directly from the mode argument of ioctl(). 
25636 * 25637 * Return Code: the code returned by sd_send_scsi_cmd() 25638 * EFAULT if ddi_copyxxx() fails 25639 * ENXIO if fail ddi_get_soft_state 25640 * EINVAL if data pointer is NULL 25641 * EIO if fail to reset block size 25642 * EAGAIN if commands are in progress in the driver 25643 */ 25644 25645 static int 25646 sr_read_mode2(dev_t dev, caddr_t data, int flag) 25647 { 25648 struct sd_lun *un; 25649 struct cdrom_read mode2_struct; 25650 struct cdrom_read *mode2 = &mode2_struct; 25651 int rval; 25652 uint32_t restore_blksize; 25653 struct uscsi_cmd *com; 25654 uchar_t cdb[CDB_GROUP0]; 25655 int nblocks; 25656 25657 #ifdef _MULTI_DATAMODEL 25658 /* To support ILP32 applications in an LP64 world */ 25659 struct cdrom_read32 cdrom_read32; 25660 struct cdrom_read32 *cdrd32 = &cdrom_read32; 25661 #endif /* _MULTI_DATAMODEL */ 25662 25663 if (data == NULL) { 25664 return (EINVAL); 25665 } 25666 25667 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 25668 (un->un_state == SD_STATE_OFFLINE)) { 25669 return (ENXIO); 25670 } 25671 25672 /* 25673 * Because this routine will update the device and driver block size 25674 * being used we want to make sure there are no commands in progress. 25675 * If commands are in progress the user will have to try again. 25676 * 25677 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 25678 * in sdioctl to protect commands from sdioctl through to the top of 25679 * sd_uscsi_strategy. See sdioctl for details. 25680 */ 25681 mutex_enter(SD_MUTEX(un)); 25682 if (un->un_ncmds_in_driver != 1) { 25683 mutex_exit(SD_MUTEX(un)); 25684 return (EAGAIN); 25685 } 25686 mutex_exit(SD_MUTEX(un)); 25687 25688 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 25689 "sd_read_mode2: entry: un:0x%p\n", un); 25690 25691 #ifdef _MULTI_DATAMODEL 25692 switch (ddi_model_convert_from(flag & FMODELS)) { 25693 case DDI_MODEL_ILP32: 25694 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 25695 return (EFAULT); 25696 } 25697 /* Convert the ILP32 uscsi data from the application to LP64 */ 25698 cdrom_read32tocdrom_read(cdrd32, mode2); 25699 break; 25700 case DDI_MODEL_NONE: 25701 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 25702 return (EFAULT); 25703 } 25704 break; 25705 } 25706 #else /* ! 
_MULTI_DATAMODEL */
25707 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
25708 return (EFAULT);
25709 }
25710 #endif /* _MULTI_DATAMODEL */
25711
25712 /* Store the current target block size for restoration later */
25713 restore_blksize = un->un_tgt_blocksize;
25714
25715 /* Change the device and soft state target block size to 2336 */
25716 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
25717 rval = EIO;
25718 goto done;
25719 }
25720
25721
25722 bzero(cdb, sizeof (cdb));
25723
25724 /* set READ operation */
25725 cdb[0] = SCMD_READ;
25726
25727 /* adjust the lba from 512 byte blocks to 2 kbyte blocks */
25728 mode2->cdread_lba >>= 2;
25729
25730 /* set the start address */
25731 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F);
25732 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
25733 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);
25734
25735 /* set the transfer length */
25736 nblocks = mode2->cdread_buflen / 2336;
25737 cdb[4] = (uchar_t)nblocks & 0xFF;
25738
25739 /* build command */
25740 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
25741 com->uscsi_cdb = (caddr_t)cdb;
25742 com->uscsi_cdblen = sizeof (cdb);
25743 com->uscsi_bufaddr = mode2->cdread_bufaddr;
25744 com->uscsi_buflen = mode2->cdread_buflen;
25745 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
25746
25747 /*
25748 * Issue the SCSI command with a user space address for the read buffer.
25749 *
25750 * This sends the command through the main channel in the driver.
25751 *
25752 * Since this is accessed via an IOCTL call, we go through the
25753 * standard path, so that if the device was powered down, then
25754 * it would be 'awakened' to handle the command.
25755 */
25756 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
25757 SD_PATH_STANDARD);
25758
25759 kmem_free(com, sizeof (*com));
25760
25761 /* Restore the device and soft state target block size */
25762 if (sr_sector_mode(dev, restore_blksize) != 0) {
25763 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
25764 "can't do switch back to mode 1\n");
25765 /*
25766 * If the above sd_send_scsi_cmd() succeeded we still need to
25767 * report an error because we failed to reset the block size.
25768 */
25769 if (rval == 0) {
25770 rval = EIO;
25771 }
25772 }
25773
25774 done:
25775 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
25776 "sd_read_mode2: exit: un:0x%p\n", un);
25777
25778 return (rval);
25779 }
25780
25781
25782 /*
25783 * Function: sr_sector_mode()
25784 *
25785 * Description: This utility function is used by sr_read_mode2() to set the
25786 * target block size based on the user specified size. This is
25787 * a legacy implementation based upon a vendor specific mode page.
25788 *
25789 * Arguments: dev - the device 'dev_t'
25790 * blksize - flag indicating if the block size is being set to 2336
25791 * or 512.
25792 *
25793 * Return Code: 0 if the block size was successfully changed
25794 * the code returned by sd_send_scsi_MODE_SENSE() or
25795 * sd_send_scsi_MODE_SELECT() on failure
25796 * ENXIO if fail ddi_get_soft_state
25797 */
25798
25799 static int
25800 sr_sector_mode(dev_t dev, uint32_t blksize)
25801 {
25802 struct sd_lun *un;
25803 uchar_t *sense;
25804 uchar_t *select;
25805 int rval;
25806
25807 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
25808 (un->un_state == SD_STATE_OFFLINE)) {
25809 return (ENXIO);
25810 }
25811
25812 sense = kmem_zalloc(20, KM_SLEEP);
25813
25814 /* Note: This is a vendor specific mode page (0x81) */
25815 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81,
25816 SD_PATH_STANDARD)) != 0) {
25817 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
25818 "sr_sector_mode: Mode Sense failed\n");
25819 kmem_free(sense, 20);
25820 return (rval);
25821 }
25822 select = kmem_zalloc(20, KM_SLEEP);
25823 select[3] = 0x08;
25824 select[10] = ((blksize >> 8) & 0xff);
25825 select[11] = (blksize & 0xff);
25826 select[12] = 0x01;
25827 select[13] = 0x06;
25828 select[14] = sense[14];
25829 select[15] = sense[15];
25830 if (blksize == SD_MODE2_BLKSIZE) {
25831 select[14] |= 0x01;
25832 }
25833
25834 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20,
25835 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) {
25836 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
25837 "sr_sector_mode: Mode Select failed\n");
25838 } else {
25839 /*
25840 * Only update the softstate block size if we successfully
25841 * changed the device block mode.
25842 */
25843 mutex_enter(SD_MUTEX(un));
25844 sd_update_block_info(un, blksize, 0);
25845 mutex_exit(SD_MUTEX(un));
25846 }
25847 kmem_free(sense, 20);
25848 kmem_free(select, 20);
25849 return (rval);
25850 }
25851
25852
25853 /*
25854 * Function: sr_read_cdda()
25855 *
25856 * Description: This routine is the driver entry point for handling CD-ROM
25857 * ioctl requests to return CD-DA or subcode data (CDROMCDDA). If
25858 * the target supports CDDA these requests are handled via a vendor
25859 * specific command (0xD8). If the target does not support CDDA
25860 * these requests are handled via the READ CD command (0xBE).
25861 *
25862 * Arguments: dev - the device 'dev_t'
25863 * data - pointer to user provided CD-DA structure specifying
25864 * the track starting address, transfer length, and
25865 * subcode options.
25866 * flag - this argument is a pass through to ddi_copyxxx()
25867 * directly from the mode argument of ioctl().
25868 * 25869 * Return Code: the code returned by sd_send_scsi_cmd() 25870 * EFAULT if ddi_copyxxx() fails 25871 * ENXIO if fail ddi_get_soft_state 25872 * EINVAL if invalid arguments are provided 25873 * ENOTTY 25874 */ 25875 25876 static int 25877 sr_read_cdda(dev_t dev, caddr_t data, int flag) 25878 { 25879 struct sd_lun *un; 25880 struct uscsi_cmd *com; 25881 struct cdrom_cdda *cdda; 25882 int rval; 25883 size_t buflen; 25884 char cdb[CDB_GROUP5]; 25885 25886 #ifdef _MULTI_DATAMODEL 25887 /* To support ILP32 applications in an LP64 world */ 25888 struct cdrom_cdda32 cdrom_cdda32; 25889 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 25890 #endif /* _MULTI_DATAMODEL */ 25891 25892 if (data == NULL) { 25893 return (EINVAL); 25894 } 25895 25896 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25897 return (ENXIO); 25898 } 25899 25900 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 25901 25902 #ifdef _MULTI_DATAMODEL 25903 switch (ddi_model_convert_from(flag & FMODELS)) { 25904 case DDI_MODEL_ILP32: 25905 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 25906 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25907 "sr_read_cdda: ddi_copyin Failed\n"); 25908 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25909 return (EFAULT); 25910 } 25911 /* Convert the ILP32 uscsi data from the application to LP64 */ 25912 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 25913 break; 25914 case DDI_MODEL_NONE: 25915 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25916 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25917 "sr_read_cdda: ddi_copyin Failed\n"); 25918 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25919 return (EFAULT); 25920 } 25921 break; 25922 } 25923 #else /* ! _MULTI_DATAMODEL */ 25924 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 25925 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25926 "sr_read_cdda: ddi_copyin Failed\n"); 25927 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25928 return (EFAULT); 25929 } 25930 #endif /* _MULTI_DATAMODEL */ 25931 25932 /* 25933 * Since MMC-2 expects max 3 bytes for length, check if the 25934 * length input is greater than 3 bytes 25935 */ 25936 if ((cdda->cdda_length & 0xFF000000) != 0) { 25937 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 25938 "cdrom transfer length too large: %d (limit %d)\n", 25939 cdda->cdda_length, 0xFFFFFF); 25940 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25941 return (EINVAL); 25942 } 25943 25944 switch (cdda->cdda_subcode) { 25945 case CDROM_DA_NO_SUBCODE: 25946 buflen = CDROM_BLK_2352 * cdda->cdda_length; 25947 break; 25948 case CDROM_DA_SUBQ: 25949 buflen = CDROM_BLK_2368 * cdda->cdda_length; 25950 break; 25951 case CDROM_DA_ALL_SUBCODE: 25952 buflen = CDROM_BLK_2448 * cdda->cdda_length; 25953 break; 25954 case CDROM_DA_SUBCODE_ONLY: 25955 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 25956 break; 25957 default: 25958 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25959 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 25960 cdda->cdda_subcode); 25961 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25962 return (EINVAL); 25963 } 25964 25965 /* Build and send the command */ 25966 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25967 bzero(cdb, CDB_GROUP5); 25968 25969 if (un->un_f_cfg_cdda == TRUE) { 25970 cdb[0] = (char)SCMD_READ_CD; 25971 cdb[1] = 0x04; 25972 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 25973 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 25974 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 25975 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 25976 
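/*
 * Sketch of the READ CD (0xBE) CDB being built here: bytes 2-5 carry
 * the big-endian starting LBA (set above), bytes 6-8 carry the
 * big-endian 24-bit transfer length in blocks (set below), byte 9
 * (0x10) requests the user data portion of each block, and byte 10
 * selects which subchannel data, if any, to return with it.
 */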
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 25977 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 25978 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 25979 cdb[9] = 0x10; 25980 switch (cdda->cdda_subcode) { 25981 case CDROM_DA_NO_SUBCODE : 25982 cdb[10] = 0x0; 25983 break; 25984 case CDROM_DA_SUBQ : 25985 cdb[10] = 0x2; 25986 break; 25987 case CDROM_DA_ALL_SUBCODE : 25988 cdb[10] = 0x1; 25989 break; 25990 case CDROM_DA_SUBCODE_ONLY : 25991 /* FALLTHROUGH */ 25992 default : 25993 kmem_free(cdda, sizeof (struct cdrom_cdda)); 25994 kmem_free(com, sizeof (*com)); 25995 return (ENOTTY); 25996 } 25997 } else { 25998 cdb[0] = (char)SCMD_READ_CDDA; 25999 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 26000 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 26001 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 26002 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 26003 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 26004 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 26005 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 26006 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 26007 cdb[10] = cdda->cdda_subcode; 26008 } 26009 26010 com->uscsi_cdb = cdb; 26011 com->uscsi_cdblen = CDB_GROUP5; 26012 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 26013 com->uscsi_buflen = buflen; 26014 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26015 26016 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 26017 SD_PATH_STANDARD); 26018 26019 kmem_free(cdda, sizeof (struct cdrom_cdda)); 26020 kmem_free(com, sizeof (*com)); 26021 return (rval); 26022 } 26023 26024 26025 /* 26026 * Function: sr_read_cdxa() 26027 * 26028 * Description: This routine is the driver entry point for handling CD-ROM 26029 * ioctl requests to return CD-XA (Extended Architecture) data. 26030 * (CDROMCDXA). 26031 * 26032 * Arguments: dev - the device 'dev_t' 26033 * data - pointer to user provided CD-XA structure specifying 26034 * the data starting address, transfer length, and format 26035 * flag - this argument is a pass through to ddi_copyxxx() 26036 * directly from the mode argument of ioctl(). 26037 * 26038 * Return Code: the code returned by sd_send_scsi_cmd() 26039 * EFAULT if ddi_copyxxx() fails 26040 * ENXIO if fail ddi_get_soft_state 26041 * EINVAL if data pointer is NULL 26042 */ 26043 26044 static int 26045 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 26046 { 26047 struct sd_lun *un; 26048 struct uscsi_cmd *com; 26049 struct cdrom_cdxa *cdxa; 26050 int rval; 26051 size_t buflen; 26052 char cdb[CDB_GROUP5]; 26053 uchar_t read_flags; 26054 26055 #ifdef _MULTI_DATAMODEL 26056 /* To support ILP32 applications in an LP64 world */ 26057 struct cdrom_cdxa32 cdrom_cdxa32; 26058 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 26059 #endif /* _MULTI_DATAMODEL */ 26060 26061 if (data == NULL) { 26062 return (EINVAL); 26063 } 26064 26065 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26066 return (ENXIO); 26067 } 26068 26069 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 26070 26071 #ifdef _MULTI_DATAMODEL 26072 switch (ddi_model_convert_from(flag & FMODELS)) { 26073 case DDI_MODEL_ILP32: 26074 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 26075 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 26076 return (EFAULT); 26077 } 26078 /* 26079 * Convert the ILP32 uscsi data from the 26080 * application to LP64 for internal use. 
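 * (The ILP32 form of the structure carries 32-bit address and length
 * fields plus a caddr32_t data pointer; the conversion below widens
 * them into the native struct cdrom_cdxa.)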
26081 */
26082 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
26083 break;
26084 case DDI_MODEL_NONE:
26085 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
26086 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
26087 return (EFAULT);
26088 }
26089 break;
26090 }
26091 #else /* ! _MULTI_DATAMODEL */
26092 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
26093 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
26094 return (EFAULT);
26095 }
26096 #endif /* _MULTI_DATAMODEL */
26097
26098 /*
26099 * Since MMC-2 expects at most 3 bytes for the length, check if the
26100 * length input is greater than 3 bytes.
26101 */
26102 if ((cdxa->cdxa_length & 0xFF000000) != 0) {
26103 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
26104 "cdrom transfer length too large: %d (limit %d)\n",
26105 cdxa->cdxa_length, 0xFFFFFF);
26106 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
26107 return (EINVAL);
26108 }
26109
26110 switch (cdxa->cdxa_format) {
26111 case CDROM_XA_DATA:
26112 buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
26113 read_flags = 0x10;
26114 break;
26115 case CDROM_XA_SECTOR_DATA:
26116 buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
26117 read_flags = 0xf8;
26118 break;
26119 case CDROM_XA_DATA_W_ERROR:
26120 buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
26121 read_flags = 0xfc;
26122 break;
26123 default:
26124 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26125 "sr_read_cdxa: Format '0x%x' Not Supported\n",
26126 cdxa->cdxa_format);
26127 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
26128 return (EINVAL);
26129 }
26130
26131 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
26132 bzero(cdb, CDB_GROUP5);
26133 if (un->un_f_mmc_cap == TRUE) {
26134 cdb[0] = (char)SCMD_READ_CD;
26135 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
26136 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
26137 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
26138 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
26139 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
26140 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
26141 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
26142 cdb[9] = (char)read_flags;
26143 } else {
26144 /*
26145 * Note: A vendor specific command (0xDB) is being used here to
26146 * request a read of all subcodes.
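 * For reference, the 0xDB CDB built below mirrors the MMC form:
 * bytes 2-5 carry the big-endian starting address, bytes 6-9 carry
 * a full 32-bit big-endian transfer length, and byte 10 carries the
 * expected CD-XA format.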
26147 */ 26148 cdb[0] = (char)SCMD_READ_CDXA; 26149 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 26150 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 26151 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 26152 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 26153 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 26154 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 26155 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 26156 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 26157 cdb[10] = cdxa->cdxa_format; 26158 } 26159 com->uscsi_cdb = cdb; 26160 com->uscsi_cdblen = CDB_GROUP5; 26161 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 26162 com->uscsi_buflen = buflen; 26163 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26164 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 26165 SD_PATH_STANDARD); 26166 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 26167 kmem_free(com, sizeof (*com)); 26168 return (rval); 26169 } 26170 26171 26172 /* 26173 * Function: sr_eject() 26174 * 26175 * Description: This routine is the driver entry point for handling CD-ROM 26176 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 26177 * 26178 * Arguments: dev - the device 'dev_t' 26179 * 26180 * Return Code: the code returned by sd_send_scsi_cmd() 26181 */ 26182 26183 static int 26184 sr_eject(dev_t dev) 26185 { 26186 struct sd_lun *un; 26187 int rval; 26188 26189 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26190 (un->un_state == SD_STATE_OFFLINE)) { 26191 return (ENXIO); 26192 } 26193 26194 /* 26195 * To prevent race conditions with the eject 26196 * command, keep track of an eject command as 26197 * it progresses. If we are already handling 26198 * an eject command in the driver for the given 26199 * unit and another request to eject is received 26200 * immediately return EAGAIN so we don't lose 26201 * the command if the current eject command fails. 26202 */ 26203 mutex_enter(SD_MUTEX(un)); 26204 if (un->un_f_ejecting == TRUE) { 26205 mutex_exit(SD_MUTEX(un)); 26206 return (EAGAIN); 26207 } 26208 un->un_f_ejecting = TRUE; 26209 mutex_exit(SD_MUTEX(un)); 26210 26211 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 26212 SD_PATH_STANDARD)) != 0) { 26213 mutex_enter(SD_MUTEX(un)); 26214 un->un_f_ejecting = FALSE; 26215 mutex_exit(SD_MUTEX(un)); 26216 return (rval); 26217 } 26218 26219 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 26220 SD_PATH_STANDARD); 26221 26222 if (rval == 0) { 26223 mutex_enter(SD_MUTEX(un)); 26224 sr_ejected(un); 26225 un->un_mediastate = DKIO_EJECTED; 26226 un->un_f_ejecting = FALSE; 26227 cv_broadcast(&un->un_state_cv); 26228 mutex_exit(SD_MUTEX(un)); 26229 } else { 26230 mutex_enter(SD_MUTEX(un)); 26231 un->un_f_ejecting = FALSE; 26232 mutex_exit(SD_MUTEX(un)); 26233 } 26234 return (rval); 26235 } 26236 26237 26238 /* 26239 * Function: sr_ejected() 26240 * 26241 * Description: This routine updates the soft state structure to invalidate the 26242 * geometry information after the media has been ejected or a 26243 * media eject has been detected. 
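 *
 * Note: Expects to be entered with SD_MUTEX held; the mutex is
 * dropped and reacquired around the cmlb_invalidate() call within
 * this routine.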
26244 *
26245 * Arguments: un - driver soft state (unit) structure
26246 */
26247
26248 static void
26249 sr_ejected(struct sd_lun *un)
26250 {
26251 struct sd_errstats *stp;
26252
26253 ASSERT(un != NULL);
26254 ASSERT(mutex_owned(SD_MUTEX(un)));
26255
26256 un->un_f_blockcount_is_valid = FALSE;
26257 un->un_f_tgt_blocksize_is_valid = FALSE;
26258 mutex_exit(SD_MUTEX(un));
26259 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
26260 mutex_enter(SD_MUTEX(un));
26261
26262 if (un->un_errstats != NULL) {
26263 stp = (struct sd_errstats *)un->un_errstats->ks_data;
26264 stp->sd_capacity.value.ui64 = 0;
26265 }
26266 }
26267
26268
26269 /*
26270 * Function: sr_check_wp()
26271 *
26272 * Description: This routine checks the write protection of removable
26273 * media disks and hotpluggable devices via the write protect bit of
26274 * the Mode Page Header device specific field. Some devices choke
26275 * on unsupported mode pages. To work around this issue, this
26276 * routine uses the 0x3f mode page (request all pages) for all
26277 * device types.
26278 *
26279 * Arguments: dev - the device 'dev_t'
26280 *
26281 * Return Code: int indicating if the device is write protected (1) or not (0)
26282 *
26283 * Context: Kernel thread.
26284 *
26285 */
26286
26287 static int
26288 sr_check_wp(dev_t dev)
26289 {
26290 struct sd_lun *un;
26291 uchar_t device_specific;
26292 uchar_t *sense;
26293 int hdrlen;
26294 int rval = FALSE;
26295
26296 /*
26297 * Note: The return codes for this routine should be reworked to
26298 * properly handle the case of a NULL softstate.
26299 */
26300 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
26301 return (FALSE);
26302 }
26303
26304 if (un->un_f_cfg_is_atapi == TRUE) {
26305 /*
26306 * The mode page contents are not required; set the allocation
26307 * length for the mode page header only.
26308 */
26309 hdrlen = MODE_HEADER_LENGTH_GRP2;
26310 sense = kmem_zalloc(hdrlen, KM_SLEEP);
26311 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen,
26312 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0)
26313 goto err_exit;
26314 device_specific =
26315 ((struct mode_header_grp2 *)sense)->device_specific;
26316 } else {
26317 hdrlen = MODE_HEADER_LENGTH;
26318 sense = kmem_zalloc(hdrlen, KM_SLEEP);
26319 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen,
26320 MODEPAGE_ALLPAGES, SD_PATH_STANDARD) != 0)
26321 goto err_exit;
26322 device_specific =
26323 ((struct mode_header *)sense)->device_specific;
26324 }
26325
26326 /*
26327 * Not all disks understand the mode sense query; a failure above
26328 * jumps to err_exit and returns FALSE, treating such devices as
26329 * not write protected. Otherwise examine the write protect bit.
26330 */
26331 if (device_specific & WRITE_PROTECT) {
26332 rval = TRUE;
26333 }
26334
26335 err_exit:
26336 kmem_free(sense, hdrlen);
26337 return (rval);
26338 }
26339
26340 /*
26341 * Function: sr_volume_ctrl()
26342 *
26343 * Description: This routine is the driver entry point for handling CD-ROM
26344 * audio output volume ioctl requests. (CDROMVOLCTRL)
26345 *
26346 * Arguments: dev - the device 'dev_t'
26347 * data - pointer to user audio volume control structure
26348 * flag - this argument is a pass through to ddi_copyxxx()
26349 * directly from the mode argument of ioctl().
26350 * 26351 * Return Code: the code returned by sd_send_scsi_cmd() 26352 * EFAULT if ddi_copyxxx() fails 26353 * ENXIO if fail ddi_get_soft_state 26354 * EINVAL if data pointer is NULL 26355 * 26356 */ 26357 26358 static int 26359 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 26360 { 26361 struct sd_lun *un; 26362 struct cdrom_volctrl volume; 26363 struct cdrom_volctrl *vol = &volume; 26364 uchar_t *sense_page; 26365 uchar_t *select_page; 26366 uchar_t *sense; 26367 uchar_t *select; 26368 int sense_buflen; 26369 int select_buflen; 26370 int rval; 26371 26372 if (data == NULL) { 26373 return (EINVAL); 26374 } 26375 26376 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26377 (un->un_state == SD_STATE_OFFLINE)) { 26378 return (ENXIO); 26379 } 26380 26381 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 26382 return (EFAULT); 26383 } 26384 26385 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26386 struct mode_header_grp2 *sense_mhp; 26387 struct mode_header_grp2 *select_mhp; 26388 int bd_len; 26389 26390 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 26391 select_buflen = MODE_HEADER_LENGTH_GRP2 + 26392 MODEPAGE_AUDIO_CTRL_LEN; 26393 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26394 select = kmem_zalloc(select_buflen, KM_SLEEP); 26395 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 26396 sense_buflen, MODEPAGE_AUDIO_CTRL, 26397 SD_PATH_STANDARD)) != 0) { 26398 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 26399 "sr_volume_ctrl: Mode Sense Failed\n"); 26400 kmem_free(sense, sense_buflen); 26401 kmem_free(select, select_buflen); 26402 return (rval); 26403 } 26404 sense_mhp = (struct mode_header_grp2 *)sense; 26405 select_mhp = (struct mode_header_grp2 *)select; 26406 bd_len = (sense_mhp->bdesc_length_hi << 8) | 26407 sense_mhp->bdesc_length_lo; 26408 if (bd_len > MODE_BLK_DESC_LENGTH) { 26409 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26410 "sr_volume_ctrl: Mode Sense returned invalid " 26411 "block descriptor length\n"); 26412 kmem_free(sense, sense_buflen); 26413 kmem_free(select, select_buflen); 26414 return (EIO); 26415 } 26416 sense_page = (uchar_t *) 26417 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26418 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 26419 select_mhp->length_msb = 0; 26420 select_mhp->length_lsb = 0; 26421 select_mhp->bdesc_length_hi = 0; 26422 select_mhp->bdesc_length_lo = 0; 26423 } else { 26424 struct mode_header *sense_mhp, *select_mhp; 26425 26426 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26427 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 26428 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 26429 select = kmem_zalloc(select_buflen, KM_SLEEP); 26430 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26431 sense_buflen, MODEPAGE_AUDIO_CTRL, 26432 SD_PATH_STANDARD)) != 0) { 26433 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26434 "sr_volume_ctrl: Mode Sense Failed\n"); 26435 kmem_free(sense, sense_buflen); 26436 kmem_free(select, select_buflen); 26437 return (rval); 26438 } 26439 sense_mhp = (struct mode_header *)sense; 26440 select_mhp = (struct mode_header *)select; 26441 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 26442 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26443 "sr_volume_ctrl: Mode Sense returned invalid " 26444 "block descriptor length\n"); 26445 kmem_free(sense, sense_buflen); 26446 kmem_free(select, select_buflen); 26447 return (EIO); 26448 } 26449 sense_page = (uchar_t *) 26450 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 26451 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 26452 select_mhp->length = 0; 26453 select_mhp->bdesc_length = 0; 26454 } 26455 /* 26456 * Note: An audio control data structure could be created and overlayed 26457 * on the following in place of the array indexing method implemented. 26458 */ 26459 26460 /* Build the select data for the user volume data */ 26461 select_page[0] = MODEPAGE_AUDIO_CTRL; 26462 select_page[1] = 0xE; 26463 /* Set the immediate bit */ 26464 select_page[2] = 0x04; 26465 /* Zero out reserved fields */ 26466 select_page[3] = 0x00; 26467 select_page[4] = 0x00; 26468 /* Return sense data for fields not to be modified */ 26469 select_page[5] = sense_page[5]; 26470 select_page[6] = sense_page[6]; 26471 select_page[7] = sense_page[7]; 26472 /* Set the user specified volume levels for channel 0 and 1 */ 26473 select_page[8] = 0x01; 26474 select_page[9] = vol->channel0; 26475 select_page[10] = 0x02; 26476 select_page[11] = vol->channel1; 26477 /* Channel 2 and 3 are currently unsupported so return the sense data */ 26478 select_page[12] = sense_page[12]; 26479 select_page[13] = sense_page[13]; 26480 select_page[14] = sense_page[14]; 26481 select_page[15] = sense_page[15]; 26482 26483 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 26484 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 26485 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26486 } else { 26487 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26488 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26489 } 26490 26491 kmem_free(sense, sense_buflen); 26492 kmem_free(select, select_buflen); 26493 return (rval); 26494 } 26495 26496 26497 /* 26498 * Function: sr_read_sony_session_offset() 26499 * 26500 * Description: This routine is the driver entry point for handling CD-ROM 26501 * ioctl requests for session offset information. (CDROMREADOFFSET) 26502 * The address of the first track in the last session of a 26503 * multi-session CD-ROM is returned 26504 * 26505 * Note: This routine uses a vendor specific key value in the 26506 * command control field without implementing any vendor check here 26507 * or in the ioctl routine. 26508 * 26509 * Arguments: dev - the device 'dev_t' 26510 * data - pointer to an int to hold the requested address 26511 * flag - this argument is a pass through to ddi_copyxxx() 26512 * directly from the mode argument of ioctl(). 26513 * 26514 * Return Code: the code returned by sd_send_scsi_cmd() 26515 * EFAULT if ddi_copyxxx() fails 26516 * ENXIO if fail ddi_get_soft_state 26517 * EINVAL if data pointer is NULL 26518 */ 26519 26520 static int 26521 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 26522 { 26523 struct sd_lun *un; 26524 struct uscsi_cmd *com; 26525 caddr_t buffer; 26526 char cdb[CDB_GROUP1]; 26527 int session_offset = 0; 26528 int rval; 26529 26530 if (data == NULL) { 26531 return (EINVAL); 26532 } 26533 26534 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26535 (un->un_state == SD_STATE_OFFLINE)) { 26536 return (ENXIO); 26537 } 26538 26539 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 26540 bzero(cdb, CDB_GROUP1); 26541 cdb[0] = SCMD_READ_TOC; 26542 /* 26543 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 26544 * (4 byte TOC response header + 8 byte response data) 26545 */ 26546 cdb[8] = SONY_SESSION_OFFSET_LEN; 26547 /* Byte 9 is the control byte. 
A vendor specific value is used. */
26548 cdb[9] = SONY_SESSION_OFFSET_KEY;
26549 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
26550 com->uscsi_cdb = cdb;
26551 com->uscsi_cdblen = CDB_GROUP1;
26552 com->uscsi_bufaddr = buffer;
26553 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
26554 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
26555
26556 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
26557 SD_PATH_STANDARD);
26558 if (rval != 0) {
26559 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
26560 kmem_free(com, sizeof (*com));
26561 return (rval);
26562 }
26563 if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
26564 session_offset =
26565 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
26566 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
26567 /*
26568 * The offset is returned in units of the current lbasize
26569 * blocks. Convert it to 2k blocks before returning it to
26570 * the user.
26571 */
26572 if (un->un_tgt_blocksize == CDROM_BLK_512) {
26573 session_offset >>= 2;
26574 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
26575 session_offset >>= 1;
26576 }
26577 }
26578
26579 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
26580 rval = EFAULT;
26581 }
26582
26583 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
26584 kmem_free(com, sizeof (*com));
26585 return (rval);
26586 }
26587
26588 /*
26589 * Function: sd_wm_cache_constructor()
26590 *
26591 * Description: Cache Constructor for the wmap cache for the read/modify/write
26592 * devices.
26593 *
26594 * Arguments: wm - A pointer to the sd_w_map to be initialized.
26595 * un - sd_lun structure for the device.
26596 * flags - the km flags passed to the constructor
26597 *
26598 * Return Code: 0 on success.
26599 * -1 on failure.
26600 */
26601
26602 /*ARGSUSED*/
26603 static int
26604 sd_wm_cache_constructor(void *wm, void *un, int flags)
26605 {
26606 bzero(wm, sizeof (struct sd_w_map));
26607 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
26608 return (0);
26609 }
26610
26611
26612 /*
26613 * Function: sd_wm_cache_destructor()
26614 *
26615 * Description: Cache destructor for the wmap cache for the read/modify/write
26616 * devices.
26617 *
26618 * Arguments: wm - A pointer to the sd_w_map to be destroyed.
26619 * un - sd_lun structure for the device.
26620 */
26621 /*ARGSUSED*/
26622 static void
26623 sd_wm_cache_destructor(void *wm, void *un)
26624 {
26625 cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
26626 }
26627
26628
26629 /*
26630 * Function: sd_range_lock()
26631 *
26632 * Description: Lock the specified range of blocks to ensure that a
26633 * read-modify-write is atomic and that no other I/O writes
26634 * to the same location. The range is specified in terms
26635 * of start and end blocks. Block numbers are the actual
26636 * media block numbers, not system block numbers.
26637 *
26638 * Arguments: un - sd_lun structure for the device.
26639 * startb - The starting block number
26640 * endb - The end block number
26641 * typ - type of i/o - simple/read_modify_write
26642 *
26643 * Return Code: wm - pointer to the wmap structure.
26644 *
26645 * Context: This routine can sleep.
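 *
 * Implementation note (for orientation): the body below is a small
 * state machine. SD_WM_CHK_LIST checks whether the range is free; if
 * so it moves to SD_WM_LOCK_RANGE, which allocates and links a wmap
 * and finishes in SD_WM_DONE; if not it moves to SD_WM_WAIT_MAP,
 * which sleeps on the busy wmap's wm_avail cv. Both the wait state
 * and a sleeping allocation loop back to SD_WM_CHK_LIST to
 * re-examine the list.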
26646 */
26647
26648 static struct sd_w_map *
26649 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
26650 {
26651 struct sd_w_map *wmp = NULL;
26652 struct sd_w_map *sl_wmp = NULL;
26653 struct sd_w_map *tmp_wmp;
26654 wm_state state = SD_WM_CHK_LIST;
26655
26656
26657 ASSERT(un != NULL);
26658 ASSERT(!mutex_owned(SD_MUTEX(un)));
26659
26660 mutex_enter(SD_MUTEX(un));
26661
26662 while (state != SD_WM_DONE) {
26663
26664 switch (state) {
26665 case SD_WM_CHK_LIST:
26666 /*
26667 * This is the starting state. Check the wmap list
26668 * to see if the range is currently available.
26669 */
26670 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
26671 /*
26672 * If this is a simple write and no rmw
26673 * i/o is pending then try to lock the
26674 * range as the range should be available.
26675 */
26676 state = SD_WM_LOCK_RANGE;
26677 } else {
26678 tmp_wmp = sd_get_range(un, startb, endb);
26679 if (tmp_wmp != NULL) {
26680 if ((wmp != NULL) && ONLIST(un, wmp)) {
26681 /*
26682 * Should not keep onlist wmps
26683 * while waiting; this macro
26684 * will also do wmp = NULL.
26685 */
26686 FREE_ONLIST_WMAP(un, wmp);
26687 }
26688 /*
26689 * sl_wmp is the wmap on which the
26690 * wait is done; since tmp_wmp points
26691 * to the in-use wmap, set sl_wmp to
26692 * tmp_wmp and change the state to sleep.
26693 */
26694 sl_wmp = tmp_wmp;
26695 state = SD_WM_WAIT_MAP;
26696 } else {
26697 state = SD_WM_LOCK_RANGE;
26698 }
26699
26700 }
26701 break;
26702
26703 case SD_WM_LOCK_RANGE:
26704 ASSERT(un->un_wm_cache);
26705 /*
26706 * The range needs to be locked, so try to get a wmap.
26707 * First attempt it with KM_NOSLEEP; we want to avoid a
26708 * sleep if possible, as we would have to release the
26709 * sd mutex in order to sleep.
26710 */
26711 if (wmp == NULL)
26712 wmp = kmem_cache_alloc(un->un_wm_cache,
26713 KM_NOSLEEP);
26714 if (wmp == NULL) {
26715 mutex_exit(SD_MUTEX(un));
26716 _NOTE(DATA_READABLE_WITHOUT_LOCK
26717 (sd_lun::un_wm_cache))
26718 wmp = kmem_cache_alloc(un->un_wm_cache,
26719 KM_SLEEP);
26720 mutex_enter(SD_MUTEX(un));
26721 /*
26722 * We released the mutex, so recheck by going
26723 * back to the check list state.
26724 */
26725 state = SD_WM_CHK_LIST;
26726 } else {
26727 /*
26728 * We exit the state machine since we
26729 * have the wmap. Do the housekeeping first:
26730 * place the wmap on the wmap list if it is not
26731 * on it already, and then set the state to done.
26732 */
26733 wmp->wm_start = startb;
26734 wmp->wm_end = endb;
26735 wmp->wm_flags = typ | SD_WM_BUSY;
26736 if (typ & SD_WTYPE_RMW) {
26737 un->un_rmw_count++;
26738 }
26739 /*
26740 * If not already on the list then link it in.
26741 */
26742 if (!ONLIST(un, wmp)) {
26743 wmp->wm_next = un->un_wm;
26744 wmp->wm_prev = NULL;
26745 if (wmp->wm_next)
26746 wmp->wm_next->wm_prev = wmp;
26747 un->un_wm = wmp;
26748 }
26749 state = SD_WM_DONE;
26750 }
26751 break;
26752
26753 case SD_WM_WAIT_MAP:
26754 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
26755 /*
26756 * The wait is done on sl_wmp, which is set in the
26757 * check_list state.
26758 */
26759 sl_wmp->wm_wanted_count++;
26760 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
26761 sl_wmp->wm_wanted_count--;
26762 /*
26763 * We can reuse the memory from the completed sl_wmp
26764 * lock range for our new lock, but only if no one is
26765 * waiting for it.
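 * (wm_wanted_count was incremented before the cv_wait above and
 * decremented after it, so a count of zero here means we were the
 * last waiter and may recycle sl_wmp.)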
26766 */ 26767 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 26768 if (sl_wmp->wm_wanted_count == 0) { 26769 if (wmp != NULL) 26770 CHK_N_FREEWMP(un, wmp); 26771 wmp = sl_wmp; 26772 } 26773 sl_wmp = NULL; 26774 /* 26775 * After waking up, need to recheck for availability of 26776 * range. 26777 */ 26778 state = SD_WM_CHK_LIST; 26779 break; 26780 26781 default: 26782 panic("sd_range_lock: " 26783 "Unknown state %d in sd_range_lock", state); 26784 /*NOTREACHED*/ 26785 } /* switch(state) */ 26786 26787 } /* while(state != SD_WM_DONE) */ 26788 26789 mutex_exit(SD_MUTEX(un)); 26790 26791 ASSERT(wmp != NULL); 26792 26793 return (wmp); 26794 } 26795 26796 26797 /* 26798 * Function: sd_get_range() 26799 * 26800 * Description: Find if there any overlapping I/O to this one 26801 * Returns the write-map of 1st such I/O, NULL otherwise. 26802 * 26803 * Arguments: un - sd_lun structure for the device. 26804 * startb - The starting block number 26805 * endb - The end block number 26806 * 26807 * Return Code: wm - pointer to the wmap structure. 26808 */ 26809 26810 static struct sd_w_map * 26811 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 26812 { 26813 struct sd_w_map *wmp; 26814 26815 ASSERT(un != NULL); 26816 26817 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 26818 if (!(wmp->wm_flags & SD_WM_BUSY)) { 26819 continue; 26820 } 26821 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 26822 break; 26823 } 26824 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 26825 break; 26826 } 26827 } 26828 26829 return (wmp); 26830 } 26831 26832 26833 /* 26834 * Function: sd_free_inlist_wmap() 26835 * 26836 * Description: Unlink and free a write map struct. 26837 * 26838 * Arguments: un - sd_lun structure for the device. 26839 * wmp - sd_w_map which needs to be unlinked. 26840 */ 26841 26842 static void 26843 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 26844 { 26845 ASSERT(un != NULL); 26846 26847 if (un->un_wm == wmp) { 26848 un->un_wm = wmp->wm_next; 26849 } else { 26850 wmp->wm_prev->wm_next = wmp->wm_next; 26851 } 26852 26853 if (wmp->wm_next) { 26854 wmp->wm_next->wm_prev = wmp->wm_prev; 26855 } 26856 26857 wmp->wm_next = wmp->wm_prev = NULL; 26858 26859 kmem_cache_free(un->un_wm_cache, wmp); 26860 } 26861 26862 26863 /* 26864 * Function: sd_range_unlock() 26865 * 26866 * Description: Unlock the range locked by wm. 26867 * Free write map if nobody else is waiting on it. 26868 * 26869 * Arguments: un - sd_lun structure for the device. 26870 * wmp - sd_w_map which needs to be unlinked. 26871 */ 26872 26873 static void 26874 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 26875 { 26876 ASSERT(un != NULL); 26877 ASSERT(wm != NULL); 26878 ASSERT(!mutex_owned(SD_MUTEX(un))); 26879 26880 mutex_enter(SD_MUTEX(un)); 26881 26882 if (wm->wm_flags & SD_WTYPE_RMW) { 26883 un->un_rmw_count--; 26884 } 26885 26886 if (wm->wm_wanted_count) { 26887 wm->wm_flags = 0; 26888 /* 26889 * Broadcast that the wmap is available now. 26890 */ 26891 cv_broadcast(&wm->wm_avail); 26892 } else { 26893 /* 26894 * If no one is waiting on the map, it should be free'ed. 26895 */ 26896 sd_free_inlist_wmap(un, wm); 26897 } 26898 26899 mutex_exit(SD_MUTEX(un)); 26900 } 26901 26902 26903 /* 26904 * Function: sd_read_modify_write_task 26905 * 26906 * Description: Called from a taskq thread to initiate the write phase of 26907 * a read-modify-write request. This is used for targets where 26908 * un->un_sys_blocksize != un->un_tgt_blocksize. 
26909 *
26910 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
26911 *
26912 * Context: Called under taskq thread context.
26913 */
26914
26915 static void
26916 sd_read_modify_write_task(void *arg)
26917 {
26918 struct sd_mapblocksize_info *bsp;
26919 struct buf *bp;
26920 struct sd_xbuf *xp;
26921 struct sd_lun *un;
26922
26923 bp = arg; /* The bp is given in arg */
26924 ASSERT(bp != NULL);
26925
26926 /* Get the pointer to the layer-private data struct */
26927 xp = SD_GET_XBUF(bp);
26928 ASSERT(xp != NULL);
26929 bsp = xp->xb_private;
26930 ASSERT(bsp != NULL);
26931
26932 un = SD_GET_UN(bp);
26933 ASSERT(un != NULL);
26934 ASSERT(!mutex_owned(SD_MUTEX(un)));
26935
26936 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
26937 "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
26938
26939 /*
26940 * This is the write phase of a read-modify-write request, called
26941 * under the context of a taskq thread in response to the read
26942 * portion of the rmw request completing under interrupt
26943 * context. The write request must be sent from here down the iostart
26944 * chain as if it were being sent from sd_mapblocksize_iostart(), so
26945 * we use the layer index saved in the layer-private data area.
26946 */
26947 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
26948
26949 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
26950 "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
26951 }
26952
26953
26954 /*
26955 * Function: sddump_do_read_of_rmw()
26956 *
26957 * Description: This routine will be called from sddump. If sddump is called
26958 * with an I/O which is not aligned on a device blocksize boundary
26959 * then the write has to be converted to a read-modify-write.
26960 * Do the read part here in order to keep sddump simple.
26961 * Note that the sd_mutex is held across the call to this
26962 * routine.
26963 *
26964 * Arguments: un - sd_lun
26965 * blkno - block number in terms of media block size.
26966 * nblk - number of blocks.
26967 * bpp - pointer to pointer to the buf structure. On return
26968 * from this function, *bpp points to the valid buffer
26969 * to which the write has to be done.
26970 *
26971 * Return Code: 0 for success or errno-type return code
26972 */
26973
26974 static int
26975 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
26976 struct buf **bpp)
26977 {
26978 int err;
26979 int i;
26980 int rval;
26981 struct buf *bp;
26982 struct scsi_pkt *pkt = NULL;
26983 uint32_t target_blocksize;
26984
26985 ASSERT(un != NULL);
26986 ASSERT(mutex_owned(SD_MUTEX(un)));
26987
26988 target_blocksize = un->un_tgt_blocksize;
26989
26990 mutex_exit(SD_MUTEX(un));
26991
26992 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
26993 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
26994 if (bp == NULL) {
26995 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26996 "no resources for dumping; giving up");
26997 err = ENOMEM;
26998 goto done;
26999 }
27000
27001 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
27002 blkno, nblk);
27003 if (rval != 0) {
27004 scsi_free_consistent_buf(bp);
27005 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27006 "no resources for dumping; giving up");
27007 err = ENOMEM;
27008 goto done;
27009 }
27010
27011 pkt->pkt_flags |= FLAG_NOINTR;
27012
27013 err = EIO;
27014 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
27015
27016 /*
27017 * Scsi_poll returns 0 (success) if the command completes and
27018 * the status block is STATUS_GOOD.
We should only check 27019 * errors if this condition is not true. Even then we should 27020 * send our own request sense packet only if we have a check 27021 * condition and auto request sense has not been performed by 27022 * the hba. 27023 */ 27024 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 27025 27026 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 27027 err = 0; 27028 break; 27029 } 27030 27031 /* 27032 * Check CMD_DEV_GONE 1st, give up if device is gone, 27033 * no need to read RQS data. 27034 */ 27035 if (pkt->pkt_reason == CMD_DEV_GONE) { 27036 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27037 "Error while dumping state with rmw..." 27038 "Device is gone\n"); 27039 break; 27040 } 27041 27042 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 27043 SD_INFO(SD_LOG_DUMP, un, 27044 "sddump: read failed with CHECK, try # %d\n", i); 27045 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 27046 (void) sd_send_polled_RQS(un); 27047 } 27048 27049 continue; 27050 } 27051 27052 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 27053 int reset_retval = 0; 27054 27055 SD_INFO(SD_LOG_DUMP, un, 27056 "sddump: read failed with BUSY, try # %d\n", i); 27057 27058 if (un->un_f_lun_reset_enabled == TRUE) { 27059 reset_retval = scsi_reset(SD_ADDRESS(un), 27060 RESET_LUN); 27061 } 27062 if (reset_retval == 0) { 27063 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 27064 } 27065 (void) sd_send_polled_RQS(un); 27066 27067 } else { 27068 SD_INFO(SD_LOG_DUMP, un, 27069 "sddump: read failed with 0x%x, try # %d\n", 27070 SD_GET_PKT_STATUS(pkt), i); 27071 mutex_enter(SD_MUTEX(un)); 27072 sd_reset_target(un, pkt); 27073 mutex_exit(SD_MUTEX(un)); 27074 } 27075 27076 /* 27077 * If we are not getting anywhere with lun/target resets, 27078 * let's reset the bus. 27079 */ 27080 if (i > SD_NDUMP_RETRIES/2) { 27081 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 27082 (void) sd_send_polled_RQS(un); 27083 } 27084 27085 } 27086 scsi_destroy_pkt(pkt); 27087 27088 if (err != 0) { 27089 scsi_free_consistent_buf(bp); 27090 *bpp = NULL; 27091 } else { 27092 *bpp = bp; 27093 } 27094 27095 done: 27096 mutex_enter(SD_MUTEX(un)); 27097 return (err); 27098 } 27099 27100 27101 /* 27102 * Function: sd_failfast_flushq 27103 * 27104 * Description: Take all bp's on the wait queue that have B_FAILFAST set 27105 * in b_flags and move them onto the failfast queue, then kick 27106 * off a thread to return all bp's on the failfast queue to 27107 * their owners with an error set. 27108 * 27109 * Arguments: un - pointer to the soft state struct for the instance. 27110 * 27111 * Context: may execute in interrupt context. 27112 */ 27113 27114 static void 27115 sd_failfast_flushq(struct sd_lun *un) 27116 { 27117 struct buf *bp; 27118 struct buf *next_waitq_bp; 27119 struct buf *prev_waitq_bp = NULL; 27120 27121 ASSERT(un != NULL); 27122 ASSERT(mutex_owned(SD_MUTEX(un))); 27123 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 27124 ASSERT(un->un_failfast_bp == NULL); 27125 27126 SD_TRACE(SD_LOG_IO_FAILFAST, un, 27127 "sd_failfast_flushq: entry: un:0x%p\n", un); 27128 27129 /* 27130 * Check if we should flush all bufs when entering failfast state, or 27131 * just those with B_FAILFAST set. 27132 */ 27133 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 27134 /* 27135 * Move *all* bp's on the wait queue to the failfast flush 27136 * queue, including those that do NOT have B_FAILFAST set. 
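 *
 * This is a constant-time splice: the entire waitq chain is linked
 * onto the tail of the failfast queue and the waitq head and tail
 * pointers are then cleared, rather than moving the bufs one at a
 * time.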
27137 */ 27138 if (un->un_failfast_headp == NULL) { 27139 ASSERT(un->un_failfast_tailp == NULL); 27140 un->un_failfast_headp = un->un_waitq_headp; 27141 } else { 27142 ASSERT(un->un_failfast_tailp != NULL); 27143 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 27144 } 27145 27146 un->un_failfast_tailp = un->un_waitq_tailp; 27147 27148 /* update kstat for each bp moved out of the waitq */ 27149 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 27150 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 27151 } 27152 27153 /* empty the waitq */ 27154 un->un_waitq_headp = un->un_waitq_tailp = NULL; 27155 27156 } else { 27157 /* 27158 * Go thru the wait queue, pick off all entries with 27159 * B_FAILFAST set, and move these onto the failfast queue. 27160 */ 27161 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 27162 /* 27163 * Save the pointer to the next bp on the wait queue, 27164 * so we get to it on the next iteration of this loop. 27165 */ 27166 next_waitq_bp = bp->av_forw; 27167 27168 /* 27169 * If this bp from the wait queue does NOT have 27170 * B_FAILFAST set, just move on to the next element 27171 * in the wait queue. Note, this is the only place 27172 * where it is correct to set prev_waitq_bp. 27173 */ 27174 if ((bp->b_flags & B_FAILFAST) == 0) { 27175 prev_waitq_bp = bp; 27176 continue; 27177 } 27178 27179 /* 27180 * Remove the bp from the wait queue. 27181 */ 27182 if (bp == un->un_waitq_headp) { 27183 /* The bp is the first element of the waitq. */ 27184 un->un_waitq_headp = next_waitq_bp; 27185 if (un->un_waitq_headp == NULL) { 27186 /* The wait queue is now empty */ 27187 un->un_waitq_tailp = NULL; 27188 } 27189 } else { 27190 /* 27191 * The bp is either somewhere in the middle 27192 * or at the end of the wait queue. 27193 */ 27194 ASSERT(un->un_waitq_headp != NULL); 27195 ASSERT(prev_waitq_bp != NULL); 27196 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 27197 == 0); 27198 if (bp == un->un_waitq_tailp) { 27199 /* bp is the last entry on the waitq. */ 27200 ASSERT(next_waitq_bp == NULL); 27201 un->un_waitq_tailp = prev_waitq_bp; 27202 } 27203 prev_waitq_bp->av_forw = next_waitq_bp; 27204 } 27205 bp->av_forw = NULL; 27206 27207 /* 27208 * update kstat since the bp is moved out of 27209 * the waitq 27210 */ 27211 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 27212 27213 /* 27214 * Now put the bp onto the failfast queue. 27215 */ 27216 if (un->un_failfast_headp == NULL) { 27217 /* failfast queue is currently empty */ 27218 ASSERT(un->un_failfast_tailp == NULL); 27219 un->un_failfast_headp = 27220 un->un_failfast_tailp = bp; 27221 } else { 27222 /* Add the bp to the end of the failfast q */ 27223 ASSERT(un->un_failfast_tailp != NULL); 27224 ASSERT(un->un_failfast_tailp->b_flags & 27225 B_FAILFAST); 27226 un->un_failfast_tailp->av_forw = bp; 27227 un->un_failfast_tailp = bp; 27228 } 27229 } 27230 } 27231 27232 /* 27233 * Now return all bp's on the failfast queue to their owners. 27234 */ 27235 while ((bp = un->un_failfast_headp) != NULL) { 27236 27237 un->un_failfast_headp = bp->av_forw; 27238 if (un->un_failfast_headp == NULL) { 27239 un->un_failfast_tailp = NULL; 27240 } 27241 27242 /* 27243 * We want to return the bp with a failure error code, but 27244 * we do not want a call to sd_start_cmds() to occur here, 27245 * so use sd_return_failed_command_no_restart() instead of 27246 * sd_return_failed_command(). 27247 */ 27248 sd_return_failed_command_no_restart(un, bp, EIO); 27249 } 27250 27251 /* Flush the xbuf queues if required. 
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}


/*
 *    Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 *   Arguments: bp - ptr to buf struct to be examined.
 *
 *     Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



/*
 *    Function: sd_setup_next_xfer
 *
 * Description: Prepare the next I/O operation using DMA_PARTIAL
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate the next block number and the amount to be transferred.
	 *
	 * How much data has NOT been transferred to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * Figure out how many blocks have NOT been transferred to the
	 * HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * Set the starting block number to the end of what WAS transferred;
	 * for example, with a 1 MB b_bcount, a 512-byte target blocksize
	 * and 256 KB left in xb_dma_resid, the next chunk starts 1536
	 * blocks past xb_blkno and covers the remaining 512 blocks.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
	 * will call scsi_init_pkt with NULL_FUNC, so we do not have to
	 * release the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transferred.
		 */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;

		return (1);
	}

	/*
	 * There's really only one possible return value from
	 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt
	 * returns NULL.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}

/*
 *    Function: sd_panic_for_res_conflict
 *
 * Description: Call panic with a string formatted with "Reservation Conflict"
 *		and a human readable identifier indicating the SD instance
 *		that experienced the reservation conflict.
 *
 *   Arguments: un - pointer to the soft state struct for the instance.
 *
 *     Context: may execute in interrupt context.
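 *
 * Note: SD_RESV_CONFLICT_FMT_LEN below only needs to cover the
 * "Reservation Conflict\nDisk: " format text; the device path itself is
 * accommodated by the additional MAXPATHLEN bytes in panic_str.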
 */

#define	SD_RESV_CONFLICT_FMT_LEN 40
void
sd_panic_for_res_conflict(struct sd_lun *un)
{
	char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
	char path_str[MAXPATHLEN];

	(void) snprintf(panic_str, sizeof (panic_str),
	    "Reservation Conflict\nDisk: %s",
	    ddi_pathname(SD_DEVINFO(un), path_str));

	panic(panic_str);
}

/*
 * Note: The following sd_faultinjection_ioctl() routines implement
 * driver support for fault injection, used for error analysis by
 * provoking faults in multiple layers of the driver.
 */

#ifdef SD_FAULT_INJECTION
static uint_t   sd_fault_injection_on = 0;

/*
 *    Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *		fault-injection ioctls, which inject errors into the
 *		layered driver model.
 *
 *   Arguments: cmd	- the ioctl cmd received
 *		arg	- the user-supplied argument (also used to
 *			  return data)
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) {

	uint_t i;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Start Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Empty stray or unused structs from the fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;

	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR entries allowed in the queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store an xb struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store a un struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}

		break;

	case SDIOCINSERTARQ:
		/* Store an arq struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push the stored xb, pkt, un, and arq onto the fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			/*
			 * A null arg means push a single staged entry;
			 * do not consult 'i', which is not set on this path.
			 */
			if (un->sd_fi_fifo_end + 1 < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return the log buffer from the injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for the return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
	    " exit\n");
}


/*
 *    Function: sd_injection_log()
 *
 * Description: This routine appends buf to the existing injection log
 *		for later retrieval via sd_faultinjection_ioctl(), for use
 *		in fault detection and recovery.
 *
 *   Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add the logged value to the injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t	offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}


/*
 *    Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
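 *
 *		The fifo consumed here is staged from userland through
 *		sd_faultinjection_ioctl() above. An illustrative sequence
 *		(a sketch only; fd, fi_pkt, cnt and logbuf are hypothetical
 *		caller-side names):
 *
 *			(void) ioctl(fd, SDIOCSTART, NULL);
 *			(void) ioctl(fd, SDIOCINSERTPKT, &fi_pkt);
 *			(void) ioctl(fd, SDIOCPUSH, &cnt);
 *			(void) ioctl(fd, SDIOCRUN, NULL);
 *			... perform I/O against the device ...
 *			(void) ioctl(fd, SDIOCRETRIEVE, logbuf);
 *			(void) ioctl(fd, SDIOCSTOP, NULL);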
 *
 *   Arguments: pktp	- packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp, xb and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}


	/* take the next set off the fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* set variables accordingly */
	/* set pkt if it was on the fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}

	/* set xb if it was on the fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		if (fi_xb->xb_sense_data[0] != -1) {
			bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
			    SENSE_LENGTH);
		}

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code,
		    "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key,
		    "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code,
		    "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb,
		    es_qual_code, "es_qual_code");
	}

	/* set un if it was on the fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in auto request sense if it was on the fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free does not guarantee that the pointer is set to NULL.
	 * Since we use these pointers to determine whether we set values
	 * or not, let's confirm they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked in sd_unit_attach(). Before it is called, the
 * properties in the conf file should already have been processed, including
 * the "hotpluggable" property.
 *
 * The sd driver distinguishes 3 different types of devices: removable media,
 * non-removable media, and hotpluggable. The differences are defined below:
 *
 * 1. Device ID
 *
 *     The device ID of a device is used to identify this device. Refer to
 *     ddi_devid_register(9F).
 *
 *     For a non-removable media disk device which can provide a 0x80 or 0x83
 *     VPD page (refer to the INQUIRY command of the SCSI SPC specification),
 *     a unique device ID is created to identify this device. For other
 *     non-removable media devices, a default device ID is created only if
 *     this device has at least 2 alternate cylinders. Otherwise, this device
 *     has no devid.
 *
 *     -------------------------------------------------------
 *     removable media   hotpluggable  | Can Have Device ID
 *     -------------------------------------------------------
 *         false             false     |        Yes
 *         false             true      |        Yes
 *         true                x       |        No
 *     -------------------------------------------------------
 *
 *
 * 2. SCSI group 4 commands
 *
 *     In the SCSI specs, only some commands in the group 4 command set can
 *     use 8-byte addresses, which are needed to access storage spaces
 *     beyond 2TB. Other commands have no such capability. Without
 *     supporting group 4, it is impossible to make full use of the storage
 *     space of a disk with a capacity larger than 2TB.
 *
 *     -----------------------------------------------
 *     removable media   hotpluggable   LP64  |  Group
 *     -----------------------------------------------
 *           false            false    false  |    1
 *           false            false    true   |    4
 *           false            true     false  |    1
 *           false            true     true   |    4
 *           true               x        x    |    5
 *     -----------------------------------------------
 *
 *
 * 3. Check for VTOC Label
 *
 *     If a direct-access disk has no EFI label, sd will check if it has a
 *     valid VTOC label. Now, sd also does that check for removable media
 *     and hotpluggable devices.
 *
 *     --------------------------------------------------------------
 *     Direct-Access   removable media   hotpluggable  |  Check Label
 *     --------------------------------------------------------------
 *         false           false            false      |     No
 *         false           false            true       |     No
 *         false           true             false      |     Yes
 *         false           true             true       |     Yes
 *         true              x                x        |     Yes
 *     --------------------------------------------------------------
 *
 *
 * 4. Building a default VTOC label
 *
 *     As section 3 says, sd checks whether certain kinds of devices have a
 *     VTOC label. If those devices have no valid VTOC label, sd(7d) will
 *     attempt to create a default VTOC for them. Currently sd creates a
 *     default VTOC label for all devices on the x86 platform (VTOC_16), but
 *     only for removable media devices on SPARC (VTOC_8).
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable   platform  |  Default Label
 *     -----------------------------------------------------------
 *         false            false        sparc    |      No
 *         false            true          x86     |      Yes
 *         false            true         sparc    |      Yes
 *         true              x             x      |      Yes
 *     -----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 *     Sd supports non-512-byte blocksizes for removable media devices only.
 *     For other devices, only a 512-byte blocksize is supported. This may be
 *     changed in the near future because some RAID devices require a
 *     non-512-byte blocksize.
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable  | non-512-byte blocksize
 *     -----------------------------------------------------------
 *         false             false     |           No
 *         false             true      |           No
 *         true                x       |           Yes
 *     -----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 *     The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used to
 *     query whether a device is a removable media device. It returns 1 for
 *     removable media devices, and 0 for others.
 *
 *     The automatic mounting subsystem should distinguish between the types
 *     of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 *     Fdisk is the traditional partitioning method on the x86 platform. The
 *     sd(7d) driver supports fdisk partitions only on the x86 platform; on
 *     SPARC, sd doesn't support fdisk partitions at all. Note: pcfs(7fs) can
 *     recognize fdisk partitions on both the x86 and SPARC platforms.
 *
 *     -----------------------------------------------------------
 *      platform   removable media   USB/1394  |  fdisk supported
 *     -----------------------------------------------------------
 *        x86            X               X     |       true
 *     ------------------------------------------------------------
 *       sparc           X               X     |       false
 *     ------------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 *     Although sd(7d) doesn't support fdisk on the SPARC platform, it does
 *     support read/write of the mboot for removable media devices on SPARC.
 *
 *     -----------------------------------------------------------
 *      platform   removable media   USB/1394  |  mboot supported
 *     -----------------------------------------------------------
 *        x86            X               X     |       true
 *     ------------------------------------------------------------
 *       sparc         false           false   |       false
 *       sparc         false           true    |       true
 *       sparc         true            false   |       true
 *       sparc         true            true    |       true
 *     ------------------------------------------------------------
 *
 *
 * 9. error handling during opening device
 *
 *     If a disk device fails to open, an errno is returned. For some kinds
 *     of errors, different errnos are returned depending on whether the
 *     device is a removable media device. This brings USB/1394 hard disks
 *     in line with expected hard disk behavior. It is not expected that
 *     this breaks any application.
 *
 *     ------------------------------------------------------
 *     removable media   hotpluggable  |  errno
 *     ------------------------------------------------------
 *         false             false     |   EIO
 *         false             true      |   EIO
 *         true                x       |   ENXIO
 *     ------------------------------------------------------
 *
 *
 * 11. ioctls: DKIOCEJECT, CDROMEJECT
 *
 *     These IOCTLs are applicable only to removable media devices.
 *
 *     -----------------------------------------------------------
 *     removable media   hotpluggable  | DKIOCEJECT, CDROMEJECT
 *     -----------------------------------------------------------
 *         false             false     |         No
 *         false             true      |         No
 *         true                x       |         Yes
 *     -----------------------------------------------------------
 *
 *
 * 12. Kstats for partitions
 *
 *     sd creates partition kstats for non-removable media devices. USB and
 *     Firewire hard disks now have partition kstats as well.
 *
 *     ------------------------------------------------------
 *     removable media   hotpluggable  |  kstat
 *     ------------------------------------------------------
 *         false             false     |   Yes
 *         false             true      |   Yes
 *         true                x       |   No
 *     ------------------------------------------------------
 *
 *
 * 13. Removable media & hotpluggable properties
 *
 *     The sd driver creates a "removable-media" property for removable
 *     media devices. Parent nexus drivers create a "hotpluggable" property
 *     if they support hotplugging.
 *
 *     ---------------------------------------------------------------------
 *     removable media   hotpluggable |  "removable-media"   "hotpluggable"
 *     ---------------------------------------------------------------------
 *         false            false     |        No                  No
 *         false            true      |        No                  Yes
 *         true             false     |        Yes                 No
 *         true             true      |        Yes                 Yes
 *     ---------------------------------------------------------------------
 *
 *
 * 14. Power Management
 *
 *     sd only power manages removable media devices or devices that support
 *     LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 *     A parent nexus that supports hotplugging can also set "pm-capable"
 *     if the disk can be power managed.
 *
 *     ------------------------------------------------------------
 *     removable media  hotpluggable  pm-capable  |  power manage
 *     ------------------------------------------------------------
 *         false           false         false    |      No
 *         false           false         true     |      Yes
 *         false           true          false    |      No
 *         false           true          true     |      Yes
 *         true              x             x      |      Yes
 *     ------------------------------------------------------------
 *
 *     USB and Firewire hard disks can now be power managed independently
 *     of the framebuffer.
 *
 *
 * 15. Support for USB disks with capacity larger than 1TB
 *
 *     Currently, sd doesn't permit a fixed disk device with a capacity
 *     larger than 1TB to be used in a 32-bit operating system environment.
 *     However, sd doesn't do that for removable media devices. Instead, it
 *     assumes that removable media devices cannot have a capacity larger
 *     than 1TB. Therefore, using those devices on a 32-bit system is
 *     partially supported, which can cause some unexpected results.
 *
 *     ---------------------------------------------------------------------
 *     removable media   USB/1394 | Capacity > 1TB |  Used in 32-bit env
 *     ---------------------------------------------------------------------
 *         false           false  |      true      |        No
 *         false           true   |      true      |        No
 *         true            false  |      true      |        Yes
 *         true            true   |      true      |        Yes
 *     ---------------------------------------------------------------------
 *
 *
 * 16. Check write-protection at open time
 *
 *     When a removable media device is being opened for writing without the
 *     NDELAY flag, sd will check if the device is writable. If the device
 *     is write-protected, the open will fail.
 *
 *     ------------------------------------------------------------
 *     removable media   USB/1394  |  WP Check
 *     ------------------------------------------------------------
 *         false           false   |    No
 *         false           true    |    No
 *         true            false   |    Yes
 *         true            true    |    Yes
 *     ------------------------------------------------------------
 *
 *
 * 17. syslog when corrupted VTOC is encountered
 *
 *     Currently, if an invalid VTOC is encountered, sd only prints a syslog
 *     message for fixed SCSI disks.
 *
 *     ------------------------------------------------------------
 *     removable media   USB/1394  |  print syslog
 *     ------------------------------------------------------------
 *         false           false   |    Yes
 *         false           true    |    No
 *         true            false   |    No
 *         true            true    |    No
 *     ------------------------------------------------------------
 */
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_capable_prop;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, and for this kind
		 * of device it is possible to change the medium after the
		 * device has been opened. Thus we should support this
		 * operation.
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * Support non-512-byte blocksizes of removable media devices.
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK.
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * A removable media device can be opened with the NDELAY
		 * flag when there is no media in the drive; in that case we
		 * don't care whether the device is writable. Without the
		 * NDELAY flag, however, we need to check if the media is
		 * write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * We need to start a SCSI watch thread to monitor the media
		 * state: when media is being inserted or ejected, notify
		 * syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices don't support the START_STOP_UNIT command.
		 * Therefore, we'd better check if a device supports it
		 * before sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * Support the eject media ioctls:
		 * FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable-media devices don't support
		 * LOG_SENSE, we cannot use this command to check if
		 * a removable media device supports power management.
		 * We assume that they support power management via the
		 * START_STOP_UNIT command and can be spun up and down
		 * without limitations.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Need to create a zero-length (Boolean) property
		 * "removable-media" for the removable media devices.
		 * Note that the return value of the property is not being
		 * checked, since if we are unable to create the property
		 * we do not want the attach to fail altogether. This is
		 * consistent with other property creation in attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * Create a device ID for the device.
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable-media devices once they are attached.
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to the SCSI specification, sense data has two
		 * kinds of format: fixed format and descriptor format. At
		 * present, we don't support descriptor format sense data
		 * for removable media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set this in sd.conf to 0 in order to disable kstats. The
		 * default is 1, so they are enabled by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));

		/*
		 * Check if the HBA has set the "pm-capable" property.
		 * If "pm-capable" exists and is non-zero then we can
		 * power manage the device without checking the start/stop
		 * cycle count log sense page.
		 *
		 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0)
		 * then we should not power manage the device.
		 *
		 * If "pm-capable" doesn't exist then pm_capable_prop will
		 * be set to SD_PM_CAPABLE_UNDEFINED (-1).
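		 *
		 * (An HBA nexus could express this in its driver.conf with,
		 * e.g., a hypothetical entry:
		 *
		 *	pm-capable=1;
		 *
		 * which is what the ddi_prop_get_int() call below would
		 * pick up.)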
		 * When it is undefined, sd will check the start/stop cycle
		 * count log sense page and power manage the device if the
		 * cycle count limit has not been exceeded.
		 */
		pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
		if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
			un->un_f_log_sense_supported = TRUE;
		} else {
			/*
			 * pm-capable property exists.
			 *
			 * Convert "TRUE" values for pm_capable_prop to
			 * SD_PM_CAPABLE_TRUE (1) to make it easier to check
			 * later. "TRUE" values are any values except
			 * SD_PM_CAPABLE_FALSE (0) and
			 * SD_PM_CAPABLE_UNDEFINED (-1).
			 */
			if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				un->un_f_pm_supported = TRUE;
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un, un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {

		/*
		 * We have to watch hotpluggable devices as well, since
		 * that's the only way for userland applications to
		 * detect hot removal while the device is busy/mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;

	}
}

/*
 * sd_tg_rdwr:
 *	Provides rdwr access for cmlb via sd_tgops. The start_block is
 *	in sys block size, req_length in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int path_flag = (int)(uintptr_t)tg_cookie;
	char *dkl = NULL;
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t	buffer_size = reqlength;
	int rval;
	diskaddr_t	cap;
	uint32_t	lbasize;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			return (rval);
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if ((un->un_f_tgt_blocksize_is_valid == FALSE)) {
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize: re-adjust the block number
		 * to target-block units, and remember the byte offset of the
		 * requested data within the first target block so it can be
		 * copied out of the bounce buffer. For example, with a
		 * 512-byte sys blocksize and a 2048-byte target blocksize,
		 * sys block 5 begins 512 bytes into target block 1.
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to multiple of target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
		    "label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be
	 * inaccurate by a bounded amount (in the interest of
	 * response latency). As a result, failed READs are
	 * commonplace (due to the reading of metadata and not
	 * data). Depending on the per-Vendor/drive Sense data,
	 * the failed READ can cause many (unnecessary) retries.
	 */

	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(un, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				kmem_free(dkl, buffer_size);
				return (rval);
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(un, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	return (rval);
}


static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{

	struct sd_lun *un;
	diskaddr_t	cap;
	uint32_t	lbasize;
	int		path_flag = (int)(uintptr_t)tg_cookie;
	int		ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			ret = sd_send_scsi_READ_CAPACITY(un, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0)
				return (ret);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);

	}

}
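
/*
 * The two routines above are the target-geometry callbacks that sd hands
 * to the common disk label module, cmlb. A minimal sketch of how they are
 * wired up (illustrative only; in this driver the registration is done
 * from sd_unit_attach() through the sd_tgops ops vector, and the elided
 * arguments are not reproduced here):
 *
 *	static cmlb_tg_ops_t sd_tgops = {
 *		TG_DK_OPS_VERSION_1,
 *		sd_tg_rdwr,
 *		sd_tg_getinfo
 *	};
 *
 *	... cmlb_attach(devi, &sd_tgops, ..., un->un_cmlbhandle, ...);
 *
 * After this, cmlb reads and writes disk labels through sd_tg_rdwr() and
 * obtains capacity, blocksize and geometry through sd_tg_getinfo().
 */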