/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include <sys/fm/protocol.h>

/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME		"SCSI SSA/FCAL Disk Driver"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME		"SCSI Disk Driver"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect_type" property.
 *
 */
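/*
 * Illustrative sketch only (not driver code): the check described above
 * amounts to something like
 *
 *	int it = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0,
 *	    "interconnect-type", -1);
 *	if ((it == INTERCONNECT_FIBRE) || (it == INTERCONNECT_SSA) ||
 *	    (it == INTERCONNECT_FABRIC))
 *		assume Fibre Channel behaviors;
 *	else
 *		fall back to SD_DEFAULT_INTERCONNECT_TYPE (below);
 *
 * with a notification printed for FC HBAs that do not define the property.
 */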
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif

/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#define	sd_ssc_init			ssd_ssc_init
#define	sd_ssc_send			ssd_ssc_send
#define	sd_ssc_fini			ssd_ssc_fini
#define	sd_ssc_assessment		ssd_ssc_assessment
#define	sd_ssc_post			ssd_ssc_post
#define	sd_ssc_print			ssd_ssc_print
#define	sd_ssc_ereport_post		ssd_ssc_ereport_post
#define	sd_ssc_set_info			ssd_ssc_set_info
#define	sd_ssc_extract_info		ssd_ssc_extract_info

#endif
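/*
 * For example, with __fibre defined the declaration "void *sd_state"
 * further below compiles (and is seen by the debugger) as "ssd_state",
 * which is what lets the sd and ssd modules coexist without symbol
 * confusion.
 */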
#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int	sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static char	*sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;
/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel SCSI devices only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate whether the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel SCSI, non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;
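/*
 * Each cache node above covers one HBA (pdip), with cache[] indexed by
 * target number; sd_scsi_probe_with_cache() (declared below) consults the
 * cached scsi_probe() result before re-probing a target, and
 * sd_scsi_clear_probe_cache() invalidates the cache when a fresh probe is
 * required.
 */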
/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))


/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
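/*
 * Worked example of the device_id layout (per the scsi_inquiry structure:
 * an 8-byte, blank-padded vid immediately followed by a 16-byte pid): the
 * entry "SEAGATE ST34371FC" below is vid "SEAGATE " (7 letters plus one
 * pad blank) concatenated with pid "ST34371FC"; a device matches when its
 * inquiry data agrees for the full defined length of the entry.
 */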
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000i",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300",     SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00       ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);



#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
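/*
 * Each sd_cdbtab entry below describes one CDB size class: the CDB length
 * (CDB_GROUP*), the command group code, the largest addressable LBA, and
 * the maximum block count that fits in that CDB format (e.g. a 6-byte
 * Group 0 CDB carries a 21-bit LBA and an 8-bit block count, hence
 * 0x1FFFFF and 0xFF).
 */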
static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_target_change_task		ssd_target_change_task
#define	sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_nvpair_str_decode		ssd_nvpair_str_decode
#define	sd_strtok_r			ssd_strtok_r
#define	sd_set_properties		ssd_set_properties
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int  sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int  sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(sd_ssc_t *ssc);

/*
 * Use sd_ssc_init to establish the sd_ssc_t struct, sd_ssc_send to issue
 * an internal uscsi command, and sd_ssc_fini to free the sd_ssc_t struct.
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);

/*
 * Use sd_ssc_assessment to set the correct type of assessment, and
 * sd_ssc_post to post an ereport & system log. sd_ssc_post calls
 * sd_ssc_print to print the system log and sd_ssc_ereport_post to post
 * the ereport.
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);
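/*
 * Typical call sequence for the interfaces above (illustrative sketch
 * only; actual callers pick flags and assessments appropriate to the
 * command being issued):
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *	rval = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, path_flag);
 *	sd_ssc_assessment(ssc, SD_FMT_STANDARD);
 *	sd_ssc_fini(ssc);
 */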
/*
 * Use sd_ssc_set_info to mark an un-decodable-data error, and
 * sd_ssc_extract_info to transfer information from internal data
 * structures to the sd_ssc_t.
 */
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

static void sd_read_unit_properties(struct sd_lun *un);
static int  sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int  sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int  sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int  sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int  sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int  sd_write_deviceid(sd_ssc_t *ssc);
static int  sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int  sd_check_vpd_page_support(sd_ssc_t *ssc);

static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int  sd_ddi_suspend(dev_info_t *devi);
static int  sd_ddi_pm_suspend(struct sd_lun *un);
static int  sd_ddi_resume(dev_info_t *devi);
static int  sd_ddi_pm_resume(struct sd_lun *un);
static int  sdpower(dev_info_t *devi, int component, int level);

static int  sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int  sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int  sd_unit_attach(dev_info_t *devi);
static int  sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int  sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int  sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int  sd_send_polled_RQS(struct sd_lun *un);
static int  sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
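/*
 * For example, sd_cache_control(ssc, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE)
 * asks for the write cache to be enabled while leaving the read cache
 * setting untouched.
 */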
static int  sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
static void sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int  sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int  sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int  sd_ready_and_valid(sd_ssc_t *ssc, int part);

static void sdmin(struct buf *bp);
static int  sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int  sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int  sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int  sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int  sdstrategy(struct buf *bp);
static int  sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int  sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int  sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int  sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int  sd_uscsi_strategy(struct buf *bp);
static int  sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int  sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
    uint32_t *lbap, int path_flag);
*capp, 1463 uint32_t *lbap, int path_flag); 1464 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, 1465 int path_flag); 1466 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1467 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1468 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1469 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1470 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1471 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1472 uchar_t usr_cmd, uchar_t *usr_bufp); 1473 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1474 struct dk_callback *dkc); 1475 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1476 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1477 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1478 uchar_t *bufaddr, uint_t buflen, int path_flag); 1479 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1480 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1481 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1482 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1483 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1484 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1485 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1486 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1487 size_t buflen, daddr_t start_block, int path_flag); 1488 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1489 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1490 path_flag) 1491 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1492 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1493 path_flag) 1494 1495 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1496 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1497 uint16_t param_ptr, int path_flag); 1498 1499 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1500 static void sd_free_rqs(struct sd_lun *un); 1501 1502 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1503 uchar_t *data, int len, int fmt); 1504 static void sd_panic_for_res_conflict(struct sd_lun *un); 1505 1506 /* 1507 * Disk Ioctl Function Prototypes 1508 */ 1509 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1510 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1511 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1512 1513 /* 1514 * Multi-host Ioctl Prototypes 1515 */ 1516 static int sd_check_mhd(dev_t dev, int interval); 1517 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1518 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1519 static char *sd_sname(uchar_t status); 1520 static void sd_mhd_resvd_recover(void *arg); 1521 static void sd_resv_reclaim_thread(); 1522 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1523 static int sd_reserve_release(dev_t dev, int cmd); 1524 static void sd_rmv_resv_reclaim_req(dev_t dev); 1525 static void sd_mhd_reset_notify_cb(caddr_t arg); 1526 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1527 mhioc_inkeys_t *usrp, int flag); 1528 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1529 mhioc_inresvs_t *usrp, int flag); 1530 static int sd_mhdioc_takeown(dev_t 
dev, caddr_t arg, int flag); 1531 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1532 static int sd_mhdioc_release(dev_t dev); 1533 static int sd_mhdioc_register_devid(dev_t dev); 1534 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1535 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1536 1537 /* 1538 * SCSI removable prototypes 1539 */ 1540 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1541 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1542 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1543 static int sr_pause_resume(dev_t dev, int mode); 1544 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1545 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1546 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1547 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1548 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1549 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1550 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1551 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1552 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1553 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1554 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1555 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1556 static int sr_eject(dev_t dev); 1557 static void sr_ejected(register struct sd_lun *un); 1558 static int sr_check_wp(dev_t dev); 1559 static int sd_check_media(dev_t dev, enum dkio_state state); 1560 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1561 static void sd_delayed_cv_broadcast(void *arg); 1562 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1563 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1564 1565 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page); 1566 1567 /* 1568 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1569 */ 1570 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag); 1571 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1572 static void sd_wm_cache_destructor(void *wm, void *un); 1573 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1574 daddr_t endb, ushort_t typ); 1575 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1576 daddr_t endb); 1577 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1578 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1579 static void sd_read_modify_write_task(void * arg); 1580 static int 1581 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1582 struct buf **bpp); 1583 1584 1585 /* 1586 * Function prototypes for failfast support. 
1587 */ 1588 static void sd_failfast_flushq(struct sd_lun *un); 1589 static int sd_failfast_flushq_callback(struct buf *bp); 1590 1591 /* 1592 * Function prototypes to check for lsi devices 1593 */ 1594 static void sd_is_lsi(struct sd_lun *un); 1595 1596 /* 1597 * Function prototypes for partial DMA support 1598 */ 1599 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1600 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1601 1602 1603 /* Function prototypes for cmlb */ 1604 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1605 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1606 1607 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1608 1609 /* 1610 * Constants for failfast support: 1611 * 1612 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1613 * failfast processing being performed. 1614 * 1615 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1616 * failfast processing on all bufs with B_FAILFAST set. 1617 */ 1618 1619 #define SD_FAILFAST_INACTIVE 0 1620 #define SD_FAILFAST_ACTIVE 1 1621 1622 /* 1623 * Bitmask to control behavior of buf(9S) flushes when a transition to 1624 * the failfast state occurs. Optional bits include: 1625 * 1626 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1627 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1628 * be flushed. 1629 * 1630 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1631 * driver, in addition to the regular wait queue. This includes the xbuf 1632 * queues. When clear, only the driver's wait queue will be flushed. 1633 */ 1634 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1635 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1636 1637 /* 1638 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1639 * to flush all queues within the driver. 1640 */ 1641 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1642 1643 1644 /* 1645 * SD Testing Fault Injection 1646 */ 1647 #ifdef SD_FAULT_INJECTION 1648 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1649 static void sd_faultinjection(struct scsi_pkt *pktp); 1650 static void sd_injection_log(char *buf, struct sd_lun *un); 1651 #endif 1652 1653 /* 1654 * Device driver ops vector 1655 */ 1656 static struct cb_ops sd_cb_ops = { 1657 sdopen, /* open */ 1658 sdclose, /* close */ 1659 sdstrategy, /* strategy */ 1660 nodev, /* print */ 1661 sddump, /* dump */ 1662 sdread, /* read */ 1663 sdwrite, /* write */ 1664 sdioctl, /* ioctl */ 1665 nodev, /* devmap */ 1666 nodev, /* mmap */ 1667 nodev, /* segmap */ 1668 nochpoll, /* poll */ 1669 sd_prop_op, /* cb_prop_op */ 1670 0, /* streamtab */ 1671 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1672 CB_REV, /* cb_rev */ 1673 sdaread, /* async I/O read entry point */ 1674 sdawrite /* async I/O write entry point */ 1675 }; 1676 1677 static struct dev_ops sd_ops = { 1678 DEVO_REV, /* devo_rev, */ 1679 0, /* refcnt */ 1680 sdinfo, /* info */ 1681 nulldev, /* identify */ 1682 sdprobe, /* probe */ 1683 sdattach, /* attach */ 1684 sddetach, /* detach */ 1685 nodev, /* reset */ 1686 &sd_cb_ops, /* driver operations */ 1687 NULL, /* bus operations */ 1688 sdpower /* power */ 1689 }; 1690 1691 1692 /* 1693 * This is the loadable module wrapper. 1694 */ 1695 #include <sys/modctl.h> 1696 1697 static struct modldrv modldrv = { 1698 &mod_driverops, /* Type of module. 
This one is a driver */ 1699 SD_MODULE_NAME, /* Module name. */ 1700 &sd_ops /* driver ops */ 1701 }; 1702 1703 1704 static struct modlinkage modlinkage = { 1705 MODREV_1, 1706 &modldrv, 1707 NULL 1708 }; 1709 1710 static cmlb_tg_ops_t sd_tgops = { 1711 TG_DK_OPS_VERSION_1, 1712 sd_tg_rdwr, 1713 sd_tg_getinfo 1714 }; 1715 1716 static struct scsi_asq_key_strings sd_additional_codes[] = { 1717 0x81, 0, "Logical Unit is Reserved", 1718 0x85, 0, "Audio Address Not Valid", 1719 0xb6, 0, "Media Load Mechanism Failed", 1720 0xb9, 0, "Audio Play Operation Aborted", 1721 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1722 0x53, 2, "Medium removal prevented", 1723 0x6f, 0, "Authentication failed during key exchange", 1724 0x6f, 1, "Key not present", 1725 0x6f, 2, "Key not established", 1726 0x6f, 3, "Read without proper authentication", 1727 0x6f, 4, "Mismatched region to this logical unit", 1728 0x6f, 5, "Region reset count error", 1729 0xffff, 0x0, NULL 1730 }; 1731 1732 1733 /* 1734 * Struct for passing printing information for sense data messages 1735 */ 1736 struct sd_sense_info { 1737 int ssi_severity; 1738 int ssi_pfa_flag; 1739 }; 1740 1741 /* 1742 * Table of function pointers for iostart-side routines. Separate "chains" 1743 * of layered function calls are formed by placing the function pointers 1744 * sequentially in the desired order. Functions are called according to an 1745 * incrementing table index ordering. The last function in each chain must 1746 * be sd_core_iostart(). The corresponding iodone-side routines are expected 1747 * in the sd_iodone_chain[] array. 1748 * 1749 * Note: It may seem more natural to organize both the iostart and iodone 1750 * functions together, into an array of structures (or some similar 1751 * organization) with a common index, rather than two separate arrays which 1752 * must be maintained in synchronization. The purpose of this division is 1753 * to achieve improved performance: individual arrays allow for more 1754 * effective cache line utilization on certain platforms.
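 *
 * For illustration only (hypothetical layer, not in the original source):
 * an IO entering the disk chain at SD_CHAIN_DISK_IOSTART (Index: 0) flows
 * sd_mapblockaddr_iostart() -> sd_pm_iostart() -> sd_core_iostart(), with
 * each layer handing the buf to the next via SD_NEXT_IOSTART():
 *
 *	static void
 *	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		... per-layer processing of bp goes here ...
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}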
1755 */ 1756 1757 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1758 1759 1760 static sd_chain_t sd_iostart_chain[] = { 1761 1762 /* Chain for buf IO for disk drive targets (PM enabled) */ 1763 sd_mapblockaddr_iostart, /* Index: 0 */ 1764 sd_pm_iostart, /* Index: 1 */ 1765 sd_core_iostart, /* Index: 2 */ 1766 1767 /* Chain for buf IO for disk drive targets (PM disabled) */ 1768 sd_mapblockaddr_iostart, /* Index: 3 */ 1769 sd_core_iostart, /* Index: 4 */ 1770 1771 /* Chain for buf IO for removable-media targets (PM enabled) */ 1772 sd_mapblockaddr_iostart, /* Index: 5 */ 1773 sd_mapblocksize_iostart, /* Index: 6 */ 1774 sd_pm_iostart, /* Index: 7 */ 1775 sd_core_iostart, /* Index: 8 */ 1776 1777 /* Chain for buf IO for removable-media targets (PM disabled) */ 1778 sd_mapblockaddr_iostart, /* Index: 9 */ 1779 sd_mapblocksize_iostart, /* Index: 10 */ 1780 sd_core_iostart, /* Index: 11 */ 1781 1782 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1783 sd_mapblockaddr_iostart, /* Index: 12 */ 1784 sd_checksum_iostart, /* Index: 13 */ 1785 sd_pm_iostart, /* Index: 14 */ 1786 sd_core_iostart, /* Index: 15 */ 1787 1788 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1789 sd_mapblockaddr_iostart, /* Index: 16 */ 1790 sd_checksum_iostart, /* Index: 17 */ 1791 sd_core_iostart, /* Index: 18 */ 1792 1793 /* Chain for USCSI commands (all targets) */ 1794 sd_pm_iostart, /* Index: 19 */ 1795 sd_core_iostart, /* Index: 20 */ 1796 1797 /* Chain for checksumming USCSI commands (all targets) */ 1798 sd_checksum_uscsi_iostart, /* Index: 21 */ 1799 sd_pm_iostart, /* Index: 22 */ 1800 sd_core_iostart, /* Index: 23 */ 1801 1802 /* Chain for "direct" USCSI commands (all targets) */ 1803 sd_core_iostart, /* Index: 24 */ 1804 1805 /* Chain for "direct priority" USCSI commands (all targets) */ 1806 sd_core_iostart, /* Index: 25 */ 1807 }; 1808 1809 /* 1810 * Macros to locate the first function of each iostart chain in the 1811 * sd_iostart_chain[] array. These are located by the index in the array. 1812 */ 1813 #define SD_CHAIN_DISK_IOSTART 0 1814 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1815 #define SD_CHAIN_RMMEDIA_IOSTART 5 1816 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1817 #define SD_CHAIN_CHKSUM_IOSTART 12 1818 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1819 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1820 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1821 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1822 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1823 1824 1825 /* 1826 * Table of function pointers for the iodone-side routines for the driver- 1827 * internal layering mechanism. The calling sequence for iodone routines 1828 * uses a decrementing table index, so the last routine called in a chain 1829 * must be at the lowest array index location for that chain. The last 1830 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1831 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1832 * of the functions in an iodone side chain must correspond to the ordering 1833 * of the iostart routines for that chain. Note that there is no iodone 1834 * side routine that corresponds to sd_core_iostart(), so there is no 1835 * entry in the table for this. 
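 *
 * For illustration only (not in the original source): when an IO on the
 * PM-enabled disk chain completes, the core begins at SD_CHAIN_DISK_IODONE
 * (Index: 2) and works toward lower indexes, so the completion order is
 * sd_pm_iodone() -> sd_mapblockaddr_iodone() -> sd_buf_iodone(), the exact
 * reverse of the corresponding iostart ordering; each layer invokes the
 * next via SD_NEXT_IODONE(index, un, bp), which decrements the index.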
1836 */ 1837 1838 static sd_chain_t sd_iodone_chain[] = { 1839 1840 /* Chain for buf IO for disk drive targets (PM enabled) */ 1841 sd_buf_iodone, /* Index: 0 */ 1842 sd_mapblockaddr_iodone, /* Index: 1 */ 1843 sd_pm_iodone, /* Index: 2 */ 1844 1845 /* Chain for buf IO for disk drive targets (PM disabled) */ 1846 sd_buf_iodone, /* Index: 3 */ 1847 sd_mapblockaddr_iodone, /* Index: 4 */ 1848 1849 /* Chain for buf IO for removable-media targets (PM enabled) */ 1850 sd_buf_iodone, /* Index: 5 */ 1851 sd_mapblockaddr_iodone, /* Index: 6 */ 1852 sd_mapblocksize_iodone, /* Index: 7 */ 1853 sd_pm_iodone, /* Index: 8 */ 1854 1855 /* Chain for buf IO for removable-media targets (PM disabled) */ 1856 sd_buf_iodone, /* Index: 9 */ 1857 sd_mapblockaddr_iodone, /* Index: 10 */ 1858 sd_mapblocksize_iodone, /* Index: 11 */ 1859 1860 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1861 sd_buf_iodone, /* Index: 12 */ 1862 sd_mapblockaddr_iodone, /* Index: 13 */ 1863 sd_checksum_iodone, /* Index: 14 */ 1864 sd_pm_iodone, /* Index: 15 */ 1865 1866 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1867 sd_buf_iodone, /* Index: 16 */ 1868 sd_mapblockaddr_iodone, /* Index: 17 */ 1869 sd_checksum_iodone, /* Index: 18 */ 1870 1871 /* Chain for USCSI commands (non-checksum targets) */ 1872 sd_uscsi_iodone, /* Index: 19 */ 1873 sd_pm_iodone, /* Index: 20 */ 1874 1875 /* Chain for USCSI commands (checksum targets) */ 1876 sd_uscsi_iodone, /* Index: 21 */ 1877 sd_checksum_uscsi_iodone, /* Index: 22 */ 1878 sd_pm_iodone, /* Index: 23 */ 1879 1880 /* Chain for "direct" USCSI commands (all targets) */ 1881 sd_uscsi_iodone, /* Index: 24 */ 1882 1883 /* Chain for "direct priority" USCSI commands (all targets) */ 1884 sd_uscsi_iodone, /* Index: 25 */ 1885 }; 1886 1887 1888 /* 1889 * Macros to locate the "first" function in the sd_iodone_chain[] array for 1890 * each iodone-side chain. These are located by the array index, but as the 1891 * iodone side functions are called in a decrementing-index order, the 1892 * highest index number in each chain must be specified (as these correspond 1893 * to the first function in the iodone chain that will be called by the core 1894 * at IO completion time). 1895 */ 1896 1897 #define SD_CHAIN_DISK_IODONE 2 1898 #define SD_CHAIN_DISK_IODONE_NO_PM 4 1899 #define SD_CHAIN_RMMEDIA_IODONE 8 1900 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11 1901 #define SD_CHAIN_CHKSUM_IODONE 15 1902 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18 1903 #define SD_CHAIN_USCSI_CMD_IODONE 20 1904 #define SD_CHAIN_USCSI_CHKSUM_IODONE 22 1905 #define SD_CHAIN_DIRECT_CMD_IODONE 24 1906 #define SD_CHAIN_PRIORITY_CMD_IODONE 25 1907 1908 1909 1910 1911 /* 1912 * Array to map a layering chain index to the appropriate initpkt routine. 1913 * The redundant entries are present so that the index used for accessing 1914 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1915 * with this table as well.
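 *
 * For illustration only (hypothetical call, not in the original source):
 * the core can construct the scsi_pkt for any layered IO with a single
 * indexed lookup, using the chain index already stored in the xbuf:
 *
 *	rval = (*(sd_initpkt_map[xp->xb_chain_iostart]))(bp, &pktp);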
1916 */ 1917 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **); 1918 1919 static sd_initpkt_t sd_initpkt_map[] = { 1920 1921 /* Chain for buf IO for disk drive targets (PM enabled) */ 1922 sd_initpkt_for_buf, /* Index: 0 */ 1923 sd_initpkt_for_buf, /* Index: 1 */ 1924 sd_initpkt_for_buf, /* Index: 2 */ 1925 1926 /* Chain for buf IO for disk drive targets (PM disabled) */ 1927 sd_initpkt_for_buf, /* Index: 3 */ 1928 sd_initpkt_for_buf, /* Index: 4 */ 1929 1930 /* Chain for buf IO for removable-media targets (PM enabled) */ 1931 sd_initpkt_for_buf, /* Index: 5 */ 1932 sd_initpkt_for_buf, /* Index: 6 */ 1933 sd_initpkt_for_buf, /* Index: 7 */ 1934 sd_initpkt_for_buf, /* Index: 8 */ 1935 1936 /* Chain for buf IO for removable-media targets (PM disabled) */ 1937 sd_initpkt_for_buf, /* Index: 9 */ 1938 sd_initpkt_for_buf, /* Index: 10 */ 1939 sd_initpkt_for_buf, /* Index: 11 */ 1940 1941 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1942 sd_initpkt_for_buf, /* Index: 12 */ 1943 sd_initpkt_for_buf, /* Index: 13 */ 1944 sd_initpkt_for_buf, /* Index: 14 */ 1945 sd_initpkt_for_buf, /* Index: 15 */ 1946 1947 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1948 sd_initpkt_for_buf, /* Index: 16 */ 1949 sd_initpkt_for_buf, /* Index: 17 */ 1950 sd_initpkt_for_buf, /* Index: 18 */ 1951 1952 /* Chain for USCSI commands (non-checksum targets) */ 1953 sd_initpkt_for_uscsi, /* Index: 19 */ 1954 sd_initpkt_for_uscsi, /* Index: 20 */ 1955 1956 /* Chain for USCSI commands (checksum targets) */ 1957 sd_initpkt_for_uscsi, /* Index: 21 */ 1958 sd_initpkt_for_uscsi, /* Index: 22 */ 1959 sd_initpkt_for_uscsi, /* Index: 23 */ 1960 1961 /* Chain for "direct" USCSI commands (all targets) */ 1962 sd_initpkt_for_uscsi, /* Index: 24 */ 1963 1964 /* Chain for "direct priority" USCSI commands (all targets) */ 1965 sd_initpkt_for_uscsi, /* Index: 25 */ 1966 1967 }; 1968 1969 1970 /* 1971 * Array to map a layering chain index to the appropriate destroypkt routine. 1972 * The redundant entries are present so that the index used for accessing 1973 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1974 * with this table as well.
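 *
 * For illustration only (hypothetical call, not in the original source):
 * the matching packet teardown uses the same chain index:
 *
 *	(*(sd_destroypkt_map[xp->xb_chain_iostart]))(bp);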
1975 */ 1976 typedef void (*sd_destroypkt_t)(struct buf *); 1977 1978 static sd_destroypkt_t sd_destroypkt_map[] = { 1979 1980 /* Chain for buf IO for disk drive targets (PM enabled) */ 1981 sd_destroypkt_for_buf, /* Index: 0 */ 1982 sd_destroypkt_for_buf, /* Index: 1 */ 1983 sd_destroypkt_for_buf, /* Index: 2 */ 1984 1985 /* Chain for buf IO for disk drive targets (PM disabled) */ 1986 sd_destroypkt_for_buf, /* Index: 3 */ 1987 sd_destroypkt_for_buf, /* Index: 4 */ 1988 1989 /* Chain for buf IO for removable-media targets (PM enabled) */ 1990 sd_destroypkt_for_buf, /* Index: 5 */ 1991 sd_destroypkt_for_buf, /* Index: 6 */ 1992 sd_destroypkt_for_buf, /* Index: 7 */ 1993 sd_destroypkt_for_buf, /* Index: 8 */ 1994 1995 /* Chain for buf IO for removable-media targets (PM disabled) */ 1996 sd_destroypkt_for_buf, /* Index: 9 */ 1997 sd_destroypkt_for_buf, /* Index: 10 */ 1998 sd_destroypkt_for_buf, /* Index: 11 */ 1999 2000 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2001 sd_destroypkt_for_buf, /* Index: 12 */ 2002 sd_destroypkt_for_buf, /* Index: 13 */ 2003 sd_destroypkt_for_buf, /* Index: 14 */ 2004 sd_destroypkt_for_buf, /* Index: 15 */ 2005 2006 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2007 sd_destroypkt_for_buf, /* Index: 16 */ 2008 sd_destroypkt_for_buf, /* Index: 17 */ 2009 sd_destroypkt_for_buf, /* Index: 18 */ 2010 2011 /* Chain for USCSI commands (non-checksum targets) */ 2012 sd_destroypkt_for_uscsi, /* Index: 19 */ 2013 sd_destroypkt_for_uscsi, /* Index: 20 */ 2014 2015 /* Chain for USCSI commands (checksum targets) */ 2016 sd_destroypkt_for_uscsi, /* Index: 21 */ 2017 sd_destroypkt_for_uscsi, /* Index: 22 */ 2018 sd_destroypkt_for_uscsi, /* Index: 23 */ 2019 2020 /* Chain for "direct" USCSI commands (all targets) */ 2021 sd_destroypkt_for_uscsi, /* Index: 24 */ 2022 2023 /* Chain for "direct priority" USCSI commands (all targets) */ 2024 sd_destroypkt_for_uscsi, /* Index: 25 */ 2025 2026 }; 2027 2028 2029 2030 /* 2031 * Array to map a layering chain index to the appropriate chain "type". 2032 * The chain type indicates a specific property/usage of the chain. 2033 * The redundant entries are present so that the index used for accessing 2034 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 2035 * with this table as well.
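 *
 * For illustration only (not in the original source): this type map backs
 * simple predicates on an in-flight xbuf, such as the SD_IS_BUFIO() and
 * SD_IS_DIRECT_PRIORITY() macros defined below, e.g.:
 *
 *	if (SD_IS_BUFIO(xp)) {
 *		... handle as regular buf(9S) IO ...
 *	}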
2036 */ 2037 2038 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */ 2039 #define SD_CHAIN_BUFIO 1 /* regular buf IO */ 2040 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */ 2041 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */ 2042 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */ 2043 /* (for error recovery) */ 2044 2045 static int sd_chain_type_map[] = { 2046 2047 /* Chain for buf IO for disk drive targets (PM enabled) */ 2048 SD_CHAIN_BUFIO, /* Index: 0 */ 2049 SD_CHAIN_BUFIO, /* Index: 1 */ 2050 SD_CHAIN_BUFIO, /* Index: 2 */ 2051 2052 /* Chain for buf IO for disk drive targets (PM disabled) */ 2053 SD_CHAIN_BUFIO, /* Index: 3 */ 2054 SD_CHAIN_BUFIO, /* Index: 4 */ 2055 2056 /* Chain for buf IO for removable-media targets (PM enabled) */ 2057 SD_CHAIN_BUFIO, /* Index: 5 */ 2058 SD_CHAIN_BUFIO, /* Index: 6 */ 2059 SD_CHAIN_BUFIO, /* Index: 7 */ 2060 SD_CHAIN_BUFIO, /* Index: 8 */ 2061 2062 /* Chain for buf IO for removable-media targets (PM disabled) */ 2063 SD_CHAIN_BUFIO, /* Index: 9 */ 2064 SD_CHAIN_BUFIO, /* Index: 10 */ 2065 SD_CHAIN_BUFIO, /* Index: 11 */ 2066 2067 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2068 SD_CHAIN_BUFIO, /* Index: 12 */ 2069 SD_CHAIN_BUFIO, /* Index: 13 */ 2070 SD_CHAIN_BUFIO, /* Index: 14 */ 2071 SD_CHAIN_BUFIO, /* Index: 15 */ 2072 2073 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2074 SD_CHAIN_BUFIO, /* Index: 16 */ 2075 SD_CHAIN_BUFIO, /* Index: 17 */ 2076 SD_CHAIN_BUFIO, /* Index: 18 */ 2077 2078 /* Chain for USCSI commands (non-checksum targets) */ 2079 SD_CHAIN_USCSI, /* Index: 19 */ 2080 SD_CHAIN_USCSI, /* Index: 20 */ 2081 2082 /* Chain for USCSI commands (checksum targets) */ 2083 SD_CHAIN_USCSI, /* Index: 21 */ 2084 SD_CHAIN_USCSI, /* Index: 22 */ 2085 SD_CHAIN_USCSI, /* Index: 23 */ 2086 2087 /* Chain for "direct" USCSI commands (all targets) */ 2088 SD_CHAIN_DIRECT, /* Index: 24 */ 2089 2090 /* Chain for "direct priority" USCSI commands (all targets) */ 2091 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */ 2092 }; 2093 2094 2095 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */ 2096 #define SD_IS_BUFIO(xp) \ 2097 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO) 2098 2099 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */ 2100 #define SD_IS_DIRECT_PRIORITY(xp) \ 2101 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY) 2102 2103 2104 2105 /* 2106 * Struct, array, and macros to map a specific chain to the appropriate 2107 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays. 2108 * 2109 * The sd_chain_index_map[] array is used at attach time to set the various 2110 * un_xxx_chain type members of the sd_lun softstate to the specific layering 2111 * chain to be used with the instance. This allows different instances to use 2112 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart 2113 * and xb_chain_iodone index values in the sd_xbuf are initialized to these 2114 * values at sd_xbuf init time, this allows (1) layering chains to be changed 2115 * dynamically and without the use of locking; and (2) a layer to update the 2116 * xb_chain_io[start|done] member in a given xbuf with its current index value, 2117 * to allow for deferred processing of an IO within the same chain from a 2118 * different execution context.
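 *
 * For illustration only (hypothetical fragment, not in the original
 * source): at attach time an instance might select the PM-enabled disk
 * chain and later seed an xbuf from it:
 *
 *	un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
 *	...
 *	xp->xb_chain_iostart =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iostart_index;
 *	xp->xb_chain_iodone =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iodone_index;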
2119 */ 2120 2121 struct sd_chain_index { 2122 int sci_iostart_index; 2123 int sci_iodone_index; 2124 }; 2125 2126 static struct sd_chain_index sd_chain_index_map[] = { 2127 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2128 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2129 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2130 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2131 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2132 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2133 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2134 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2135 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2136 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2137 }; 2138 2139 2140 /* 2141 * The following are indexes into the sd_chain_index_map[] array. 2142 */ 2143 2144 /* un->un_buf_chain_type must be set to one of these */ 2145 #define SD_CHAIN_INFO_DISK 0 2146 #define SD_CHAIN_INFO_DISK_NO_PM 1 2147 #define SD_CHAIN_INFO_RMMEDIA 2 2148 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2149 #define SD_CHAIN_INFO_CHKSUM 4 2150 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2151 2152 /* un->un_uscsi_chain_type must be set to one of these */ 2153 #define SD_CHAIN_INFO_USCSI_CMD 6 2154 /* USCSI with PM disabled is the same as DIRECT */ 2155 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2156 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2157 2158 /* un->un_direct_chain_type must be set to one of these */ 2159 #define SD_CHAIN_INFO_DIRECT_CMD 8 2160 2161 /* un->un_priority_chain_type must be set to one of these */ 2162 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2163 2164 /* size for devid inquiries */ 2165 #define MAX_INQUIRY_SIZE 0xF0 2166 2167 /* 2168 * Macros used by functions to pass a given buf(9S) struct along to the 2169 * next function in the layering chain for further processing. 2170 * 2171 * In the following macros, passing more than three arguments to the called 2172 * routines causes the optimizer for the SPARC compiler to stop doing tail 2173 * call elimination which results in significant performance degradation. 2174 */ 2175 #define SD_BEGIN_IOSTART(index, un, bp) \ 2176 ((*(sd_iostart_chain[index]))(index, un, bp)) 2177 2178 #define SD_BEGIN_IODONE(index, un, bp) \ 2179 ((*(sd_iodone_chain[index]))(index, un, bp)) 2180 2181 #define SD_NEXT_IOSTART(index, un, bp) \ 2182 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2183 2184 #define SD_NEXT_IODONE(index, un, bp) \ 2185 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2186 2187 /* 2188 * Function: _init 2189 * 2190 * Description: This is the driver _init(9E) entry point. 2191 * 2192 * Return Code: Returns the value from mod_install(9F) or 2193 * ddi_soft_state_init(9F) as appropriate. 2194 * 2195 * Context: Called when driver module loaded. 
2196 */ 2197 2198 int 2199 _init(void) 2200 { 2201 int err; 2202 2203 /* establish driver name from module name */ 2204 sd_label = (char *)mod_modname(&modlinkage); 2205 2206 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2207 SD_MAXUNIT); 2208 2209 if (err != 0) { 2210 return (err); 2211 } 2212 2213 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2214 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2215 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2216 2217 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2218 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2219 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2220 2221 /* 2222 * it's ok to init here even for fibre device 2223 */ 2224 sd_scsi_probe_cache_init(); 2225 2226 sd_scsi_target_lun_init(); 2227 2228 /* 2229 * Creating taskq before mod_install ensures that all callers (threads) 2230 * that enter the module after a successful mod_install encounter 2231 * a valid taskq. 2232 */ 2233 sd_taskq_create(); 2234 2235 err = mod_install(&modlinkage); 2236 if (err != 0) { 2237 /* delete taskq if install fails */ 2238 sd_taskq_delete(); 2239 2240 mutex_destroy(&sd_detach_mutex); 2241 mutex_destroy(&sd_log_mutex); 2242 mutex_destroy(&sd_label_mutex); 2243 2244 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2245 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2246 cv_destroy(&sd_tr.srq_inprocess_cv); 2247 2248 sd_scsi_probe_cache_fini(); 2249 2250 sd_scsi_target_lun_fini(); 2251 2252 ddi_soft_state_fini(&sd_state); 2253 return (err); 2254 } 2255 2256 return (err); 2257 } 2258 2259 2260 /* 2261 * Function: _fini 2262 * 2263 * Description: This is the driver _fini(9E) entry point. 2264 * 2265 * Return Code: Returns the value from mod_remove(9F) 2266 * 2267 * Context: Called when driver module is unloaded. 2268 */ 2269 2270 int 2271 _fini(void) 2272 { 2273 int err; 2274 2275 if ((err = mod_remove(&modlinkage)) != 0) { 2276 return (err); 2277 } 2278 2279 sd_taskq_delete(); 2280 2281 mutex_destroy(&sd_detach_mutex); 2282 mutex_destroy(&sd_log_mutex); 2283 mutex_destroy(&sd_label_mutex); 2284 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2285 2286 sd_scsi_probe_cache_fini(); 2287 2288 sd_scsi_target_lun_fini(); 2289 2290 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2291 cv_destroy(&sd_tr.srq_inprocess_cv); 2292 2293 ddi_soft_state_fini(&sd_state); 2294 2295 return (err); 2296 } 2297 2298 2299 /* 2300 * Function: _info 2301 * 2302 * Description: This is the driver _info(9E) entry point. 2303 * 2304 * Arguments: modinfop - pointer to the driver modinfo structure 2305 * 2306 * Return Code: Returns the value from mod_info(9F). 2307 * 2308 * Context: Kernel thread context 2309 */ 2310 2311 int 2312 _info(struct modinfo *modinfop) 2313 { 2314 return (mod_info(&modlinkage, modinfop)); 2315 } 2316 2317 2318 /* 2319 * The following routines implement the driver message logging facility. 2320 * They provide component- and level- based debug output filtering. 2321 * Output may also be restricted to messages for a single instance by 2322 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2323 * to NULL, then messages for all instances are printed. 2324 * 2325 * These routines have been cloned from each other due to the language 2326 * constraints of macros and variable argument list processing. 2327 */ 2328 2329 2330 /* 2331 * Function: sd_log_err 2332 * 2333 * Description: This routine is called by the SD_ERROR macro for debug 2334 * logging of error conditions. 
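 *
 * For illustration only (hypothetical message, not in the original
 * source), a typical call site looks like:
 *
 *	SD_ERROR(SD_LOG_COMMON, un,
 *	    "sd_foo: unexpected status 0x%x\n", status);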
2335 * 2336 * Arguments: comp - driver component being logged 2337 * un - pointer to driver soft state (unit) structure 2338 * fmt - error string and format to be logged 2339 */ 2340 2341 static void 2342 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2343 { 2344 va_list ap; 2345 dev_info_t *dev; 2346 2347 ASSERT(un != NULL); 2348 dev = SD_DEVINFO(un); 2349 ASSERT(dev != NULL); 2350 2351 /* 2352 * Filter messages based on the global component and level masks. 2353 * Also print if un matches the value of sd_debug_un, or if 2354 * sd_debug_un is set to NULL. 2355 */ 2356 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2357 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2358 mutex_enter(&sd_log_mutex); 2359 va_start(ap, fmt); 2360 (void) vsprintf(sd_log_buf, fmt, ap); 2361 va_end(ap); 2362 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2363 mutex_exit(&sd_log_mutex); 2364 } 2365 #ifdef SD_FAULT_INJECTION 2366 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2367 if (un->sd_injection_mask & comp) { 2368 mutex_enter(&sd_log_mutex); 2369 va_start(ap, fmt); 2370 (void) vsprintf(sd_log_buf, fmt, ap); 2371 va_end(ap); 2372 sd_injection_log(sd_log_buf, un); 2373 mutex_exit(&sd_log_mutex); 2374 } 2375 #endif 2376 } 2377 2378 2379 /* 2380 * Function: sd_log_info 2381 * 2382 * Description: This routine is called by the SD_INFO macro for debug 2383 * logging of general purpose informational conditions. 2384 * 2385 * Arguments: component - driver component being logged 2386 * un - pointer to driver soft state (unit) structure 2387 * fmt - info string and format to be logged 2388 */ 2389 2390 static void 2391 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...) 2392 { 2393 va_list ap; 2394 dev_info_t *dev; 2395 2396 ASSERT(un != NULL); 2397 dev = SD_DEVINFO(un); 2398 ASSERT(dev != NULL); 2399 2400 /* 2401 * Filter messages based on the global component and level masks. 2402 * Also print if un matches the value of sd_debug_un, or if 2403 * sd_debug_un is set to NULL. 2404 */ 2405 if ((sd_component_mask & component) && 2406 (sd_level_mask & SD_LOGMASK_INFO) && 2407 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2408 mutex_enter(&sd_log_mutex); 2409 va_start(ap, fmt); 2410 (void) vsprintf(sd_log_buf, fmt, ap); 2411 va_end(ap); 2412 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2413 mutex_exit(&sd_log_mutex); 2414 } 2415 #ifdef SD_FAULT_INJECTION 2416 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2417 if (un->sd_injection_mask & component) { 2418 mutex_enter(&sd_log_mutex); 2419 va_start(ap, fmt); 2420 (void) vsprintf(sd_log_buf, fmt, ap); 2421 va_end(ap); 2422 sd_injection_log(sd_log_buf, un); 2423 mutex_exit(&sd_log_mutex); 2424 } 2425 #endif 2426 } 2427 2428 2429 /* 2430 * Function: sd_log_trace 2431 * 2432 * Description: This routine is called by the SD_TRACE macro for debug 2433 * logging of trace conditions (i.e. function entry/exit). 2434 * 2435 * Arguments: component - driver component being logged 2436 * un - pointer to driver soft state (unit) structure 2437 * fmt - trace string and format to be logged 2438 */ 2439 2440 static void 2441 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2442 { 2443 va_list ap; 2444 dev_info_t *dev; 2445 2446 ASSERT(un != NULL); 2447 dev = SD_DEVINFO(un); 2448 ASSERT(dev != NULL); 2449 2450 /* 2451 * Filter messages based on the global component and level masks. 2452 * Also print if un matches the value of sd_debug_un, or if 2453 * sd_debug_un is set to NULL.
2454 */ 2455 if ((sd_component_mask & component) && 2456 (sd_level_mask & SD_LOGMASK_TRACE) && 2457 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2458 mutex_enter(&sd_log_mutex); 2459 va_start(ap, fmt); 2460 (void) vsprintf(sd_log_buf, fmt, ap); 2461 va_end(ap); 2462 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2463 mutex_exit(&sd_log_mutex); 2464 } 2465 #ifdef SD_FAULT_INJECTION 2466 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2467 if (un->sd_injection_mask & component) { 2468 mutex_enter(&sd_log_mutex); 2469 va_start(ap, fmt); 2470 (void) vsprintf(sd_log_buf, fmt, ap); 2471 va_end(ap); 2472 sd_injection_log(sd_log_buf, un); 2473 mutex_exit(&sd_log_mutex); 2474 } 2475 #endif 2476 } 2477 2478 2479 /* 2480 * Function: sdprobe 2481 * 2482 * Description: This is the driver probe(9e) entry point function. 2483 * 2484 * Arguments: devi - opaque device info handle 2485 * 2486 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2487 * DDI_PROBE_FAILURE: If the probe failed. 2488 * DDI_PROBE_PARTIAL: If the instance is not present now, 2489 * but may be present in the future. 2490 */ 2491 2492 static int 2493 sdprobe(dev_info_t *devi) 2494 { 2495 struct scsi_device *devp; 2496 int rval; 2497 int instance; 2498 2499 /* 2500 * if it wasn't for pln, sdprobe could actually be nulldev 2501 * in the "__fibre" case. 2502 */ 2503 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2504 return (DDI_PROBE_DONTCARE); 2505 } 2506 2507 devp = ddi_get_driver_private(devi); 2508 2509 if (devp == NULL) { 2510 /* Ooops... nexus driver is mis-configured... */ 2511 return (DDI_PROBE_FAILURE); 2512 } 2513 2514 instance = ddi_get_instance(devi); 2515 2516 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2517 return (DDI_PROBE_PARTIAL); 2518 } 2519 2520 /* 2521 * Call the SCSA utility probe routine to see if we actually 2522 * have a target at this SCSI nexus. 2523 */ 2524 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2525 case SCSIPROBE_EXISTS: 2526 switch (devp->sd_inq->inq_dtype) { 2527 case DTYPE_DIRECT: 2528 rval = DDI_PROBE_SUCCESS; 2529 break; 2530 case DTYPE_RODIRECT: 2531 /* CDs etc. Can be removable media */ 2532 rval = DDI_PROBE_SUCCESS; 2533 break; 2534 case DTYPE_OPTICAL: 2535 /* 2536 * Rewritable optical driver HP115AA 2537 * Can also be removable media 2538 */ 2539 2540 /* 2541 * Do not attempt to bind to DTYPE_OPTICAL if 2542 * pre solaris 9 sparc sd behavior is required 2543 * 2544 * If first time through and sd_dtype_optical_bind 2545 * has not been set in /etc/system check properties 2546 */ 2547 2548 if (sd_dtype_optical_bind < 0) { 2549 sd_dtype_optical_bind = ddi_prop_get_int 2550 (DDI_DEV_T_ANY, devi, 0, 2551 "optical-device-bind", 1); 2552 } 2553 2554 if (sd_dtype_optical_bind == 0) { 2555 rval = DDI_PROBE_FAILURE; 2556 } else { 2557 rval = DDI_PROBE_SUCCESS; 2558 } 2559 break; 2560 2561 case DTYPE_NOTPRESENT: 2562 default: 2563 rval = DDI_PROBE_FAILURE; 2564 break; 2565 } 2566 break; 2567 default: 2568 rval = DDI_PROBE_PARTIAL; 2569 break; 2570 } 2571 2572 /* 2573 * This routine checks for resource allocation prior to freeing, 2574 * so it will take care of the "smart probing" case where a 2575 * scsi_probe() may or may not have been issued and will *not* 2576 * free previously-freed resources. 2577 */ 2578 scsi_unprobe(devp); 2579 return (rval); 2580 } 2581 2582 2583 /* 2584 * Function: sdinfo 2585 * 2586 * Description: This is the driver getinfo(9e) entry point function. 
2587 * Given the device number, return the devinfo pointer from 2588 * the scsi_device structure or the instance number 2589 * associated with the dev_t. 2590 * 2591 * Arguments: dip - pointer to device info structure 2592 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2593 * DDI_INFO_DEVT2INSTANCE) 2594 * arg - driver dev_t 2595 * resultp - user buffer for request response 2596 * 2597 * Return Code: DDI_SUCCESS 2598 * DDI_FAILURE 2599 */ 2600 /* ARGSUSED */ 2601 static int 2602 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2603 { 2604 struct sd_lun *un; 2605 dev_t dev; 2606 int instance; 2607 int error; 2608 2609 switch (infocmd) { 2610 case DDI_INFO_DEVT2DEVINFO: 2611 dev = (dev_t)arg; 2612 instance = SDUNIT(dev); 2613 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2614 return (DDI_FAILURE); 2615 } 2616 *result = (void *) SD_DEVINFO(un); 2617 error = DDI_SUCCESS; 2618 break; 2619 case DDI_INFO_DEVT2INSTANCE: 2620 dev = (dev_t)arg; 2621 instance = SDUNIT(dev); 2622 *result = (void *)(uintptr_t)instance; 2623 error = DDI_SUCCESS; 2624 break; 2625 default: 2626 error = DDI_FAILURE; 2627 } 2628 return (error); 2629 } 2630 2631 /* 2632 * Function: sd_prop_op 2633 * 2634 * Description: This is the driver prop_op(9e) entry point function. 2635 * Return the number of blocks for the partition in question 2636 * or forward the request to the property facilities. 2637 * 2638 * Arguments: dev - device number 2639 * dip - pointer to device info structure 2640 * prop_op - property operator 2641 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2642 * name - pointer to property name 2643 * valuep - pointer or address of the user buffer 2644 * lengthp - property length 2645 * 2646 * Return Code: DDI_PROP_SUCCESS 2647 * DDI_PROP_NOT_FOUND 2648 * DDI_PROP_UNDEFINED 2649 * DDI_PROP_NO_MEMORY 2650 * DDI_PROP_BUF_TOO_SMALL 2651 */ 2652 2653 static int 2654 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2655 char *name, caddr_t valuep, int *lengthp) 2656 { 2657 struct sd_lun *un; 2658 2659 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2660 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2661 name, valuep, lengthp)); 2662 2663 return (cmlb_prop_op(un->un_cmlbhandle, 2664 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2665 SDPART(dev), (void *)SD_PATH_DIRECT)); 2666 } 2667 2668 /* 2669 * The following functions are for smart probing: 2670 * sd_scsi_probe_cache_init() 2671 * sd_scsi_probe_cache_fini() 2672 * sd_scsi_clear_probe_cache() 2673 * sd_scsi_probe_with_cache() 2674 */ 2675 2676 /* 2677 * Function: sd_scsi_probe_cache_init 2678 * 2679 * Description: Initializes the probe response cache mutex and head pointer. 2680 * 2681 * Context: Kernel thread context 2682 */ 2683 2684 static void 2685 sd_scsi_probe_cache_init(void) 2686 { 2687 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2688 sd_scsi_probe_cache_head = NULL; 2689 } 2690 2691 2692 /* 2693 * Function: sd_scsi_probe_cache_fini 2694 * 2695 * Description: Frees all resources associated with the probe response cache. 
2696 * 2697 * Context: Kernel thread context 2698 */ 2699 2700 static void 2701 sd_scsi_probe_cache_fini(void) 2702 { 2703 struct sd_scsi_probe_cache *cp; 2704 struct sd_scsi_probe_cache *ncp; 2705 2706 /* Clean up our smart probing linked list */ 2707 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2708 ncp = cp->next; 2709 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2710 } 2711 sd_scsi_probe_cache_head = NULL; 2712 mutex_destroy(&sd_scsi_probe_cache_mutex); 2713 } 2714 2715 2716 /* 2717 * Function: sd_scsi_clear_probe_cache 2718 * 2719 * Description: This routine clears the probe response cache. This is 2720 * done when open() returns ENXIO so that when deferred 2721 * attach is attempted (possibly after a device has been 2722 * turned on) we will retry the probe. Since we don't know 2723 * which target we failed to open, we just clear the 2724 * entire cache. 2725 * 2726 * Context: Kernel thread context 2727 */ 2728 2729 static void 2730 sd_scsi_clear_probe_cache(void) 2731 { 2732 struct sd_scsi_probe_cache *cp; 2733 int i; 2734 2735 mutex_enter(&sd_scsi_probe_cache_mutex); 2736 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2737 /* 2738 * Reset all entries to SCSIPROBE_EXISTS. This will 2739 * force probing to be performed the next time 2740 * sd_scsi_probe_with_cache is called. 2741 */ 2742 for (i = 0; i < NTARGETS_WIDE; i++) { 2743 cp->cache[i] = SCSIPROBE_EXISTS; 2744 } 2745 } 2746 mutex_exit(&sd_scsi_probe_cache_mutex); 2747 } 2748 2749 2750 /* 2751 * Function: sd_scsi_probe_with_cache 2752 * 2753 * Description: This routine implements support for a scsi device probe 2754 * with cache. The driver maintains a cache of the target 2755 * responses to scsi probes. If we get no response from a 2756 * target during a probe inquiry, we remember that, and we 2757 * avoid additional calls to scsi_probe on non-zero LUNs 2758 * on the same target until the cache is cleared. By doing 2759 * so we avoid the 1/4 sec selection timeout for nonzero 2760 * LUNs. lun0 of a target is always probed. 2761 * 2762 * Arguments: devp - Pointer to a scsi_device(9S) structure 2763 * waitfunc - indicates what the allocator routines should 2764 * do when resources are not available. This value 2765 * is passed on to scsi_probe() when that routine 2766 * is called. 2767 * 2768 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2769 * otherwise the value returned by scsi_probe(9F). 
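 *
 * For illustration only (hypothetical scenario, not in the original
 * source): if the probe of target 3, lun 0 gets no response, the cache
 * records SCSIPROBE_NORESP for target 3; subsequent probes of nonzero
 * luns on that target then return SCSIPROBE_NORESP immediately (no
 * selection timeout) until sd_scsi_clear_probe_cache() resets the
 * entry to SCSIPROBE_EXISTS.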
2770 * 2771 * Context: Kernel thread context 2772 */ 2773 2774 static int 2775 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2776 { 2777 struct sd_scsi_probe_cache *cp; 2778 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2779 int lun, tgt; 2780 2781 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2782 SCSI_ADDR_PROP_LUN, 0); 2783 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2784 SCSI_ADDR_PROP_TARGET, -1); 2785 2786 /* Make sure caching enabled and target in range */ 2787 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2788 /* do it the old way (no cache) */ 2789 return (scsi_probe(devp, waitfn)); 2790 } 2791 2792 mutex_enter(&sd_scsi_probe_cache_mutex); 2793 2794 /* Find the cache for this scsi bus instance */ 2795 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2796 if (cp->pdip == pdip) { 2797 break; 2798 } 2799 } 2800 2801 /* If we can't find a cache for this pdip, create one */ 2802 if (cp == NULL) { 2803 int i; 2804 2805 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2806 KM_SLEEP); 2807 cp->pdip = pdip; 2808 cp->next = sd_scsi_probe_cache_head; 2809 sd_scsi_probe_cache_head = cp; 2810 for (i = 0; i < NTARGETS_WIDE; i++) { 2811 cp->cache[i] = SCSIPROBE_EXISTS; 2812 } 2813 } 2814 2815 mutex_exit(&sd_scsi_probe_cache_mutex); 2816 2817 /* Recompute the cache for this target if LUN zero */ 2818 if (lun == 0) { 2819 cp->cache[tgt] = SCSIPROBE_EXISTS; 2820 } 2821 2822 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2823 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2824 return (SCSIPROBE_NORESP); 2825 } 2826 2827 /* Do the actual probe; save & return the result */ 2828 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2829 } 2830 2831 2832 /* 2833 * Function: sd_scsi_target_lun_init 2834 * 2835 * Description: Initializes the attached lun chain mutex and head pointer. 2836 * 2837 * Context: Kernel thread context 2838 */ 2839 2840 static void 2841 sd_scsi_target_lun_init(void) 2842 { 2843 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 2844 sd_scsi_target_lun_head = NULL; 2845 } 2846 2847 2848 /* 2849 * Function: sd_scsi_target_lun_fini 2850 * 2851 * Description: Frees all resources associated with the attached lun 2852 * chain 2853 * 2854 * Context: Kernel thread context 2855 */ 2856 2857 static void 2858 sd_scsi_target_lun_fini(void) 2859 { 2860 struct sd_scsi_hba_tgt_lun *cp; 2861 struct sd_scsi_hba_tgt_lun *ncp; 2862 2863 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 2864 ncp = cp->next; 2865 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 2866 } 2867 sd_scsi_target_lun_head = NULL; 2868 mutex_destroy(&sd_scsi_target_lun_mutex); 2869 } 2870 2871 2872 /* 2873 * Function: sd_scsi_get_target_lun_count 2874 * 2875 * Description: This routine will check in the attached lun chain to see 2876 * how many luns are attached on the required SCSI controller 2877 * and target. Currently, some capabilities like tagged queue 2878 * are supported per target based by HBA. So all luns in a 2879 * target have the same capabilities. Based on this assumption, 2880 * sd should only set these capabilities once per target. This 2881 * function is called when sd needs to decide how many luns 2882 * already attached on a target. 2883 * 2884 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2885 * controller device. 2886 * target - The target ID on the controller's SCSI bus. 
2887 * 2888 * Return Code: The number of luns attached on the required target and 2889 * controller. 2890 * -1 if target ID is not in parallel SCSI scope or the given 2891 * dip is not in the chain. 2892 * 2893 * Context: Kernel thread context 2894 */ 2895 2896 static int 2897 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 2898 { 2899 struct sd_scsi_hba_tgt_lun *cp; 2900 2901 if ((target < 0) || (target >= NTARGETS_WIDE)) { 2902 return (-1); 2903 } 2904 2905 mutex_enter(&sd_scsi_target_lun_mutex); 2906 2907 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2908 if (cp->pdip == dip) { 2909 break; 2910 } 2911 } 2912 2913 mutex_exit(&sd_scsi_target_lun_mutex); 2914 2915 if (cp == NULL) { 2916 return (-1); 2917 } 2918 2919 return (cp->nlun[target]); 2920 } 2921 2922 2923 /* 2924 * Function: sd_scsi_update_lun_on_target 2925 * 2926 * Description: This routine is used to update the attached lun chain when a 2927 * lun is attached or detached on a target. 2928 * 2929 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2930 * controller device. 2931 * target - The target ID on the controller's SCSI bus. 2932 * flag - Indicate the lun is attached or detached. 2933 * 2934 * Context: Kernel thread context 2935 */ 2936 2937 static void 2938 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 2939 { 2940 struct sd_scsi_hba_tgt_lun *cp; 2941 2942 mutex_enter(&sd_scsi_target_lun_mutex); 2943 2944 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2945 if (cp->pdip == dip) { 2946 break; 2947 } 2948 } 2949 2950 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 2951 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 2952 KM_SLEEP); 2953 cp->pdip = dip; 2954 cp->next = sd_scsi_target_lun_head; 2955 sd_scsi_target_lun_head = cp; 2956 } 2957 2958 mutex_exit(&sd_scsi_target_lun_mutex); 2959 2960 if (cp != NULL) { 2961 if (flag == SD_SCSI_LUN_ATTACH) { 2962 cp->nlun[target] ++; 2963 } else { 2964 cp->nlun[target] --; 2965 } 2966 } 2967 } 2968 2969 2970 /* 2971 * Function: sd_spin_up_unit 2972 * 2973 * Description: Issues the following commands to spin-up the device: 2974 * START STOP UNIT, and INQUIRY. 2975 * 2976 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 2977 * structure for this target. 2978 * 2979 * Return Code: 0 - success 2980 * EIO - failure 2981 * EACCES - reservation conflict 2982 * 2983 * Context: Kernel thread context 2984 */ 2985 2986 static int 2987 sd_spin_up_unit(sd_ssc_t *ssc) 2988 { 2989 size_t resid = 0; 2990 int has_conflict = FALSE; 2991 uchar_t *bufaddr; 2992 int status; 2993 struct sd_lun *un; 2994 2995 ASSERT(ssc != NULL); 2996 un = ssc->ssc_un; 2997 ASSERT(un != NULL); 2998 2999 /* 3000 * Send a throwaway START UNIT command. 3001 * 3002 * If we fail on this, we don't care presently what precisely 3003 * is wrong. EMC's arrays will also fail this with a check 3004 * condition (0x2/0x4/0x3) if the device is "inactive," but 3005 * we don't want to fail the attach because it may become 3006 * "active" later. 3007 */ 3008 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 3009 SD_PATH_DIRECT); 3010 3011 if (status != 0) { 3012 if (status == EACCES) 3013 has_conflict = TRUE; 3014 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3015 } 3016 3017 /* 3018 * Send another INQUIRY command to the target. This is necessary for 3019 * non-removable media direct access devices because their INQUIRY data 3020 * may not be fully qualified until they are spun up (perhaps via the 3021 * START command above). 
Note: This seems to be needed for some 3022 * legacy devices only. The INQUIRY command should succeed even if a 3023 * Reservation Conflict is present. 3024 */ 3025 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 3026 3027 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid) 3028 != 0) { 3029 kmem_free(bufaddr, SUN_INQSIZE); 3030 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 3031 return (EIO); 3032 } 3033 3034 /* 3035 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 3036 * Note that this routine does not return a failure here even if the 3037 * INQUIRY command did not return any data. This is a legacy behavior. 3038 */ 3039 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 3040 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 3041 } 3042 3043 kmem_free(bufaddr, SUN_INQSIZE); 3044 3045 /* If we hit a reservation conflict above, tell the caller. */ 3046 if (has_conflict == TRUE) { 3047 return (EACCES); 3048 } 3049 3050 return (0); 3051 } 3052 3053 #ifdef _LP64 3054 /* 3055 * Function: sd_enable_descr_sense 3056 * 3057 * Description: This routine attempts to select descriptor sense format 3058 * using the Control mode page. Devices that support 64 bit 3059 * LBAs (for >2TB luns) should also implement descriptor 3060 * sense data so we will call this function whenever we see 3061 * a lun larger than 2TB. If for some reason the device 3062 * supports 64 bit LBAs but doesn't support descriptor sense, 3063 * presumably the mode select will fail. Everything will 3064 * continue to work normally except that we will not get 3065 * complete sense data for commands that fail with an LBA 3066 * larger than 32 bits. 3067 * 3068 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3069 * structure for this target. 3070 * 3071 * Context: Kernel thread context only 3072 */ 3073 3074 static void 3075 sd_enable_descr_sense(sd_ssc_t *ssc) 3076 { 3077 uchar_t *header; 3078 struct mode_control_scsi3 *ctrl_bufp; 3079 size_t buflen; 3080 size_t bd_len; 3081 int status; 3082 struct sd_lun *un; 3083 3084 ASSERT(ssc != NULL); 3085 un = ssc->ssc_un; 3086 ASSERT(un != NULL); 3087 3088 /* 3089 * Read MODE SENSE page 0xA, Control Mode Page 3090 */ 3091 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3092 sizeof (struct mode_control_scsi3); 3093 header = kmem_zalloc(buflen, KM_SLEEP); 3094 3095 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 3096 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT); 3097 3098 if (status != 0) { 3099 SD_ERROR(SD_LOG_COMMON, un, 3100 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3101 goto eds_exit; 3102 } 3103 3104 /* 3105 * Determine size of Block Descriptors in order to locate 3106 * the mode page data. ATAPI devices return 0, SCSI devices 3107 * should return MODE_BLK_DESC_LENGTH. 3108 */ 3109 bd_len = ((struct mode_header *)header)->bdesc_length; 3110 3111 /* Clear the mode data length field for MODE SELECT */ 3112 ((struct mode_header *)header)->length = 0; 3113 3114 ctrl_bufp = (struct mode_control_scsi3 *) 3115 (header + MODE_HEADER_LENGTH + bd_len); 3116 3117 /* 3118 * If the page length is smaller than the expected value, 3119 * the target device doesn't support D_SENSE. Bail out here.
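 * (For reference: a mode page's length field does not count the two-byte
 * page header itself, which is why the check below compares against
 * sizeof (struct mode_control_scsi3) - 2.)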
3120 */ 3121 if (ctrl_bufp->mode_page.length < 3122 sizeof (struct mode_control_scsi3) - 2) { 3123 SD_ERROR(SD_LOG_COMMON, un, 3124 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3125 goto eds_exit; 3126 } 3127 3128 /* 3129 * Clear PS bit for MODE SELECT 3130 */ 3131 ctrl_bufp->mode_page.ps = 0; 3132 3133 /* 3134 * Set D_SENSE to enable descriptor sense format. 3135 */ 3136 ctrl_bufp->d_sense = 1; 3137 3138 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3139 3140 /* 3141 * Use MODE SELECT to commit the change to the D_SENSE bit 3142 */ 3143 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 3144 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT); 3145 3146 if (status != 0) { 3147 SD_INFO(SD_LOG_COMMON, un, 3148 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3149 } else { 3150 kmem_free(header, buflen); 3151 return; 3152 } 3153 3154 eds_exit: 3155 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3156 kmem_free(header, buflen); 3157 } 3158 3159 /* 3160 * Function: sd_reenable_dsense_task 3161 * 3162 * Description: Re-enable descriptor sense after device or bus reset 3163 * 3164 * Context: Executes in a taskq() thread context 3165 */ 3166 static void 3167 sd_reenable_dsense_task(void *arg) 3168 { 3169 struct sd_lun *un = arg; 3170 sd_ssc_t *ssc; 3171 3172 ASSERT(un != NULL); 3173 3174 ssc = sd_ssc_init(un); 3175 sd_enable_descr_sense(ssc); 3176 sd_ssc_fini(ssc); 3177 } 3178 #endif /* _LP64 */ 3179 3180 /* 3181 * Function: sd_set_mmc_caps 3182 * 3183 * Description: This routine determines if the device is MMC compliant and if 3184 * the device supports CDDA via a mode sense of the CDVD 3185 * capabilities mode page. Also checks if the device is a 3186 * dvdram writable device. 3187 * 3188 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3189 * structure for this target. 3190 * 3191 * Context: Kernel thread context only 3192 */ 3193 3194 static void 3195 sd_set_mmc_caps(sd_ssc_t *ssc) 3196 { 3197 struct mode_header_grp2 *sense_mhp; 3198 uchar_t *sense_page; 3199 caddr_t buf; 3200 int bd_len; 3201 int status; 3202 struct uscsi_cmd com; 3203 int rtn; 3204 uchar_t *out_data_rw, *out_data_hd; 3205 uchar_t *rqbuf_rw, *rqbuf_hd; 3206 struct sd_lun *un; 3207 3208 ASSERT(ssc != NULL); 3209 un = ssc->ssc_un; 3210 ASSERT(un != NULL); 3211 3212 /* 3213 * The flags which will be set in this function are - mmc compliant, 3214 * dvdram writable device, cdda support. Initialize them to FALSE 3215 * and if a capability is detected - it will be set to TRUE. 3216 */ 3217 un->un_f_mmc_cap = FALSE; 3218 un->un_f_dvdram_writable_device = FALSE; 3219 un->un_f_cfg_cdda = FALSE; 3220 3221 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3222 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3223 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3224 3225 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3226 3227 if (status != 0) { 3228 /* command failed; just return */ 3229 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3230 return; 3231 } 3232 /* 3233 * If the mode sense request for the CDROM CAPABILITIES 3234 * page (0x2A) succeeds the device is assumed to be MMC. 3235 */ 3236 un->un_f_mmc_cap = TRUE; 3237 3238 /* Get to the page data */ 3239 sense_mhp = (struct mode_header_grp2 *)buf; 3240 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3241 sense_mhp->bdesc_length_lo; 3242 if (bd_len > MODE_BLK_DESC_LENGTH) { 3243 /* 3244 * We did not get back the expected block descriptor 3245 * length so we cannot determine if the device supports 3246 * CDDA. 
However, we still indicate the device is MMC
			 * according to the successful response to the page
			 * 0x2A mode sense request.
			 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_set_mmc_caps: Mode Sense returned "
		    "invalid block descriptor length\n");
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/* See if read CDDA is supported */
	sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 +
	    bd_len);
	un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE;

	/* See if writing DVD RAM is supported. */
	un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE;
	if (un->un_f_dvdram_writable_device == TRUE) {
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/*
	 * If the device presents DVD or CD capabilities in the mode
	 * page, we can return here since an RRD will not have
	 * these capabilities.
	 */
	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}
	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);

	/*
	 * If un->un_f_dvdram_writable_device is still FALSE,
	 * check for a Removable Rigid Disk (RRD). An RRD
	 * device is identified by the features RANDOM_WRITABLE and
	 * HARDWARE_DEFECT_MANAGEMENT.
	 */
	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
	    RANDOM_WRITABLE, SD_PATH_STANDARD);

	sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	if (rtn != 0) {
		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
		kmem_free(rqbuf_rw, SENSE_LENGTH);
		return;
	}

	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
	    HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD);

	sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	if (rtn == 0) {
		/*
		 * We have good information, check for random writable
		 * and hardware defect features.
		 */
		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
			un->un_f_dvdram_writable_device = TRUE;
		}
	}

	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_rw, SENSE_LENGTH);
	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_hd, SENSE_LENGTH);
}

/*
 * Function: sd_check_for_writable_cd
 *
 * Description: This routine determines if the media in the device is
 *		writable or not. It uses the GET CONFIGURATION command (0x46)
 *		to determine if the media is writable.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct"
 *			chain and the normal command waitq, or
 *			SD_PATH_DIRECT_PRIORITY to use the USCSI
 *			"direct" chain and bypass the normal command
 *			waitq.
 *
 * Context: Never called at interrupt context.
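 *
 * Note: as a sketch of the logic below, a GET CONFIGURATION response
 * whose current-profile field (bytes 6-7 of the feature header) reads
 * 0x0012 (the MMC DVD-RAM profile) is taken to indicate writable media.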
3342 */ 3343 3344 static void 3345 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag) 3346 { 3347 struct uscsi_cmd com; 3348 uchar_t *out_data; 3349 uchar_t *rqbuf; 3350 int rtn; 3351 uchar_t *out_data_rw, *out_data_hd; 3352 uchar_t *rqbuf_rw, *rqbuf_hd; 3353 struct mode_header_grp2 *sense_mhp; 3354 uchar_t *sense_page; 3355 caddr_t buf; 3356 int bd_len; 3357 int status; 3358 struct sd_lun *un; 3359 3360 ASSERT(ssc != NULL); 3361 un = ssc->ssc_un; 3362 ASSERT(un != NULL); 3363 ASSERT(mutex_owned(SD_MUTEX(un))); 3364 3365 /* 3366 * Initialize the writable media to false, if configuration info. 3367 * tells us otherwise then only we will set it. 3368 */ 3369 un->un_f_mmc_writable_media = FALSE; 3370 mutex_exit(SD_MUTEX(un)); 3371 3372 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3373 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3374 3375 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH, 3376 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3377 3378 if (rtn != 0) 3379 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3380 3381 mutex_enter(SD_MUTEX(un)); 3382 if (rtn == 0) { 3383 /* 3384 * We have good information, check for writable DVD. 3385 */ 3386 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3387 un->un_f_mmc_writable_media = TRUE; 3388 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3389 kmem_free(rqbuf, SENSE_LENGTH); 3390 return; 3391 } 3392 } 3393 3394 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3395 kmem_free(rqbuf, SENSE_LENGTH); 3396 3397 /* 3398 * Determine if this is a RRD type device. 3399 */ 3400 mutex_exit(SD_MUTEX(un)); 3401 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3402 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3403 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3404 3405 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3406 3407 mutex_enter(SD_MUTEX(un)); 3408 if (status != 0) { 3409 /* command failed; just return */ 3410 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3411 return; 3412 } 3413 3414 /* Get to the page data */ 3415 sense_mhp = (struct mode_header_grp2 *)buf; 3416 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3417 if (bd_len > MODE_BLK_DESC_LENGTH) { 3418 /* 3419 * We did not get back the expected block descriptor length so 3420 * we cannot check the mode page. 3421 */ 3422 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3423 "sd_check_for_writable_cd: Mode Sense returned " 3424 "invalid block descriptor length\n"); 3425 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3426 return; 3427 } 3428 3429 /* 3430 * If the device presents DVD or CD capabilities in the mode 3431 * page, we can return here since a RRD device will not have 3432 * these capabilities. 3433 */ 3434 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3435 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3436 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3437 return; 3438 } 3439 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3440 3441 /* 3442 * If un->un_f_mmc_writable_media is still FALSE, 3443 * check for RRD type media. A RRD device is identified 3444 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 
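	 * Both features must also be reported as "current" (bit 0 of the
	 * flags byte at offset 10 of the returned feature descriptor),
	 * which is what the out_data_rw[10] and out_data_hd[10] tests
	 * below check.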
	 */
	mutex_exit(SD_MUTEX(un));
	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
	    RANDOM_WRITABLE, path_flag);

	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	if (rtn != 0) {
		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
		kmem_free(rqbuf_rw, SENSE_LENGTH);
		mutex_enter(SD_MUTEX(un));
		return;
	}

	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
	    HARDWARE_DEFECT_MANAGEMENT, path_flag);

	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	mutex_enter(SD_MUTEX(un));
	if (rtn == 0) {
		/*
		 * We have good information, check for random writable
		 * and hardware defect features as current.
		 */
		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
		    (out_data_rw[10] & 0x1) &&
		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
		    (out_data_hd[10] & 0x1)) {
			un->un_f_mmc_writable_media = TRUE;
		}
	}

	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_rw, SENSE_LENGTH);
	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_hd, SENSE_LENGTH);
}

/*
 * Function: sd_read_unit_properties
 *
 * Description: The following implements a property lookup mechanism.
 *		Properties for particular disks (keyed on vendor, model
 *		and rev numbers) are sought in the sd.conf file via
 *		sd_process_sdconf_file(), and if not found there, are
 *		looked for in a list hardcoded in this driver via
 *		sd_process_sdconf_table(). Once located, the properties
 *		are used to update the driver unit structure.
 *
 * Arguments: un - driver soft state (unit) structure
 */

static void
sd_read_unit_properties(struct sd_lun *un)
{
	/*
	 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
	 * the "sd-config-list" property (from the sd.conf file) or if
	 * there was not a match for the inquiry vid/pid. If this event
	 * occurs the static driver configuration table is searched for
	 * a match.
	 */
	ASSERT(un != NULL);
	if (sd_process_sdconf_file(un) == SD_FAILURE) {
		sd_process_sdconf_table(un);
	}

	/* check for LSI device */
	sd_is_lsi(un);
}


/*
 * Function: sd_process_sdconf_file
 *
 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the
 *		driver's config file (i.e., sd.conf) and update the driver
 *		soft state structure accordingly.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: SD_SUCCESS - The properties were successfully set according
 *			to the driver configuration file.
 *		SD_FAILURE - The driver config list was not obtained or
 *			there was no vid/pid match. This indicates that
 *			the static config table should be used.
 *
 * The config file has a property, "sd-config-list". Currently we support
 * two kinds of formats.
For both formats, the value of this property 3543 * is a list of duplets: 3544 * 3545 * sd-config-list= 3546 * <duplet>, 3547 * [,<duplet>]*; 3548 * 3549 * For the improved format, where 3550 * 3551 * <duplet>:= "<vid+pid>","<tunable-list>" 3552 * 3553 * and 3554 * 3555 * <tunable-list>:= <tunable> [, <tunable> ]*; 3556 * <tunable> = <name> : <value> 3557 * 3558 * The <vid+pid> is the string that is returned by the target device on a 3559 * SCSI inquiry command, the <tunable-list> contains one or more tunables 3560 * to apply to all target devices with the specified <vid+pid>. 3561 * 3562 * Each <tunable> is a "<name> : <value>" pair. 3563 * 3564 * For the old format, the structure of each duplet is as follows: 3565 * 3566 * <duplet>:= "<vid+pid>","<data-property-name_list>" 3567 * 3568 * The first entry of the duplet is the device ID string (the concatenated 3569 * vid & pid; not to be confused with a device_id). This is defined in 3570 * the same way as in the sd_disk_table. 3571 * 3572 * The second part of the duplet is a string that identifies a 3573 * data-property-name-list. The data-property-name-list is defined as 3574 * follows: 3575 * 3576 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3577 * 3578 * The syntax of <data-property-name> depends on the <version> field. 3579 * 3580 * If version = SD_CONF_VERSION_1 we have the following syntax: 3581 * 3582 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3583 * 3584 * where the prop0 value will be used to set prop0 if bit0 set in the 3585 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3586 * 3587 */ 3588 3589 static int 3590 sd_process_sdconf_file(struct sd_lun *un) 3591 { 3592 char **config_list = NULL; 3593 uint_t nelements; 3594 char *vidptr; 3595 int vidlen; 3596 char *dnlist_ptr; 3597 char *dataname_ptr; 3598 char *dataname_lasts; 3599 int *data_list = NULL; 3600 uint_t data_list_len; 3601 int rval = SD_FAILURE; 3602 int i; 3603 3604 ASSERT(un != NULL); 3605 3606 /* Obtain the configuration list associated with the .conf file */ 3607 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un), 3608 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list, 3609 &config_list, &nelements) != DDI_PROP_SUCCESS) { 3610 return (SD_FAILURE); 3611 } 3612 3613 /* 3614 * Compare vids in each duplet to the inquiry vid - if a match is 3615 * made, get the data value and update the soft state structure 3616 * accordingly. 3617 * 3618 * Each duplet should show as a pair of strings, return SD_FAILURE 3619 * otherwise. 3620 */ 3621 if (nelements & 1) { 3622 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3623 "sd-config-list should show as pairs of strings.\n"); 3624 if (config_list) 3625 ddi_prop_free(config_list); 3626 return (SD_FAILURE); 3627 } 3628 3629 for (i = 0; i < nelements; i += 2) { 3630 /* 3631 * Note: The assumption here is that each vid entry is on 3632 * a unique line from its associated duplet. 3633 */ 3634 vidptr = config_list[i]; 3635 vidlen = (int)strlen(vidptr); 3636 if ((vidlen == 0) || 3637 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3638 continue; 3639 } 3640 3641 /* 3642 * dnlist contains 1 or more blank separated 3643 * data-property-name entries 3644 */ 3645 dnlist_ptr = config_list[i + 1]; 3646 3647 if (strchr(dnlist_ptr, ':') != NULL) { 3648 /* 3649 * Decode the improved format sd-config-list. 
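			 *
			 * A hypothetical sd.conf entry in this format
			 * might look like:
			 *
			 *   sd-config-list = "SEAGATE ST32550W",
			 *       "retries-busy:5, cache-nonvolatile:true,
			 *       throttle-max:64";
			 *
			 * (The vid/pid and values above are illustrative
			 * only.)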
3650 */ 3651 sd_nvpair_str_decode(un, dnlist_ptr); 3652 } else { 3653 /* 3654 * The old format sd-config-list, loop through all 3655 * data-property-name entries in the 3656 * data-property-name-list 3657 * setting the properties for each. 3658 */ 3659 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3660 &dataname_lasts); dataname_ptr != NULL; 3661 dataname_ptr = sd_strtok_r(NULL, " \t", 3662 &dataname_lasts)) { 3663 int version; 3664 3665 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3666 "sd_process_sdconf_file: disk:%s, " 3667 "data:%s\n", vidptr, dataname_ptr); 3668 3669 /* Get the data list */ 3670 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3671 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3672 &data_list_len) != DDI_PROP_SUCCESS) { 3673 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3674 "sd_process_sdconf_file: data " 3675 "property (%s) has no value\n", 3676 dataname_ptr); 3677 continue; 3678 } 3679 3680 version = data_list[0]; 3681 3682 if (version == SD_CONF_VERSION_1) { 3683 sd_tunables values; 3684 3685 /* Set the properties */ 3686 if (sd_chk_vers1_data(un, data_list[1], 3687 &data_list[2], data_list_len, 3688 dataname_ptr) == SD_SUCCESS) { 3689 sd_get_tunables_from_conf(un, 3690 data_list[1], &data_list[2], 3691 &values); 3692 sd_set_vers1_properties(un, 3693 data_list[1], &values); 3694 rval = SD_SUCCESS; 3695 } else { 3696 rval = SD_FAILURE; 3697 } 3698 } else { 3699 scsi_log(SD_DEVINFO(un), sd_label, 3700 CE_WARN, "data property %s version " 3701 "0x%x is invalid.", 3702 dataname_ptr, version); 3703 rval = SD_FAILURE; 3704 } 3705 if (data_list) 3706 ddi_prop_free(data_list); 3707 } 3708 } 3709 } 3710 3711 /* free up the memory allocated by ddi_prop_lookup_string_array(). */ 3712 if (config_list) { 3713 ddi_prop_free(config_list); 3714 } 3715 3716 return (rval); 3717 } 3718 3719 /* 3720 * Function: sd_nvpair_str_decode() 3721 * 3722 * Description: Parse the improved format sd-config-list to get 3723 * each entry of tunable, which includes a name-value pair. 3724 * Then call sd_set_properties() to set the property. 3725 * 3726 * Arguments: un - driver soft state (unit) structure 3727 * nvpair_str - the tunable list 3728 */ 3729 static void 3730 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3731 { 3732 char *nv, *name, *value, *token; 3733 char *nv_lasts, *v_lasts, *x_lasts; 3734 3735 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3736 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3737 token = sd_strtok_r(nv, ":", &v_lasts); 3738 name = sd_strtok_r(token, " \t", &x_lasts); 3739 token = sd_strtok_r(NULL, ":", &v_lasts); 3740 value = sd_strtok_r(token, " \t", &x_lasts); 3741 if (name == NULL || value == NULL) { 3742 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3743 "sd_nvpair_str_decode: " 3744 "name or value is not valid!\n"); 3745 } else { 3746 sd_set_properties(un, name, value); 3747 } 3748 } 3749 } 3750 3751 /* 3752 * Function: sd_strtok_r() 3753 * 3754 * Description: This function uses strpbrk and strspn to break 3755 * string into tokens on sequentially subsequent calls. Return 3756 * NULL when no non-separator characters remain. The first 3757 * argument is NULL for subsequent calls. 
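 *
 * Illustrative usage, mirroring sd_nvpair_str_decode() above (the
 * values are hypothetical):
 *
 *	char buf[] = "throttle-max : 64";
 *	char *lasts;
 *	char *name = sd_strtok_r(buf, ":", &lasts);	returns "throttle-max "
 *	char *val = sd_strtok_r(NULL, ":", &lasts);	returns " 64"
 *
 * Leading and trailing whitespace in each token is then trimmed by a
 * second pass with the " \t" separator set.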
3758 */ 3759 static char * 3760 sd_strtok_r(char *string, const char *sepset, char **lasts) 3761 { 3762 char *q, *r; 3763 3764 /* First or subsequent call */ 3765 if (string == NULL) 3766 string = *lasts; 3767 3768 if (string == NULL) 3769 return (NULL); 3770 3771 /* Skip leading separators */ 3772 q = string + strspn(string, sepset); 3773 3774 if (*q == '\0') 3775 return (NULL); 3776 3777 if ((r = strpbrk(q, sepset)) == NULL) 3778 *lasts = NULL; 3779 else { 3780 *r = '\0'; 3781 *lasts = r + 1; 3782 } 3783 return (q); 3784 } 3785 3786 /* 3787 * Function: sd_set_properties() 3788 * 3789 * Description: Set device properties based on the improved 3790 * format sd-config-list. 3791 * 3792 * Arguments: un - driver soft state (unit) structure 3793 * name - supported tunable name 3794 * value - tunable value 3795 */ 3796 static void 3797 sd_set_properties(struct sd_lun *un, char *name, char *value) 3798 { 3799 char *endptr = NULL; 3800 long val = 0; 3801 3802 if (strcasecmp(name, "cache-nonvolatile") == 0) { 3803 if (strcasecmp(value, "true") == 0) { 3804 un->un_f_suppress_cache_flush = TRUE; 3805 } else if (strcasecmp(value, "false") == 0) { 3806 un->un_f_suppress_cache_flush = FALSE; 3807 } else { 3808 goto value_invalid; 3809 } 3810 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3811 "suppress_cache_flush flag set to %d\n", 3812 un->un_f_suppress_cache_flush); 3813 return; 3814 } 3815 3816 if (strcasecmp(name, "controller-type") == 0) { 3817 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3818 un->un_ctype = val; 3819 } else { 3820 goto value_invalid; 3821 } 3822 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3823 "ctype set to %d\n", un->un_ctype); 3824 return; 3825 } 3826 3827 if (strcasecmp(name, "delay-busy") == 0) { 3828 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3829 un->un_busy_timeout = drv_usectohz(val / 1000); 3830 } else { 3831 goto value_invalid; 3832 } 3833 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3834 "busy_timeout set to %d\n", un->un_busy_timeout); 3835 return; 3836 } 3837 3838 if (strcasecmp(name, "disksort") == 0) { 3839 if (strcasecmp(value, "true") == 0) { 3840 un->un_f_disksort_disabled = FALSE; 3841 } else if (strcasecmp(value, "false") == 0) { 3842 un->un_f_disksort_disabled = TRUE; 3843 } else { 3844 goto value_invalid; 3845 } 3846 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3847 "disksort disabled flag set to %d\n", 3848 un->un_f_disksort_disabled); 3849 return; 3850 } 3851 3852 if (strcasecmp(name, "timeout-releasereservation") == 0) { 3853 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3854 un->un_reserve_release_time = val; 3855 } else { 3856 goto value_invalid; 3857 } 3858 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3859 "reservation release timeout set to %d\n", 3860 un->un_reserve_release_time); 3861 return; 3862 } 3863 3864 if (strcasecmp(name, "reset-lun") == 0) { 3865 if (strcasecmp(value, "true") == 0) { 3866 un->un_f_lun_reset_enabled = TRUE; 3867 } else if (strcasecmp(value, "false") == 0) { 3868 un->un_f_lun_reset_enabled = FALSE; 3869 } else { 3870 goto value_invalid; 3871 } 3872 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3873 "lun reset enabled flag set to %d\n", 3874 un->un_f_lun_reset_enabled); 3875 return; 3876 } 3877 3878 if (strcasecmp(name, "retries-busy") == 0) { 3879 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3880 un->un_busy_retry_count = val; 3881 } else { 3882 goto value_invalid; 3883 } 3884 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3885 "busy retry 
count set to %d\n", un->un_busy_retry_count);
		return;
	}

	if (strcasecmp(name, "retries-timeout") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "timeout retry count set to %d\n", un->un_retry_count);
		return;
	}

	if (strcasecmp(name, "retries-notready") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_notready_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "notready retry count set to %d\n",
		    un->un_notready_retry_count);
		return;
	}

	if (strcasecmp(name, "retries-reset") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_reset_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "reset retry count set to %d\n",
		    un->un_reset_retry_count);
		return;
	}

	if (strcasecmp(name, "throttle-max") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_saved_throttle = un->un_throttle = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "throttle set to %d\n", un->un_throttle);
	}

	if (strcasecmp(name, "throttle-min") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_min_throttle = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "min throttle set to %d\n", un->un_min_throttle);
	}

	/*
	 * Validate the throttle values.
	 * If any of the numbers are invalid, set everything to defaults.
	 */
	if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle > un->un_throttle)) {
		un->un_saved_throttle = un->un_throttle = sd_max_throttle;
		un->un_min_throttle = sd_min_throttle;
	}
	return;

value_invalid:
	SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
	    "value of prop %s is invalid\n", name);
}

/*
 * Function: sd_get_tunables_from_conf()
 *
 * Description: Read the data list from the sd.conf file and pull out
 *		the values that can take numeric arguments, placing each
 *		one in the appropriate sd_tunables member. Since the order
 *		of the data list members varies across platforms, this
 *		function reads them from the data list in a
 *		platform-specific order and places them into the correct
 *		sd_tunables member, which is consistent across all
 *		platforms.
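 *
 * As an illustration (hypothetical values): a version-1 data list of
 * "1,0x3,0x40,0x3" has SD_CONF_BSET_THROTTLE (bit 0) and
 * SD_CONF_BSET_CTYPE (bit 1) set, so data_list[0] (0x40) lands in
 * sdt_throttle and data_list[1] (0x3) lands in sdt_ctype.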
 */
static void
sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
    sd_tunables *values)
{
	int i;
	int mask;

	bzero(values, sizeof (sd_tunables));

	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {

		mask = 1 << i;
		if (mask > flags) {
			break;
		}

		switch (mask & flags) {
		case 0:	/* This mask bit not set in flags */
			continue;
		case SD_CONF_BSET_THROTTLE:
			values->sdt_throttle = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: throttle = %d\n",
			    values->sdt_throttle);
			break;
		case SD_CONF_BSET_CTYPE:
			values->sdt_ctype = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: ctype = %d\n",
			    values->sdt_ctype);
			break;
		case SD_CONF_BSET_NRR_COUNT:
			values->sdt_not_rdy_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
			    values->sdt_not_rdy_retries);
			break;
		case SD_CONF_BSET_BSY_RETRY_COUNT:
			values->sdt_busy_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: busy_retries = %d\n",
			    values->sdt_busy_retries);
			break;
		case SD_CONF_BSET_RST_RETRIES:
			values->sdt_reset_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: reset_retries = %d\n",
			    values->sdt_reset_retries);
			break;
		case SD_CONF_BSET_RSV_REL_TIME:
			values->sdt_reserv_rel_time = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
			    values->sdt_reserv_rel_time);
			break;
		case SD_CONF_BSET_MIN_THROTTLE:
			values->sdt_min_throttle = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: min_throttle = %d\n",
			    values->sdt_min_throttle);
			break;
		case SD_CONF_BSET_DISKSORT_DISABLED:
			values->sdt_disk_sort_dis = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
			    values->sdt_disk_sort_dis);
			break;
		case SD_CONF_BSET_LUN_RESET_ENABLED:
			values->sdt_lun_reset_enable = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: lun_reset_enable = %d\n",
			    values->sdt_lun_reset_enable);
			break;
		case SD_CONF_BSET_CACHE_IS_NV:
			values->sdt_suppress_cache_flush = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: "
			    "suppress_cache_flush = %d\n",
			    values->sdt_suppress_cache_flush);
			break;
		}
	}
}

/*
 * Function: sd_process_sdconf_table
 *
 * Description: Search the static configuration table for a match on the
 *		inquiry vid/pid and update the driver soft state structure
 *		according to the table property values for the device.
4063 * 4064 * The form of a configuration table entry is: 4065 * <vid+pid>,<flags>,<property-data> 4066 * "SEAGATE ST42400N",1,0x40000, 4067 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 4068 * 4069 * Arguments: un - driver soft state (unit) structure 4070 */ 4071 4072 static void 4073 sd_process_sdconf_table(struct sd_lun *un) 4074 { 4075 char *id = NULL; 4076 int table_index; 4077 int idlen; 4078 4079 ASSERT(un != NULL); 4080 for (table_index = 0; table_index < sd_disk_table_size; 4081 table_index++) { 4082 id = sd_disk_table[table_index].device_id; 4083 idlen = strlen(id); 4084 if (idlen == 0) { 4085 continue; 4086 } 4087 4088 /* 4089 * The static configuration table currently does not 4090 * implement version 10 properties. Additionally, 4091 * multiple data-property-name entries are not 4092 * implemented in the static configuration table. 4093 */ 4094 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4095 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4096 "sd_process_sdconf_table: disk %s\n", id); 4097 sd_set_vers1_properties(un, 4098 sd_disk_table[table_index].flags, 4099 sd_disk_table[table_index].properties); 4100 break; 4101 } 4102 } 4103 } 4104 4105 4106 /* 4107 * Function: sd_sdconf_id_match 4108 * 4109 * Description: This local function implements a case sensitive vid/pid 4110 * comparison as well as the boundary cases of wild card and 4111 * multiple blanks. 4112 * 4113 * Note: An implicit assumption made here is that the scsi 4114 * inquiry structure will always keep the vid, pid and 4115 * revision strings in consecutive sequence, so they can be 4116 * read as a single string. If this assumption is not the 4117 * case, a separate string, to be used for the check, needs 4118 * to be built with these strings concatenated. 4119 * 4120 * Arguments: un - driver soft state (unit) structure 4121 * id - table or config file vid/pid 4122 * idlen - length of the vid/pid (bytes) 4123 * 4124 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4125 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4126 */ 4127 4128 static int 4129 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 4130 { 4131 struct scsi_inquiry *sd_inq; 4132 int rval = SD_SUCCESS; 4133 4134 ASSERT(un != NULL); 4135 sd_inq = un->un_sd->sd_inq; 4136 ASSERT(id != NULL); 4137 4138 /* 4139 * We use the inq_vid as a pointer to a buffer containing the 4140 * vid and pid and use the entire vid/pid length of the table 4141 * entry for the comparison. This works because the inq_pid 4142 * data member follows inq_vid in the scsi_inquiry structure. 4143 */ 4144 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4145 /* 4146 * The user id string is compared to the inquiry vid/pid 4147 * using a case insensitive comparison and ignoring 4148 * multiple spaces. 4149 */ 4150 rval = sd_blank_cmp(un, id, idlen); 4151 if (rval != SD_SUCCESS) { 4152 /* 4153 * User id strings that start and end with a "*" 4154 * are a special case. These do not have a 4155 * specific vendor, and the product string can 4156 * appear anywhere in the 16 byte PID portion of 4157 * the inquiry data. This is a simple strstr() 4158 * type search for the user id in the inquiry data. 
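			 * For example, a (hypothetical) table entry id of
			 * "*ST34371*" would match any vendor's device whose
			 * inquiry PID contains the substring "ST34371".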
4159 */ 4160 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4161 char *pidptr = &id[1]; 4162 int i; 4163 int j; 4164 int pidstrlen = idlen - 2; 4165 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4166 pidstrlen; 4167 4168 if (j < 0) { 4169 return (SD_FAILURE); 4170 } 4171 for (i = 0; i < j; i++) { 4172 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4173 pidptr, pidstrlen) == 0) { 4174 rval = SD_SUCCESS; 4175 break; 4176 } 4177 } 4178 } 4179 } 4180 } 4181 return (rval); 4182 } 4183 4184 4185 /* 4186 * Function: sd_blank_cmp 4187 * 4188 * Description: If the id string starts and ends with a space, treat 4189 * multiple consecutive spaces as equivalent to a single 4190 * space. For example, this causes a sd_disk_table entry 4191 * of " NEC CDROM " to match a device's id string of 4192 * "NEC CDROM". 4193 * 4194 * Note: The success exit condition for this routine is if 4195 * the pointer to the table entry is '\0' and the cnt of 4196 * the inquiry length is zero. This will happen if the inquiry 4197 * string returned by the device is padded with spaces to be 4198 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4199 * SCSI spec states that the inquiry string is to be padded with 4200 * spaces. 4201 * 4202 * Arguments: un - driver soft state (unit) structure 4203 * id - table or config file vid/pid 4204 * idlen - length of the vid/pid (bytes) 4205 * 4206 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4207 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4208 */ 4209 4210 static int 4211 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4212 { 4213 char *p1; 4214 char *p2; 4215 int cnt; 4216 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4217 sizeof (SD_INQUIRY(un)->inq_pid); 4218 4219 ASSERT(un != NULL); 4220 p2 = un->un_sd->sd_inq->inq_vid; 4221 ASSERT(id != NULL); 4222 p1 = id; 4223 4224 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4225 /* 4226 * Note: string p1 is terminated by a NUL but string p2 4227 * isn't. The end of p2 is determined by cnt. 4228 */ 4229 for (;;) { 4230 /* skip over any extra blanks in both strings */ 4231 while ((*p1 != '\0') && (*p1 == ' ')) { 4232 p1++; 4233 } 4234 while ((cnt != 0) && (*p2 == ' ')) { 4235 p2++; 4236 cnt--; 4237 } 4238 4239 /* compare the two strings */ 4240 if ((cnt == 0) || 4241 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4242 break; 4243 } 4244 while ((cnt > 0) && 4245 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4246 p1++; 4247 p2++; 4248 cnt--; 4249 } 4250 } 4251 } 4252 4253 /* return SD_SUCCESS if both strings match */ 4254 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE);
}


/*
 * Function: sd_chk_vers1_data
 *
 * Description: Verify the version 1 device properties provided by the
 *		user via the configuration file.
 *
 * Arguments: un - driver soft state (unit) structure
 *		flags - integer mask indicating properties to be set
 *		prop_list - integer list of property values
 *		list_len - total number of elements in the data list,
 *			including the version and flags words
 *		dataname_ptr - name of the data property (used in
 *			diagnostic messages)
 *
 * Return Code: SD_SUCCESS - Indicates the user provided data is valid
 *		SD_FAILURE - Indicates the user provided data is invalid
 */

static int
sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr)
{
	int i;
	int mask;
	int index = 0;

	ASSERT(un != NULL);

	/* Check for a NULL property name and list */
	if (dataname_ptr == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: NULL data property name.");
		return (SD_FAILURE);
	}
	if (prop_list == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: %s NULL data property list.",
		    dataname_ptr);
		return (SD_FAILURE);
	}

	/* Display a warning if undefined bits are set in the flags */
	if (flags & ~SD_CONF_BIT_MASK) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
		    "Properties not set.",
		    (flags & ~SD_CONF_BIT_MASK), dataname_ptr);
		return (SD_FAILURE);
	}

	/*
	 * Verify the length of the list by counting the bits set in the
	 * flags and validating that the property list has at least that
	 * many entries (plus the version and flags words). Note that the
	 * mask must be computed before the test so that every bit,
	 * including the last, is examined exactly once.
	 */
	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
		mask = 1 << i;
		if (flags & mask) {
			index++;
		}
	}
	if (list_len < (index + 2)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: "
		    "Data property list %s size is incorrect. "
		    "Properties not set.", dataname_ptr);
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
		    "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
		return (SD_FAILURE);
	}
	return (SD_SUCCESS);
}


/*
 * Function: sd_set_vers1_properties
 *
 * Description: Set version 1 device properties based on a property list
 *		retrieved from the driver configuration file or static
 *		configuration table. Version 1 properties have the format:
 *
 *	<data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
 *
 *		where the prop0 value will be used to set prop0 if bit0
 *		is set in the flags.
 *
 * Arguments: un - driver soft state (unit) structure
 *		flags - integer mask indicating properties to be set
 *		prop_list - integer list of property values
 */

static void
sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
{
	ASSERT(un != NULL);

	/*
	 * Set the flag to indicate cache is to be disabled. An attempt
	 * to disable the cache via sd_cache_control() will be made
	 * later during attach once the basic initialization is complete.
4355 */ 4356 if (flags & SD_CONF_BSET_NOCACHE) { 4357 un->un_f_opt_disable_cache = TRUE; 4358 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4359 "sd_set_vers1_properties: caching disabled flag set\n"); 4360 } 4361 4362 /* CD-specific configuration parameters */ 4363 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4364 un->un_f_cfg_playmsf_bcd = TRUE; 4365 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4366 "sd_set_vers1_properties: playmsf_bcd set\n"); 4367 } 4368 if (flags & SD_CONF_BSET_READSUB_BCD) { 4369 un->un_f_cfg_readsub_bcd = TRUE; 4370 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4371 "sd_set_vers1_properties: readsub_bcd set\n"); 4372 } 4373 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4374 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4375 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4376 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4377 } 4378 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4379 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4380 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4381 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4382 } 4383 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4384 un->un_f_cfg_no_read_header = TRUE; 4385 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4386 "sd_set_vers1_properties: no_read_header set\n"); 4387 } 4388 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4389 un->un_f_cfg_read_cd_xd4 = TRUE; 4390 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4391 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4392 } 4393 4394 /* Support for devices which do not have valid/unique serial numbers */ 4395 if (flags & SD_CONF_BSET_FAB_DEVID) { 4396 un->un_f_opt_fab_devid = TRUE; 4397 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4398 "sd_set_vers1_properties: fab_devid bit set\n"); 4399 } 4400 4401 /* Support for user throttle configuration */ 4402 if (flags & SD_CONF_BSET_THROTTLE) { 4403 ASSERT(prop_list != NULL); 4404 un->un_saved_throttle = un->un_throttle = 4405 prop_list->sdt_throttle; 4406 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4407 "sd_set_vers1_properties: throttle set to %d\n", 4408 prop_list->sdt_throttle); 4409 } 4410 4411 /* Set the per disk retry count according to the conf file or table. 
*/ 4412 if (flags & SD_CONF_BSET_NRR_COUNT) { 4413 ASSERT(prop_list != NULL); 4414 if (prop_list->sdt_not_rdy_retries) { 4415 un->un_notready_retry_count = 4416 prop_list->sdt_not_rdy_retries; 4417 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4418 "sd_set_vers1_properties: not ready retry count" 4419 " set to %d\n", un->un_notready_retry_count); 4420 } 4421 } 4422 4423 /* The controller type is reported for generic disk driver ioctls */ 4424 if (flags & SD_CONF_BSET_CTYPE) { 4425 ASSERT(prop_list != NULL); 4426 switch (prop_list->sdt_ctype) { 4427 case CTYPE_CDROM: 4428 un->un_ctype = prop_list->sdt_ctype; 4429 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4430 "sd_set_vers1_properties: ctype set to " 4431 "CTYPE_CDROM\n"); 4432 break; 4433 case CTYPE_CCS: 4434 un->un_ctype = prop_list->sdt_ctype; 4435 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4436 "sd_set_vers1_properties: ctype set to " 4437 "CTYPE_CCS\n"); 4438 break; 4439 case CTYPE_ROD: /* RW optical */ 4440 un->un_ctype = prop_list->sdt_ctype; 4441 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4442 "sd_set_vers1_properties: ctype set to " 4443 "CTYPE_ROD\n"); 4444 break; 4445 default: 4446 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4447 "sd_set_vers1_properties: Could not set " 4448 "invalid ctype value (%d)", 4449 prop_list->sdt_ctype); 4450 } 4451 } 4452 4453 /* Purple failover timeout */ 4454 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4455 ASSERT(prop_list != NULL); 4456 un->un_busy_retry_count = 4457 prop_list->sdt_busy_retries; 4458 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4459 "sd_set_vers1_properties: " 4460 "busy retry count set to %d\n", 4461 un->un_busy_retry_count); 4462 } 4463 4464 /* Purple reset retry count */ 4465 if (flags & SD_CONF_BSET_RST_RETRIES) { 4466 ASSERT(prop_list != NULL); 4467 un->un_reset_retry_count = 4468 prop_list->sdt_reset_retries; 4469 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4470 "sd_set_vers1_properties: " 4471 "reset retry count set to %d\n", 4472 un->un_reset_retry_count); 4473 } 4474 4475 /* Purple reservation release timeout */ 4476 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4477 ASSERT(prop_list != NULL); 4478 un->un_reserve_release_time = 4479 prop_list->sdt_reserv_rel_time; 4480 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4481 "sd_set_vers1_properties: " 4482 "reservation release timeout set to %d\n", 4483 un->un_reserve_release_time); 4484 } 4485 4486 /* 4487 * Driver flag telling the driver to verify that no commands are pending 4488 * for a device before issuing a Test Unit Ready. This is a workaround 4489 * for a firmware bug in some Seagate eliteI drives. 4490 */ 4491 if (flags & SD_CONF_BSET_TUR_CHECK) { 4492 un->un_f_cfg_tur_check = TRUE; 4493 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4494 "sd_set_vers1_properties: tur queue check set\n"); 4495 } 4496 4497 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4498 un->un_min_throttle = prop_list->sdt_min_throttle; 4499 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4500 "sd_set_vers1_properties: min throttle set to %d\n", 4501 un->un_min_throttle); 4502 } 4503 4504 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4505 un->un_f_disksort_disabled = 4506 (prop_list->sdt_disk_sort_dis != 0) ? 4507 TRUE : FALSE; 4508 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4509 "sd_set_vers1_properties: disksort disabled " 4510 "flag set to %d\n", 4511 prop_list->sdt_disk_sort_dis); 4512 } 4513 4514 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4515 un->un_f_lun_reset_enabled = 4516 (prop_list->sdt_lun_reset_enable != 0) ? 
4517 TRUE : FALSE; 4518 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4519 "sd_set_vers1_properties: lun reset enabled " 4520 "flag set to %d\n", 4521 prop_list->sdt_lun_reset_enable); 4522 } 4523 4524 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4525 un->un_f_suppress_cache_flush = 4526 (prop_list->sdt_suppress_cache_flush != 0) ? 4527 TRUE : FALSE; 4528 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4529 "sd_set_vers1_properties: suppress_cache_flush " 4530 "flag set to %d\n", 4531 prop_list->sdt_suppress_cache_flush); 4532 } 4533 4534 /* 4535 * Validate the throttle values. 4536 * If any of the numbers are invalid, set everything to defaults. 4537 */ 4538 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4539 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4540 (un->un_min_throttle > un->un_throttle)) { 4541 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4542 un->un_min_throttle = sd_min_throttle; 4543 } 4544 } 4545 4546 /* 4547 * Function: sd_is_lsi() 4548 * 4549 * Description: Check for lsi devices, step through the static device 4550 * table to match vid/pid. 4551 * 4552 * Args: un - ptr to sd_lun 4553 * 4554 * Notes: When creating new LSI property, need to add the new LSI property 4555 * to this function. 4556 */ 4557 static void 4558 sd_is_lsi(struct sd_lun *un) 4559 { 4560 char *id = NULL; 4561 int table_index; 4562 int idlen; 4563 void *prop; 4564 4565 ASSERT(un != NULL); 4566 for (table_index = 0; table_index < sd_disk_table_size; 4567 table_index++) { 4568 id = sd_disk_table[table_index].device_id; 4569 idlen = strlen(id); 4570 if (idlen == 0) { 4571 continue; 4572 } 4573 4574 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4575 prop = sd_disk_table[table_index].properties; 4576 if (prop == &lsi_properties || 4577 prop == &lsi_oem_properties || 4578 prop == &lsi_properties_scsi || 4579 prop == &symbios_properties) { 4580 un->un_f_cfg_is_lsi = TRUE; 4581 } 4582 break; 4583 } 4584 } 4585 } 4586 4587 /* 4588 * Function: sd_get_physical_geometry 4589 * 4590 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4591 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4592 * target, and use this information to initialize the physical 4593 * geometry cache specified by pgeom_p. 4594 * 4595 * MODE SENSE is an optional command, so failure in this case 4596 * does not necessarily denote an error. We want to use the 4597 * MODE SENSE commands to derive the physical geometry of the 4598 * device, but if either command fails, the logical geometry is 4599 * used as the fallback for disk label geometry in cmlb. 4600 * 4601 * This requires that un->un_blockcount and un->un_tgt_blocksize 4602 * have already been initialized for the current target and 4603 * that the current values be passed as args so that we don't 4604 * end up ever trying to use -1 as a valid value. This could 4605 * happen if either value is reset while we're not holding 4606 * the mutex. 4607 * 4608 * Arguments: un - driver soft state (unit) structure 4609 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4610 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4611 * to use the USCSI "direct" chain and bypass the normal 4612 * command waitq. 4613 * 4614 * Context: Kernel thread only (can sleep). 
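 *
 * As a worked example of the adjustment performed below (all values
 * hypothetical): with nhead = 16 and nsect = 63, spc = 1008 blocks
 * per cylinder; if MODE SENSE implies 2048 cylinders (2064384 blocks)
 * while READ CAPACITY reports 2056320 blocks, then
 * g_acyl = (2064384 - 2056320 + 1007) / 1008 = 8 alternate cylinders,
 * and g_ncyl = 2048 - 8 = 2040.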
4615 */ 4616 4617 static int 4618 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4619 diskaddr_t capacity, int lbasize, int path_flag) 4620 { 4621 struct mode_format *page3p; 4622 struct mode_geometry *page4p; 4623 struct mode_header *headerp; 4624 int sector_size; 4625 int nsect; 4626 int nhead; 4627 int ncyl; 4628 int intrlv; 4629 int spc; 4630 diskaddr_t modesense_capacity; 4631 int rpm; 4632 int bd_len; 4633 int mode_header_length; 4634 uchar_t *p3bufp; 4635 uchar_t *p4bufp; 4636 int cdbsize; 4637 int ret = EIO; 4638 sd_ssc_t *ssc; 4639 int status; 4640 4641 ASSERT(un != NULL); 4642 4643 if (lbasize == 0) { 4644 if (ISCD(un)) { 4645 lbasize = 2048; 4646 } else { 4647 lbasize = un->un_sys_blocksize; 4648 } 4649 } 4650 pgeom_p->g_secsize = (unsigned short)lbasize; 4651 4652 /* 4653 * If the unit is a cd/dvd drive MODE SENSE page three 4654 * and MODE SENSE page four are reserved (see SBC spec 4655 * and MMC spec). To prevent soft errors just return 4656 * using the default LBA size. 4657 */ 4658 if (ISCD(un)) 4659 return (ret); 4660 4661 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4662 4663 /* 4664 * Retrieve MODE SENSE page 3 - Format Device Page 4665 */ 4666 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4667 ssc = sd_ssc_init(un); 4668 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 4669 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 4670 if (status != 0) { 4671 SD_ERROR(SD_LOG_COMMON, un, 4672 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4673 goto page3_exit; 4674 } 4675 4676 /* 4677 * Determine size of Block Descriptors in order to locate the mode 4678 * page data. ATAPI devices return 0, SCSI devices should return 4679 * MODE_BLK_DESC_LENGTH. 4680 */ 4681 headerp = (struct mode_header *)p3bufp; 4682 if (un->un_f_cfg_is_atapi == TRUE) { 4683 struct mode_header_grp2 *mhp = 4684 (struct mode_header_grp2 *)headerp; 4685 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4686 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4687 } else { 4688 mode_header_length = MODE_HEADER_LENGTH; 4689 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4690 } 4691 4692 if (bd_len > MODE_BLK_DESC_LENGTH) { 4693 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4694 "received unexpected bd_len of %d, page3\n", bd_len); 4695 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 4696 "sd_get_physical_geometry: received unexpected " 4697 "bd_len of %d, page3", bd_len); 4698 status = EIO; 4699 goto page3_exit; 4700 } 4701 4702 page3p = (struct mode_format *) 4703 ((caddr_t)headerp + mode_header_length + bd_len); 4704 4705 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4706 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4707 "mode sense pg3 code mismatch %d\n", 4708 page3p->mode_page.code); 4709 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 4710 "sd_get_physical_geometry: mode sense pg3 code " 4711 "mismatch %d", page3p->mode_page.code); 4712 status = EIO; 4713 goto page3_exit; 4714 } 4715 4716 /* 4717 * Use this physical geometry data only if BOTH MODE SENSE commands 4718 * complete successfully; otherwise, revert to the logical geometry. 4719 * So, we need to save everything in temporary variables. 
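	 * (Note that the sector size rounding below assumes
	 * un_sys_blocksize is a power of two; masking with
	 * ~(un_sys_blocksize - 1) rounds the reported value down to a
	 * multiple of the system block size.)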
4720 */ 4721 sector_size = BE_16(page3p->data_bytes_sect); 4722 4723 /* 4724 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4725 */ 4726 if (sector_size == 0) { 4727 sector_size = un->un_sys_blocksize; 4728 } else { 4729 sector_size &= ~(un->un_sys_blocksize - 1); 4730 } 4731 4732 nsect = BE_16(page3p->sect_track); 4733 intrlv = BE_16(page3p->interleave); 4734 4735 SD_INFO(SD_LOG_COMMON, un, 4736 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4737 SD_INFO(SD_LOG_COMMON, un, 4738 " mode page: %d; nsect: %d; sector size: %d;\n", 4739 page3p->mode_page.code, nsect, sector_size); 4740 SD_INFO(SD_LOG_COMMON, un, 4741 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4742 BE_16(page3p->track_skew), 4743 BE_16(page3p->cylinder_skew)); 4744 4745 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4746 4747 /* 4748 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4749 */ 4750 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4751 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 4752 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 4753 if (status != 0) { 4754 SD_ERROR(SD_LOG_COMMON, un, 4755 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4756 goto page4_exit; 4757 } 4758 4759 /* 4760 * Determine size of Block Descriptors in order to locate the mode 4761 * page data. ATAPI devices return 0, SCSI devices should return 4762 * MODE_BLK_DESC_LENGTH. 4763 */ 4764 headerp = (struct mode_header *)p4bufp; 4765 if (un->un_f_cfg_is_atapi == TRUE) { 4766 struct mode_header_grp2 *mhp = 4767 (struct mode_header_grp2 *)headerp; 4768 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4769 } else { 4770 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4771 } 4772 4773 if (bd_len > MODE_BLK_DESC_LENGTH) { 4774 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4775 "received unexpected bd_len of %d, page4\n", bd_len); 4776 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 4777 "sd_get_physical_geometry: received unexpected " 4778 "bd_len of %d, page4", bd_len); 4779 status = EIO; 4780 goto page4_exit; 4781 } 4782 4783 page4p = (struct mode_geometry *) 4784 ((caddr_t)headerp + mode_header_length + bd_len); 4785 4786 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4787 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4788 "mode sense pg4 code mismatch %d\n", 4789 page4p->mode_page.code); 4790 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 4791 "sd_get_physical_geometry: mode sense pg4 code " 4792 "mismatch %d", page4p->mode_page.code); 4793 status = EIO; 4794 goto page4_exit; 4795 } 4796 4797 /* 4798 * Stash the data now, after we know that both commands completed. 4799 */ 4800 4801 4802 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4803 spc = nhead * nsect; 4804 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4805 rpm = BE_16(page4p->rpm); 4806 4807 modesense_capacity = spc * ncyl; 4808 4809 SD_INFO(SD_LOG_COMMON, un, 4810 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4811 SD_INFO(SD_LOG_COMMON, un, 4812 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4813 SD_INFO(SD_LOG_COMMON, un, 4814 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4815 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4816 (void *)pgeom_p, capacity); 4817 4818 /* 4819 * Compensate if the drive's geometry is not rectangular, i.e., 4820 * the product of C * H * S returned by MODE SENSE >= that returned 4821 * by read capacity. 
This is an idiosyncrasy of the original x86 4822 * disk subsystem. 4823 */ 4824 if (modesense_capacity >= capacity) { 4825 SD_INFO(SD_LOG_COMMON, un, 4826 "sd_get_physical_geometry: adjusting acyl; " 4827 "old: %d; new: %d\n", pgeom_p->g_acyl, 4828 (modesense_capacity - capacity + spc - 1) / spc); 4829 if (sector_size != 0) { 4830 /* 1243403: NEC D38x7 drives don't support sec size */ 4831 pgeom_p->g_secsize = (unsigned short)sector_size; 4832 } 4833 pgeom_p->g_nsect = (unsigned short)nsect; 4834 pgeom_p->g_nhead = (unsigned short)nhead; 4835 pgeom_p->g_capacity = capacity; 4836 pgeom_p->g_acyl = 4837 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4838 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4839 } 4840 4841 pgeom_p->g_rpm = (unsigned short)rpm; 4842 pgeom_p->g_intrlv = (unsigned short)intrlv; 4843 ret = 0; 4844 4845 SD_INFO(SD_LOG_COMMON, un, 4846 "sd_get_physical_geometry: mode sense geometry:\n"); 4847 SD_INFO(SD_LOG_COMMON, un, 4848 " nsect: %d; sector size: %d; interlv: %d\n", 4849 nsect, sector_size, intrlv); 4850 SD_INFO(SD_LOG_COMMON, un, 4851 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4852 nhead, ncyl, rpm, modesense_capacity); 4853 SD_INFO(SD_LOG_COMMON, un, 4854 "sd_get_physical_geometry: (cached)\n"); 4855 SD_INFO(SD_LOG_COMMON, un, 4856 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4857 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4858 pgeom_p->g_nhead, pgeom_p->g_nsect); 4859 SD_INFO(SD_LOG_COMMON, un, 4860 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4861 pgeom_p->g_secsize, pgeom_p->g_capacity, 4862 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4863 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4864 4865 page4_exit: 4866 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4867 4868 page3_exit: 4869 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4870 4871 if (status != 0) { 4872 if (status == EIO) { 4873 /* 4874 * Some disks do not support mode sense(6), we 4875 * should ignore this kind of error(sense key is 4876 * 0x5 - illegal request). 4877 */ 4878 uint8_t *sensep; 4879 int senlen; 4880 4881 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 4882 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 4883 ssc->ssc_uscsi_cmd->uscsi_rqresid); 4884 4885 if (senlen > 0 && 4886 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 4887 sd_ssc_assessment(ssc, 4888 SD_FMT_IGNORE_COMPROMISE); 4889 } else { 4890 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 4891 } 4892 } else { 4893 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 4894 } 4895 } 4896 sd_ssc_fini(ssc); 4897 return (ret); 4898 } 4899 4900 /* 4901 * Function: sd_get_virtual_geometry 4902 * 4903 * Description: Ask the controller to tell us about the target device. 
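 *		The HBA's "geometry" capability packs the head count in
 *		the upper 16 bits of the returned value and the sectors
 *		per track in the lower 16 bits; e.g. (hypothetical value)
 *		0x0010003f would describe 16 heads and 63 sectors per
 *		track.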
4904 * 4905 * Arguments: un - pointer to softstate 4906 * capacity - disk capacity in #blocks 4907 * lbasize - disk block size in bytes 4908 * 4909 * Context: Kernel thread only 4910 */ 4911 4912 static int 4913 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4914 diskaddr_t capacity, int lbasize) 4915 { 4916 uint_t geombuf; 4917 int spc; 4918 4919 ASSERT(un != NULL); 4920 4921 /* Set sector size, and total number of sectors */ 4922 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4923 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4924 4925 /* Let the HBA tell us its geometry */ 4926 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4927 4928 /* A value of -1 indicates an undefined "geometry" property */ 4929 if (geombuf == (-1)) { 4930 return (EINVAL); 4931 } 4932 4933 /* Initialize the logical geometry cache. */ 4934 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4935 lgeom_p->g_nsect = geombuf & 0xffff; 4936 lgeom_p->g_secsize = un->un_sys_blocksize; 4937 4938 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4939 4940 /* 4941 * Note: The driver originally converted the capacity value from 4942 * target blocks to system blocks. However, the capacity value passed 4943 * to this routine is already in terms of system blocks (this scaling 4944 * is done when the READ CAPACITY command is issued and processed). 4945 * This 'error' may have gone undetected because the usage of g_ncyl 4946 * (which is based upon g_capacity) is very limited within the driver 4947 */ 4948 lgeom_p->g_capacity = capacity; 4949 4950 /* 4951 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The 4952 * hba may return zero values if the device has been removed. 4953 */ 4954 if (spc == 0) { 4955 lgeom_p->g_ncyl = 0; 4956 } else { 4957 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4958 } 4959 lgeom_p->g_acyl = 0; 4960 4961 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4962 return (0); 4963 4964 } 4965 /* 4966 * Function: sd_update_block_info 4967 * 4968 * Description: Calculate a byte count to sector count bitshift value 4969 * from sector size. 4970 * 4971 * Arguments: un: unit struct. 4972 * lbasize: new target sector size 4973 * capacity: new target capacity, ie. block count 4974 * 4975 * Context: Kernel thread context 4976 */ 4977 4978 static void 4979 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4980 { 4981 if (lbasize != 0) { 4982 un->un_tgt_blocksize = lbasize; 4983 un->un_f_tgt_blocksize_is_valid = TRUE; 4984 } 4985 4986 if (capacity != 0) { 4987 un->un_blockcount = capacity; 4988 un->un_f_blockcount_is_valid = TRUE; 4989 } 4990 } 4991 4992 4993 /* 4994 * Function: sd_register_devid 4995 * 4996 * Description: This routine will obtain the device id information from the 4997 * target, obtain the serial number, and register the device 4998 * id with the ddi framework. 4999 * 5000 * Arguments: devi - the system's dev_info_t for the device. 
5001 * un - driver soft state (unit) structure 5002 * reservation_flag - indicates if a reservation conflict 5003 * occurred during attach 5004 * 5005 * Context: Kernel Thread 5006 */ 5007 static void 5008 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag) 5009 { 5010 int rval = 0; 5011 uchar_t *inq80 = NULL; 5012 size_t inq80_len = MAX_INQUIRY_SIZE; 5013 size_t inq80_resid = 0; 5014 uchar_t *inq83 = NULL; 5015 size_t inq83_len = MAX_INQUIRY_SIZE; 5016 size_t inq83_resid = 0; 5017 int dlen, len; 5018 char *sn; 5019 struct sd_lun *un; 5020 5021 ASSERT(ssc != NULL); 5022 un = ssc->ssc_un; 5023 ASSERT(un != NULL); 5024 ASSERT(mutex_owned(SD_MUTEX(un))); 5025 ASSERT((SD_DEVINFO(un)) == devi); 5026 5027 /* 5028 * If transport has already registered a devid for this target 5029 * then that takes precedence over the driver's determination 5030 * of the devid. 5031 */ 5032 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 5033 ASSERT(un->un_devid); 5034 return; /* use devid registered by the transport */ 5035 } 5036 5037 /* 5038 * This is the case of antiquated Sun disk drives that have the 5039 * FAB_DEVID property set in the disk_table. These drives 5040 * manage the devid's by storing them in last 2 available sectors 5041 * on the drive and have them fabricated by the ddi layer by calling 5042 * ddi_devid_init and passing the DEVID_FAB flag. 5043 */ 5044 if (un->un_f_opt_fab_devid == TRUE) { 5045 /* 5046 * Depending on EINVAL isn't reliable, since a reserved disk 5047 * may result in invalid geometry, so check to make sure a 5048 * reservation conflict did not occur during attach. 5049 */ 5050 if ((sd_get_devid(ssc) == EINVAL) && 5051 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5052 /* 5053 * The devid is invalid AND there is no reservation 5054 * conflict. Fabricate a new devid. 5055 */ 5056 (void) sd_create_devid(ssc); 5057 } 5058 5059 /* Register the devid if it exists */ 5060 if (un->un_devid != NULL) { 5061 (void) ddi_devid_register(SD_DEVINFO(un), 5062 un->un_devid); 5063 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5064 "sd_register_devid: Devid Fabricated\n"); 5065 } 5066 return; 5067 } 5068 5069 /* 5070 * We check the availability of the World Wide Name (0x83) and Unit 5071 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 5072 * un_vpd_page_mask from them, we decide which way to get the WWN. If 5073 * 0x83 is available, that is the best choice. Our next choice is 5074 * 0x80. If neither are available, we munge the devid from the device 5075 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 5076 * to fabricate a devid for non-Sun qualified disks. 5077 */ 5078 if (sd_check_vpd_page_support(ssc) == 0) { 5079 /* collect page 80 data if available */ 5080 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5081 5082 mutex_exit(SD_MUTEX(un)); 5083 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5084 5085 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len, 5086 0x01, 0x80, &inq80_resid); 5087 5088 if (rval != 0) { 5089 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5090 kmem_free(inq80, inq80_len); 5091 inq80 = NULL; 5092 inq80_len = 0; 5093 } else if (ddi_prop_exists( 5094 DDI_DEV_T_NONE, SD_DEVINFO(un), 5095 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 5096 INQUIRY_SERIAL_NO) == 0) { 5097 /* 5098 * If we don't already have a serial number 5099 * property, do quick verify of data returned 5100 * and define property. 
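				 * (Per the layout of the unit-serial-number
				 * VPD page: byte 3 holds the page length and
				 * the serial number itself begins at byte 4,
				 * which is what the dlen/len checks below
				 * rely on.)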
				 */
				dlen = inq80_len - inq80_resid;
				len = (size_t)inq80[3];
				if ((dlen >= 4) && ((len + 4) <= dlen)) {
					/*
					 * Ensure sn termination, skip leading
					 * blanks, and create property
					 * 'inquiry-serial-no'.
					 */
					sn = (char *)&inq80[4];
					sn[len] = 0;
					while (*sn && (*sn == ' '))
						sn++;
					if (*sn) {
						(void) ddi_prop_update_string(
						    DDI_DEV_T_NONE,
						    SD_DEVINFO(un),
						    INQUIRY_SERIAL_NO, sn);
					}
				}
			}
			mutex_enter(SD_MUTEX(un));
		}

		/* collect page 83 data if available */
		if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
			mutex_exit(SD_MUTEX(un));
			inq83 = kmem_zalloc(inq83_len, KM_SLEEP);

			rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len,
			    0x01, 0x83, &inq83_resid);

			if (rval != 0) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				kmem_free(inq83, inq83_len);
				inq83 = NULL;
				inq83_len = 0;
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/* encode best devid possible based on data available */
	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
	    (char *)ddi_driver_name(SD_DEVINFO(un)),
	    (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
	    inq80, inq80_len - inq80_resid, inq83, inq83_len -
	    inq83_resid, &un->un_devid) == DDI_SUCCESS) {

		/* devid successfully encoded, register devid */
		(void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);

	} else {
		/*
		 * Unable to encode a devid based on data available.
		 * This is not a Sun qualified disk. Older Sun disk
		 * drives that have the SD_FAB_DEVID property
		 * set in the disk_table and non-Sun qualified
		 * disks are treated in the same manner. These
		 * drives manage their devids by storing them in
		 * the last 2 available sectors on the drive and
		 * have them fabricated by the ddi layer by
		 * calling ddi_devid_init and passing the
		 * DEVID_FAB flag.
		 * Create a fabricated devid only if no fabricated
		 * devid already exists.
		 */
		if (sd_get_devid(ssc) == EINVAL) {
			(void) sd_create_devid(ssc);
		}
		un->un_f_opt_fab_devid = TRUE;

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: devid fabricated using "
			    "ddi framework\n");
		}
	}

	/* clean up resources */
	if (inq80 != NULL) {
		kmem_free(inq80, inq80_len);
	}
	if (inq83 != NULL) {
		kmem_free(inq83, inq83_len);
	}
}



/*
 * Function: sd_get_devid
 *
 * Description: This routine will return 0 if a valid device id has been
 *		obtained from the target and stored in the soft state. If a
 *		valid device id has not been previously read and stored, a
 *		read attempt will be made.
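 *
 *		A typical caller, as in sd_register_devid() above, does
 *		the following (sketch), with SD_MUTEX held on entry:
 *
 *			if (sd_get_devid(ssc) == EINVAL)
 *				(void) sd_create_devid(ssc);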
 *
 * Arguments:	ssc - access handle to the unit's soft state (sd_ssc_t)
 *
 * Return Code: 0 if we successfully get the device id
 *
 * Context: Kernel Thread
 */

static int
sd_get_devid(sd_ssc_t *ssc)
{
	struct dk_devid		*dkdevid;
	ddi_devid_t		tmpid;
	uint_t			*ip;
	size_t			sz;
	diskaddr_t		blk;
	int			status;
	int			chksum;
	int			i;
	size_t			buffer_size;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
	    un);

	if (un->un_devid != NULL) {
		return (0);
	}

	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (EINVAL);
	}

	/*
	 * Read and verify the device id, stored in the reserved cylinders
	 * at the end of the disk. The backup label is on the odd sectors of
	 * the last track of the last cylinder. The device id will be on a
	 * track of the next-to-last cylinder.
	 */
	mutex_enter(SD_MUTEX(un));
	buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid));
	mutex_exit(SD_MUTEX(un));
	dkdevid = kmem_alloc(buffer_size, KM_SLEEP);
	status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk,
	    SD_PATH_DIRECT);

	if (status != 0) {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto error;
	}

	/* Validate the revision */
	if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
		status = EINVAL;
		goto error;
	}

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((un->un_sys_blocksize - sizeof (int)) / sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Compare the checksums */
	if (DKD_GETCHKSUM(dkdevid) != chksum) {
		status = EINVAL;
		goto error;
	}

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
		status = EINVAL;
		goto error;
	}

	/*
	 * Store the device id in the driver soft state
	 */
	sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
	tmpid = kmem_alloc(sz, KM_SLEEP);

	mutex_enter(SD_MUTEX(un));

	un->un_devid = tmpid;
	bcopy(&dkdevid->dkd_devid, un->un_devid, sz);

	kmem_free(dkdevid, buffer_size);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un);

	return (status);
error:
	mutex_enter(SD_MUTEX(un));
	kmem_free(dkdevid, buffer_size);
	return (status);
}


/*
 * Function: sd_create_devid
 *
 * Description: This routine will fabricate the device id and write it
 *		to the disk.
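 *
 *		In sketch form, the flow below is ddi_devid_init() with the
 *		DEVID_FAB flag to fabricate the devid, followed by
 *		sd_write_deviceid() to persist it in the reserved sector;
 *		on a write failure the fabricated devid is freed again and
 *		NULL is left in un_devid.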
 *
 * Arguments:	ssc - access handle to the unit's soft state (sd_ssc_t)
 *
 * Return Code: value of the fabricated device id
 *
 * Context: Kernel Thread
 */

static ddi_devid_t
sd_create_devid(sd_ssc_t *ssc)
{
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/* Fabricate the devid */
	if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
	    == DDI_FAILURE) {
		return (NULL);
	}

	/* Write the devid to disk */
	if (sd_write_deviceid(ssc) != 0) {
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	return (un->un_devid);
}


/*
 * Function: sd_write_deviceid
 *
 * Description: This routine will write the device id to the disk
 *		reserved sector.
 *
 * Arguments:	ssc - access handle to the unit's soft state (sd_ssc_t)
 *
 * Return Code: -1 if the devid block cannot be located, otherwise the
 *		value returned by sd_send_scsi_WRITE
 *
 * Context: Kernel Thread
 */

static int
sd_write_deviceid(sd_ssc_t *ssc)
{
	struct dk_devid		*dkdevid;
	diskaddr_t		blk;
	uint_t			*ip, chksum;
	int			status;
	int			i;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (-1);
	}


	/* Allocate the buffer */
	dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);

	/* Fill in the revision */
	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	mutex_enter(SD_MUTEX(un));
	bcopy(un->un_devid, &dkdevid->dkd_devid,
	    ddi_devid_sizeof(un->un_devid));
	mutex_exit(SD_MUTEX(un));

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((un->un_sys_blocksize - sizeof (int)) / sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Fill in the checksum */
	DKD_FORMCHKSUM(chksum, dkdevid);

	/* Write the reserved sector */
	status = sd_send_scsi_WRITE(ssc, dkdevid, un->un_sys_blocksize, blk,
	    SD_PATH_DIRECT);
	if (status != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	kmem_free(dkdevid, un->un_sys_blocksize);

	mutex_enter(SD_MUTEX(un));
	return (status);
}


/*
 * Function: sd_check_vpd_page_support
 *
 * Description: This routine sends an inquiry command with the EVPD bit set
 *		and a page code of 0x00 to the device. It is used to
 *		determine which vital product pages are available to find
 *		the devid. We are looking for pages 0x83 or 0x80. If we
 *		return a negative 1, the device does not support that
 *		command.
 *
 * Arguments:	ssc - access handle to the unit's soft state (sd_ssc_t)
 *
 * Return Code: 0 - success
 *		1 - check condition
 *		-1 - the device does not support VPD pages
 *
 * Context: This routine can sleep.
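 *
 *		Layout of the page 0x00 response that the scan below
 *		assumes (sketch; offsets into page_list): bytes 0-3 are
 *		the header, with page_list[3] holding the page length,
 *		and the supported page codes follow from byte 4 in
 *		ascending order (0x00, 0x80, 0x81, 0x82, 0x83, 0x86, ...).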
 */

static int
sd_check_vpd_page_support(sd_ssc_t *ssc)
{
	uchar_t	*page_list	= NULL;
	uchar_t	page_length	= 0xff;	/* Use max possible length */
	uchar_t	evpd		= 0x01;	/* Set the EVPD bit */
	uchar_t	page_code	= 0x00;	/* Supported VPD Pages */
	int	rval		= 0;
	int	counter;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));

	/*
	 * We'll set the page length to the maximum to save figuring it out
	 * with an additional call.
	 */
	page_list = kmem_zalloc(page_length, KM_SLEEP);

	rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd,
	    page_code, NULL);

	if (rval != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Now we must validate that the device accepted the command, as some
	 * drives do not support it. If the drive does support it, we will
	 * return 0, and the supported pages will be in un_vpd_page_mask. If
	 * not, we return -1.
	 */
	if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) {
		/* Loop to find one of the 2 pages we need */
		counter = 4;	/* Supported pages start at byte 4, with 0x00 */

		/*
		 * Pages are returned in ascending order, and 0x83 is what we
		 * are hoping for.
		 */
		while ((page_list[counter] <= 0x86) &&
		    (counter <= (page_list[VPD_PAGE_LENGTH] +
		    VPD_HEAD_OFFSET))) {
			/*
			 * page_list[VPD_PAGE_LENGTH] counts the page codes
			 * that follow the header, so the last valid index
			 * is page_list[VPD_PAGE_LENGTH] + VPD_HEAD_OFFSET.
			 */

			switch (page_list[counter]) {
			case 0x00:
				un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG;
				break;
			case 0x80:
				un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG;
				break;
			case 0x81:
				un->un_vpd_page_mask |= SD_VPD_OPERATING_PG;
				break;
			case 0x82:
				un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG;
				break;
			case 0x83:
				un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG;
				break;
			case 0x86:
				un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG;
				break;
			}
			counter++;
		}

	} else {
		rval = -1;

		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_check_vpd_page_support: This drive does not implement "
		    "VPD pages.\n");
	}

	kmem_free(page_list, page_length);

	return (rval);
}


/*
 * Function: sd_setup_pm
 *
 * Description: Initialize Power Management on the device
 *
 * Context: Kernel Thread
 */

static void
sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi)
{
	uint_t		log_page_size;
	uchar_t		*log_page_data;
	int		rval = 0;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * Since we are called from attach, holding a mutex for
	 * un is unnecessary. Because some of the routines called
	 * from here require SD_MUTEX to not be held, assert this
	 * right up front.
	 */
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/*
	 * Since the sd device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
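	 * (cpr keys off the "pm-hardware-state" property created just
	 * below; the value "needs-suspend-resume" requests that
	 * DDI_SUSPEND/DDI_RESUME be issued to this device even though
	 * it has no 'reg' property.)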
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "pm-hardware-state", "needs-suspend-resume");

	/*
	 * This complies with the new power management framework
	 * for certain desktop machines. Create the pm_components
	 * property as a string array property.
	 */
	if (un->un_f_pm_supported) {
		/*
		 * Not all devices have a motor, so try it first. Some
		 * devices may return ILLEGAL REQUEST and some will hang.
		 * The following START_STOP_UNIT is used to check if the
		 * target device has a motor.
		 */
		un->un_f_start_stop_supported = TRUE;
		rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
		    SD_PATH_DIRECT);

		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			un->un_f_start_stop_supported = FALSE;
		}

		/*
		 * Create the pm properties anyway; otherwise the parent
		 * can't go to sleep.
		 */
		(void) sd_create_pm_components(devi, un);
		un->un_f_pm_is_enabled = TRUE;
		return;
	}

	if (!un->un_f_log_sense_supported) {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
		return;
	}

	rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);

#ifdef	SDDEBUG
	if (sd_force_pm_supported) {
		/* Force a successful result */
		rval = 1;
	}
#endif

	/*
	 * If the start-stop cycle counter log page is not supported
	 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0)
	 * then we should not create the pm_components property.
	 */
	if (rval == -1) {
		/*
		 * Error.
		 * Reading log sense failed, most likely this is
		 * an older drive that does not support log sense.
		 * If this fails, auto-pm is not supported.
		 */
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;

	} else if (rval == 0) {
		/*
		 * Page not found.
		 * The start stop cycle counter is implemented as page
		 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For
		 * newer disks it is implemented as START_STOP_CYCLE_PAGE
		 * (0xE).
		 */
		if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) {
			/*
			 * Page found, use this one.
			 */
			un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
			un->un_f_pm_is_enabled = TRUE;
		} else {
			/*
			 * Error or page not found.
			 * auto-pm is not supported for this device.
			 */
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}
	} else {
		/*
		 * Page found, use it.
		 */
		un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
		un->un_f_pm_is_enabled = TRUE;
	}


	if (un->un_f_pm_is_enabled == TRUE) {
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
		    log_page_size, un->un_start_stop_cycle_page,
		    0x01, 0, SD_PATH_DIRECT);

		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif

		/*
		 * If the LOG SENSE for the start/stop cycle counter page
		 * succeeds, then power management is supported and we can
		 * enable auto-pm.
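		 *
		 * (For reference, sdpower() later extracts the cycle
		 * counts from this same page, in sketch form:
		 *
		 *	maxcycles = (data[0x1c] << 24) |
		 *	    (data[0x1d] << 16) | (data[0x1e] << 8) |
		 *	    data[0x1f];
		 *	ncycles = (data[0x24] << 24) |
		 *	    (data[0x25] << 16) | (data[0x26] << 8) |
		 *	    data[0x27];
		 *
		 * where 'data' stands for the returned log page.)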
		 */
		if (rval == 0) {
			(void) sd_create_pm_components(devi, un);
		} else {
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}

		kmem_free(log_page_data, log_page_size);
	}
}


/*
 * Function: sd_create_pm_components
 *
 * Description: Initialize PM property.
 *
 * Context: Kernel thread context
 */

static void
sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
{
	char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL };

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
		/*
		 * When components are initially created they are idle;
		 * power up any non-removables.
		 * Note: the return value of pm_raise_power can't be used
		 * for determining if PM should be enabled for this device.
		 * Even if you check the return values and remove the
		 * property created above, the PM framework will not honor
		 * the change after the first call to pm_raise_power. Hence,
		 * removal of that property does not help if pm_raise_power
		 * fails. In the case of removable media, the start/stop
		 * will fail if the media is not present.
		 */
		if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
		    SD_SPINDLE_ON) == DDI_SUCCESS)) {
			mutex_enter(SD_MUTEX(un));
			un->un_power_level = SD_SPINDLE_ON;
			mutex_enter(&un->un_pm_mutex);
			/* Set to on and not busy. */
			un->un_pm_count = 0;
		} else {
			mutex_enter(SD_MUTEX(un));
			un->un_power_level = SD_SPINDLE_OFF;
			mutex_enter(&un->un_pm_mutex);
			/* Set to off. */
			un->un_pm_count = -1;
		}
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
	} else {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
	}
}


/*
 * Function: sd_ddi_suspend
 *
 * Description: Performs system power-down operations. This includes
 *		setting the drive state to indicate it is suspended so
 *		that no new commands will be accepted. Also, wait for
 *		all commands that are in transport or queued to a timer
 *		for retry to complete. All timeout threads are cancelled.
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_suspend(dev_info_t *devi)
{
	struct sd_lun	*un;
	clock_t		wait_cmds_complete;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");

	mutex_enter(SD_MUTEX(un));

	/* Return success if the device is already suspended. */
	if (un->un_state == SD_STATE_SUSPENDED) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device already suspended, exiting\n");
		return (DDI_SUCCESS);
	}

	/* Return failure if the device is being used by HA */
	if (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in use by HA, exiting\n");
		return (DDI_FAILURE);
	}

	/*
	 * Return failure if the device is in a resource wait
	 * or power changing state.
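	 *
	 * (A note on the teardown below: each timeout is cancelled with
	 * the pattern, in sketch form, where un_foo_timeid stands for
	 * any of the timeout ids:
	 *
	 *	timeout_id_t temp_id = un->un_foo_timeid;
	 *	un->un_foo_timeid = NULL;
	 *	mutex_exit(SD_MUTEX(un));
	 *	(void) untimeout(temp_id);
	 *	mutex_enter(SD_MUTEX(un));
	 *
	 * SD_MUTEX is dropped around untimeout(9F) so that a handler
	 * blocked on the mutex can complete instead of deadlocking.)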
	 */
	if ((un->un_state == SD_STATE_RWAIT) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in resource wait state, exiting\n");
		return (DDI_FAILURE);
	}


	un->un_save_state = un->un_last_state;
	New_state(un, SD_STATE_SUSPENDED);

	/*
	 * Wait for all commands that are in transport or queued to a timer
	 * for retry to complete.
	 *
	 * While waiting, no new commands will be accepted or sent because of
	 * the new state we set above.
	 *
	 * Wait until the current operation has completed. If we are in the
	 * resource wait state (with an intr outstanding) then we need to
	 * wait till the intr completes and starts the next cmd. We want to
	 * wait for SD_WAIT_CMDS_COMPLETE seconds before failing the
	 * DDI_SUSPEND.
	 */
	wait_cmds_complete = ddi_get_lbolt() +
	    (sd_wait_cmds_complete * drv_usectohz(1000000));

	while (un->un_ncmds_in_transport != 0) {
		/*
		 * Fail if commands do not finish in the specified time.
		 */
		if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
		    wait_cmds_complete) == -1) {
			/*
			 * Undo the state changes made above. Everything
			 * must go back to its original value.
			 */
			Restore_state(un);
			un->un_last_state = un->un_save_state;
			/* Wake up any threads that might be waiting. */
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_ERROR(SD_LOG_IO_PM, un,
			    "sd_ddi_suspend: failed due to outstanding cmds\n");
			SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
			return (DDI_FAILURE);
		}
	}

	/*
	 * Cancel SCSI watch thread and timeouts, if any are active
	 */

	if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
		opaque_t temp_token = un->un_swr_token;
		mutex_exit(SD_MUTEX(un));
		scsi_watch_suspend(temp_token);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_timeid;
		un->un_pm_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	if (un->un_retry_timeid != NULL) {
		timeout_id_t temp_id = un->un_retry_timeid;
		un->un_retry_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));

		if (un->un_retry_bp != NULL) {
			un->un_retry_bp->av_forw = un->un_waitq_headp;
			un->un_waitq_headp = un->un_retry_bp;
			if (un->un_waitq_tailp == NULL) {
				un->un_waitq_tailp = un->un_retry_bp;
			}
			un->un_retry_bp = NULL;
			un->un_retry_statp = NULL;
		}
	}

	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Remove callbacks for insert and remove events
		 */
		if (un->un_insert_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_insert_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_insert_event = NULL;
		}

		if (un->un_remove_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_remove_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_remove_event = NULL;
		}
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_suspend
 *
 * Description: Set the drive state to low power.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_suspend(struct sd_lun *un)
{
	ASSERT(un != NULL);
	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	/*
	 * Exit if power management is not enabled for this device, or if
	 * the device is being used by HA.
	 */
	if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
		return (DDI_SUCCESS);
	}

	SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * See if the device is not busy, i.e.:
	 *    - we have no commands in the driver for this device
	 *    - not waiting for resources
	 */
	if ((un->un_ncmds_in_driver == 0) &&
	    (un->un_state != SD_STATE_RWAIT)) {
		/*
		 * The device is not busy, so it is OK to go to low power
		 * state. Indicate low power, but rely on someone else to
		 * actually change it.
		 */
		mutex_enter(&un->un_pm_mutex);
		un->un_pm_count = -1;
		mutex_exit(&un->un_pm_mutex);
		un->un_power_level = SD_SPINDLE_OFF;
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_resume
 *
 * Description: Performs system power-up operations.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_resume(dev_info_t *devi)
{
	struct sd_lun	*un;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");

	mutex_enter(SD_MUTEX(un));
	Restore_state(un);

	/*
	 * Restore the state which was saved to give the
	 * right state in un_last_state
	 */
	un->un_last_state = un->un_save_state;
	/*
	 * Note: throttle comes back at full.
	 * Also note: this MUST be done before calling pm_raise_power
	 * otherwise the system can get hung in biowait. The scenario where
	 * this'll happen is under cpr suspend. Writing of the system
	 * state goes through sddump, which writes 0 to un_throttle. If
	 * writing the system state then fails, for example if the partition
	 * is too small, then cpr attempts a resume. If throttle isn't
	 * restored from the saved value until after calling pm_raise_power
	 * then cmds sent in sdpower are not transported and
	 * sd_send_scsi_cmd hangs in biowait.
	 */
	un->un_throttle = un->un_saved_throttle;

	/*
	 * The chance of failure is very rare as the only command done in the
	 * power entry point is the START command when you transition from
	 * 0->1 or unknown->1. Put it to SPINDLE ON state irrespective of the
	 * state at which suspend was done. Ignore the return value as the
	 * resume should not be failed. In the case of removable media the
	 * media need not be inserted and hence there is a chance that raise
	 * power will fail with media not present.
	 */
	if (un->un_f_attach_spinup) {
		mutex_exit(SD_MUTEX(un));
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Don't broadcast to the suspend cv and therefore possibly
	 * start I/O until after power has been restored.
	 */
	cv_broadcast(&un->un_suspend_cv);
	cv_broadcast(&un->un_state_cv);

	/* restart thread */
	if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
		scsi_watch_resume(un->un_swr_token);
	}

#if (defined(__fibre))
	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Add callbacks for insert and remove events
		 */
		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
			sd_init_event_callbacks(un);
		}
	}
#endif

	/*
	 * Transport any pending commands to the target.
	 *
	 * If this is a low-activity device, commands in the queue will have
	 * to wait until new commands come in, which may take a while. Also,
	 * we specifically don't check un_ncmds_in_transport because we know
	 * that there really are no commands in progress after the unit was
	 * suspended and we could have reached the throttle level, been
	 * suspended, and have no new commands coming in for a while. Highly
	 * unlikely, but so is the low-activity disk scenario.
	 */
	ddi_xbuf_dispatch(un->un_xbuf_attr);

	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_resume
 *
 * Description: Set the drive state to powered on.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_resume(struct sd_lun *un)
{
	ASSERT(un != NULL);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));
	un->un_power_level = SD_SPINDLE_ON;

	ASSERT(!mutex_owned(&un->un_pm_mutex));
	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		un->un_pm_count++;
		ASSERT(un->un_pm_count == 0);
		/*
		 * Note: no longer do the cv_broadcast on un_suspend_cv. The
		 * un_suspend_cv is for a system resume, not a power
		 * management device resume.
		 * (4297749)
		 * cv_broadcast(&un->un_suspend_cv);
		 */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));

	return (DDI_SUCCESS);
}


/*
 * Function: sd_pm_idletimeout_handler
 *
 * Description: A timer routine that's active only while a device is busy.
 *		The purpose is to extend slightly the pm framework's busy
 *		view of the device to prevent busy/idle thrashing for
 *		back-to-back commands. Do this by comparing the current time
 *		to the time at which the last command completed and when the
 *		difference is greater than sd_pm_idletime, call
 *		pm_idle_component. In addition to indicating idle to the pm
 *		framework, update the chain type to again use the internal pm
 *		layers of the driver.
 *
 * Arguments: arg - driver soft state (unit) structure
 *
 * Context: Executes in a timeout(9F) thread context
 */

static void
sd_pm_idletimeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	time_t	now;

	mutex_enter(&sd_detach_mutex);
	if (un->un_detach_count != 0) {
		/* Abort if the instance is detaching */
		mutex_exit(&sd_detach_mutex);
		return;
	}
	mutex_exit(&sd_detach_mutex);

	now = ddi_get_time();
	/*
	 * Grab both mutexes, in the proper order, since we're accessing
	 * both PM and softstate variables.
	 */
	mutex_enter(SD_MUTEX(un));
	mutex_enter(&un->un_pm_mutex);
	if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
	    (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
		/*
		 * Update the chain types.
		 * This takes effect on the next new command received.
		 */
		if (un->un_f_non_devbsize_supported) {
			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
		} else {
			un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
		}
		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_idletimeout_handler: idling device\n");
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		un->un_pm_idle_timeid = NULL;
	} else {
		un->un_pm_idle_timeid =
		    timeout(sd_pm_idletimeout_handler, un,
		    (drv_usectohz((clock_t)300000))); /* 300 ms. */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_pm_timeout_handler
 *
 * Description: Callback to tell the framework we are idle.
 *
 * Context: timeout(9f) thread context.
 */

static void
sd_pm_timeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	(void) pm_idle_component(SD_DEVINFO(un), 0);
	mutex_enter(&un->un_pm_mutex);
	un->un_pm_timeid = NULL;
	mutex_exit(&un->un_pm_mutex);
}


/*
 * Function: sdpower
 *
 * Description: PM entry point.
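 *
 *		Transitions arrive here from the pm framework; e.g. a
 *		(sketch) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON)
 *		call ends up as sdpower(devi, 0, SD_SPINDLE_ON). Only
 *		component 0 (the spindle motor) is supported.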
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdpower(dev_info_t *devi, int component, int level)
{
	struct sd_lun	*un;
	int		instance;
	int		rval = DDI_SUCCESS;
	uint_t		i, log_page_size, maxcycles, ncycles;
	uchar_t		*log_page_data;
	int		log_sense_page;
	int		medium_present;
	time_t		intvlp;
	dev_t		dev;
	struct pm_trans_data	sd_pm_tran_data;
	uchar_t		save_state;
	int		sval;
	uchar_t		state_before_pm;
	int		got_semaphore_here;
	sd_ssc_t	*ssc;

	instance = ddi_get_instance(devi);

	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) ||
	    component != 0) {
		return (DDI_FAILURE);
	}

	dev = sd_make_device(SD_DEVINFO(un));
	ssc = sd_ssc_init(un);

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);

	/*
	 * Must synchronize power down with close.
	 * Attempt to decrement/acquire the open/close semaphore,
	 * but do NOT wait on it. If it's not greater than zero,
	 * i.e. it can't be decremented without waiting, then
	 * someone else, either open or close, already has it
	 * and the try returns 0. Use that knowledge here to determine
	 * if it's OK to change the device power level.
	 * Also, only increment it on exit if it was decremented, i.e.
	 * gotten, here.
	 */
	got_semaphore_here = sema_tryp(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * If un_ncmds_in_driver is non-zero it indicates commands are
	 * already being processed in the driver, or if the semaphore was
	 * not gotten here it indicates an open or close is being processed.
	 * At the same time somebody is requesting to go low power which
	 * can't happen, therefore we need to return failure.
	 */
	if ((level == SD_SPINDLE_OFF) &&
	    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device has queued cmds.\n");

		goto sdpower_failed;
	}

	/*
	 * If the device is OFFLINE, the disk is completely dead. In our
	 * case the power state would have to be changed by sending commands
	 * to the device; those will fail anyway, so just return here.
	 *
	 * Power changes to a device that's OFFLINE or SUSPENDED
	 * are not allowed.
	 */
	if ((un->un_state == SD_STATE_OFFLINE) ||
	    (un->un_state == SD_STATE_SUSPENDED)) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device is off-line.\n");

		goto sdpower_failed;
	}

	/*
	 * Change the device's state to indicate its power level
	 * is being changed. Do this to prevent a power off in the
	 * middle of commands, which is especially bad on devices
	 * that are really powered off instead of just spun down.
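	 *
	 * (Every exit path below therefore restores un_state from
	 * state_before_pm and broadcasts un_suspend_cv, in sketch form:
	 *
	 *	mutex_enter(SD_MUTEX(un));
	 *	un->un_state = state_before_pm;
	 *	cv_broadcast(&un->un_suspend_cv);
	 *	mutex_exit(SD_MUTEX(un));
	 *
	 * as seen at each exit site further down.)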
	 */
	state_before_pm = un->un_state;
	un->un_state = SD_STATE_PM_CHANGING;

	mutex_exit(SD_MUTEX(un));

	/*
	 * If the "pm-capable" property is set to TRUE by HBA drivers,
	 * bypass the following checking; otherwise, check the log
	 * sense information for this device.
	 */
	if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) {
		/*
		 * Get the log sense information to understand whether the
		 * power cycle counts have gone beyond the threshold.
		 */
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		mutex_enter(SD_MUTEX(un));
		log_sense_page = un->un_start_stop_cycle_page;
		mutex_exit(SD_MUTEX(un));

		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);

		if (rval != 0) {
			if (rval == EIO)
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			else
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Log Sense Failed\n");

			kmem_free(log_page_data, log_page_size);
			/* Cannot support power management on those drives */

			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, Log Sense Failed.\n");

			goto sdpower_failed;
		}

		/*
		 * From the page data - Convert the essential information to
		 * pm_trans_data
		 */
		maxcycles =
		    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
		    (log_page_data[0x1E] << 8)  | log_page_data[0x1F];

		sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;

		ncycles =
		    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
		    (log_page_data[0x26] << 8)  | log_page_data[0x27];

		sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;

		for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
			sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
			    log_page_data[8+i];
		}

		kmem_free(log_page_data, log_page_size);

		/*
		 * Call the pm_trans_check routine to get the Ok from
		 * the global policy
		 */

		sd_pm_tran_data.format = DC_SCSI_FORMAT;
		sd_pm_tran_data.un.scsi_cycles.flag = 0;

		rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 1;
		}
#endif
		switch (rval) {
		case 0:
			/*
			 * Not OK to power cycle, or an error was passed in
			 * the parameters; pm_trans_check has given the
			 * advised time to wait before a power cycle can be
			 * considered. Based on the new intvlp parameter we
			 * are supposed to pretend we are busy so that the pm
			 * framework will never call our power entry point.
			 * Because of that, install a timeout handler and
			 * wait for the recommended time to elapse so that
			 * power management can be effective again.
			 *
			 * To effect this behavior, call pm_busy_component to
			 * indicate to the framework this device is busy.
			 * By not adjusting un_pm_count the rest of PM in
			 * the driver will function normally, and independent
			 * of this, but because the framework is told the
			 * device is busy it won't attempt powering down
			 * until it gets a matching idle. The timeout handler
			 * sends this.
			 * Note: sd_pm_entry can't be called here to do this
			 * because sdpower may have been called as a result
			 * of a call to pm_raise_power from within sd_pm_entry.
			 *
			 * If a timeout handler is already active then
			 * don't install another.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid == NULL) {
				un->un_pm_timeid =
				    timeout(sd_pm_timeout_handler,
				    un, intvlp * drv_usectohz(1000000));
				mutex_exit(&un->un_pm_mutex);
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));

			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
			    "trans check Failed, not ok to power cycle.\n");

			goto sdpower_failed;
		case -1:
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, trans check command Failed.\n");

			goto sdpower_failed;
		}
	}

	if (level == SD_SPINDLE_OFF) {
		/*
		 * Save the last state... if the STOP FAILS we need it
		 * for restoring
		 */
		mutex_enter(SD_MUTEX(un));
		save_state = un->un_last_state;
		/*
		 * There must not be any commands being processed
		 * in the driver when we get here. Power to the
		 * device is potentially going off.
		 */
		ASSERT(un->un_ncmds_in_driver == 0);
		mutex_exit(SD_MUTEX(un));

		/*
		 * For now suspend the device completely before the spindle
		 * is turned off.
		 */
		if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, PM suspend Failed.\n");

			goto sdpower_failed;
		}
	}

	/*
	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
	 * close, or strategy. Dump no longer uses this routine; it uses its
	 * own code so it can be done in polled mode.
	 */

	medium_present = TRUE;

	/*
	 * When powering up, issue a TUR in case the device is at unit
	 * attention. Don't do retries. Bypass the PM layer, otherwise
	 * a deadlock on un_pm_busy_cv will occur.
	 */
	if (level == SD_SPINDLE_ON) {
		sval = sd_send_scsi_TEST_UNIT_READY(ssc,
		    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
		if (sval != 0)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
	    ((level == SD_SPINDLE_ON) ? "START" : "STOP"));

	sval = sd_send_scsi_START_STOP_UNIT(ssc,
	    ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
	    SD_PATH_DIRECT);
	if (sval != 0) {
		if (sval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	/* Command failed, check for media present. */
	if ((sval == ENXIO) && un->un_f_has_removable_media) {
		medium_present = FALSE;
	}

	/*
	 * The conditions of interest here are:
	 *   if a spindle off with media present fails,
	 *	then restore the state and return an error.
	 *   else if a spindle on fails,
	 *	then return an error (there's no state to restore).
	 * In all other cases we setup for the new state
	 * and return success.
	 */
	switch (level) {
	case SD_SPINDLE_OFF:
		if ((medium_present == TRUE) && (sval != 0)) {
			/* The stop command from above failed */
			rval = DDI_FAILURE;
			/*
			 * The stop command failed, and we have media
			 * present. Put the level back by calling
			 * sd_ddi_pm_resume() and set the state back to
			 * its previous value.
			 */
			(void) sd_ddi_pm_resume(un);
			mutex_enter(SD_MUTEX(un));
			un->un_last_state = save_state;
			mutex_exit(SD_MUTEX(un));
			break;
		}
		/*
		 * The stop command from above succeeded.
		 */
		if (un->un_f_monitor_media_state) {
			/*
			 * Terminate the watch thread in case of removable
			 * media devices going into low power state. This is
			 * as per the requirements of the pm framework;
			 * otherwise commands will be generated for the
			 * device (through the watch thread), even when the
			 * device is in low power state.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_f_watcht_stopped = FALSE;
			if (un->un_swr_token != NULL) {
				opaque_t temp_token = un->un_swr_token;
				un->un_f_watcht_stopped = TRUE;
				un->un_swr_token = NULL;
				mutex_exit(SD_MUTEX(un));
				(void) scsi_watch_request_terminate(temp_token,
				    SCSI_WATCH_TERMINATE_ALL_WAIT);
			} else {
				mutex_exit(SD_MUTEX(un));
			}
		}
		break;

	default:	/* The level requested is spindle on... */
		/*
		 * Legacy behavior: return success on a failed spinup
		 * if there is no media in the drive.
		 * Do this by looking at medium_present here.
		 */
		if ((sval != 0) && medium_present) {
			/* The start command from above failed */
			rval = DDI_FAILURE;
			break;
		}
		/*
		 * The start command from above succeeded.
		 * Resume the devices now that we have
		 * started the disks.
		 */
		(void) sd_ddi_pm_resume(un);

		/*
		 * Resume the watch thread since it was suspended
		 * when the device went into low power mode.
		 */
		if (un->un_f_monitor_media_state) {
			mutex_enter(SD_MUTEX(un));
			if (un->un_f_watcht_stopped == TRUE) {
				opaque_t temp_token;

				un->un_f_watcht_stopped = FALSE;
				mutex_exit(SD_MUTEX(un));
				temp_token = scsi_watch_request_submit(
				    SD_SCSI_DEVP(un),
				    sd_check_media_time,
				    SENSE_LENGTH, sd_media_watch_cb,
				    (caddr_t)dev);
				mutex_enter(SD_MUTEX(un));
				un->un_swr_token = temp_token;
			}
			mutex_exit(SD_MUTEX(un));
		}
	}
	if (got_semaphore_here != 0) {
		sema_v(&un->un_semoclose);
	}
	/*
	 * On exit put the state back to its original value
	 * and broadcast to anyone waiting for the power
	 * change completion.
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_state = state_before_pm;
	cv_broadcast(&un->un_suspend_cv);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);

	sd_ssc_fini(ssc);
	return (rval);

sdpower_failed:

	sd_ssc_fini(ssc);
	return (DDI_FAILURE);
}



/*
 * Function: sdattach
 *
 * Description: Driver's attach(9e) entry point function.
 *
 * Arguments:	devi - opaque device info handle
 *		cmd  - attach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		return (sd_unit_attach(devi));
	case DDI_RESUME:
		return (sd_ddi_resume(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function: sddetach
 *
 * Description: Driver's detach(9E) entry point function.
 *
 * Arguments:	devi - opaque device info handle
 *		cmd  - detach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_DETACH:
		return (sd_unit_detach(devi));
	case DDI_SUSPEND:
		return (sd_ddi_suspend(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function: sd_sync_with_callback
 *
 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the
 *		soft state while the callback routine is active.
 *
 * Arguments: un: softstate structure for the instance
 *
 * Context: Kernel thread context
 */

static void
sd_sync_with_callback(struct sd_lun *un)
{
	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	ASSERT(un->un_in_callback >= 0);

	while (un->un_in_callback > 0) {
		mutex_exit(SD_MUTEX(un));
		delay(2);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_exit(SD_MUTEX(un));
}

/*
 * Function: sd_unit_attach
 *
 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
 *		the soft state structure for the device and performs
 *		all necessary structure and device initializations.
 *
 * Arguments: devi: the system's dev_info_t for the device.
 *
 * Return Code: DDI_SUCCESS if attach is successful.
 *		DDI_FAILURE if any part of the attach fails.
 *
 * Context: Called at attach(9e) time for the DDI_ATTACH flag.
 *	    Kernel thread context only. Can sleep.
 */

static int
sd_unit_attach(dev_info_t *devi)
{
	struct	scsi_device	*devp;
	struct	sd_lun		*un;
	char			*variantp;
	int	reservation_flag = SD_TARGET_IS_UNRESERVED;
	int	instance;
	int	rval;
	int	wc_enabled;
	int	tgt;
	uint64_t	capacity;
	uint_t		lbasize = 0;
	dev_info_t	*pdip = ddi_get_parent(devi);
	int		offbyone = 0;
	int		geom_label_valid = 0;
	sd_ssc_t	*ssc;
	int		status;
	struct sd_fm_internal	*sfip = NULL;
#if defined(__sparc)
	int		max_xfer_size;
#endif

	/*
	 * Retrieve the target driver's private data area. This was set
	 * up by the HBA.
	 */
	devp = ddi_get_driver_private(devi);

	/*
	 * Retrieve the target ID of the device.
	 */
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_TARGET, -1);

	/*
	 * Since we have no idea what state things were left in by the last
	 * user of the device, set up some 'default' settings, i.e. turn 'em
	 * off. The scsi_ifsetcap calls force re-negotiations with the drive.
	 * Do this before the scsi_probe, which sends an inquiry.
	 * This is a fix for bug (4430280).
	 * Of special importance is wide-xfer. The drive could have been left
	 * in wide transfer mode by the last driver to communicate with it,
	 * this includes us. If that's the case, and if the following is not
	 * set up properly or we don't re-negotiate with the drive prior to
	 * transferring data to/from the drive, it causes bus parity errors,
	 * data overruns, and unexpected interrupts. This first occurred when
	 * the fix for bug (4378686) was made.
	 */
	(void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);

	/*
	 * Currently, scsi_ifsetcap sets the tagged-qing capability for all
	 * LUNs on a target. Setting it per lun instance actually sets the
	 * capability of this target, which affects those luns already
	 * attached on the same target. So during attach, we can only
	 * disable this capability when no other lun has been attached on
	 * this target. By doing this, we assume a target has the same
	 * tagged-qing capability for every lun. The condition can be
	 * removed when the HBA is changed to support a per-lun based
	 * tagged-qing capability.
	 */
	if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
		(void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
	}

	/*
	 * Use scsi_probe() to issue an INQUIRY command to the device.
	 * This call will allocate and fill in the scsi_inquiry structure
	 * and point the sd_inq member of the scsi_device structure to it.
	 * If the attach succeeds, then this memory will not be de-allocated
	 * (via scsi_unprobe()) until the instance is detached.
	 */
	if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
		goto probe_failed;
	}

	/*
	 * Check the device type as specified in the inquiry data and
	 * claim it if it is of a type that we support.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_DIRECT:
		break;
	case DTYPE_RODIRECT:
		break;
	case DTYPE_OPTICAL:
		break;
	case DTYPE_NOTPRESENT:
	default:
		/* Unsupported device type; fail the attach. */
		goto probe_failed;
	}

	/*
	 * Allocate the soft state structure for this unit.
	 *
	 * We rely upon this memory being set to all zeroes by
	 * ddi_soft_state_zalloc(). We assume that any member of the
	 * soft state structure that is not explicitly initialized by
	 * this routine will have a value of zero.
	 */
	instance = ddi_get_instance(devp->sd_dev);
	if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) {
		goto probe_failed;
	}

	/*
	 * Retrieve a pointer to the newly-allocated soft state.
	 *
	 * This should NEVER fail if the ddi_soft_state_zalloc() call above
	 * was successful, unless something has gone horribly wrong and the
	 * ddi's soft state internals are corrupt (in which case it is
	 * probably better to halt here than just fail the attach....)
	 */
	if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
		panic("sd_unit_attach: NULL soft state on instance:0x%x",
		    instance);
		/*NOTREACHED*/
	}

	/*
	 * Link the back ptr of the driver soft state to the scsi_device
	 * struct for this lun.
	 * Save a pointer to the softstate in the driver-private area of
	 * the scsi_device struct.
	 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until
	 * we first set un->un_sd below.
	 */
	un->un_sd = devp;
	devp->sd_private = (opaque_t)un;

	/*
	 * The following must be after devp is stored in the soft state
	 * struct.
	 */
#ifdef SDDEBUG
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "%s_unit_attach: un:0x%p instance:%d\n",
	    ddi_driver_name(devi), un, instance);
#endif

	/*
	 * Set up the device type and node type (for the minor nodes).
	 * By default we assume that the device can at least support the
	 * Common Command Set. Call it a CD-ROM if it reports itself
	 * as a RODIRECT device.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_RODIRECT:
		un->un_node_type = DDI_NT_CD_CHAN;
		un->un_ctype	 = CTYPE_CDROM;
		break;
	case DTYPE_OPTICAL:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype	 = CTYPE_ROD;
		break;
	default:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype	 = CTYPE_CCS;
		break;
	}

	/*
	 * Try to read the interconnect type from the HBA.
	 *
	 * Note: This driver is currently compiled as two binaries, a
	 * parallel scsi version (sd) and a fibre channel version (ssd).
	 * All functional differences are determined at compile time. In the
	 * future a single binary will be provided and the interconnect type
	 * will be used to differentiate between fibre and parallel scsi
	 * behaviors. At that time it will be necessary for all fibre
	 * channel HBAs to support this property.
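	 *
	 * (The query below is a capability read, in sketch form:
	 *
	 *	scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1);
	 *
	 * which returns one of the INTERCONNECT_* values, or -1 when the
	 * HBA does not implement the capability, driving the default
	 * case of the switch.)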
	 *
	 * Set un_f_is_fibre to TRUE (default fibre).
	 */
	un->un_f_is_fibre = TRUE;
	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
	case INTERCONNECT_SSA:
		un->un_interconnect_type = SD_INTERCONNECT_SSA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
		break;
	case INTERCONNECT_PARALLEL:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
		break;
	case INTERCONNECT_SATA:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_SATA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
		break;
	case INTERCONNECT_FIBRE:
		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
		break;
	case INTERCONNECT_FABRIC:
		un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
		un->un_node_type = DDI_NT_BLOCK_FABRIC;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
		break;
	default:
#ifdef SD_DEFAULT_INTERCONNECT_TYPE
		/*
		 * The HBA does not support the "interconnect-type" property
		 * (or did not provide a recognized type).
		 *
		 * Note: This will be obsoleted when a single fibre channel
		 * and parallel scsi driver is delivered. In the meantime the
		 * interconnect type will be set to the platform default. If
		 * that type is not parallel SCSI, it means that we should
		 * be assuming "ssd" semantics. However, here this also means
		 * that the FC HBA is not supporting the "interconnect-type"
		 * property like we expect it to, so log this occurrence.
		 */
		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
		if (!SD_IS_PARALLEL_SCSI(un)) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_FIBRE\n", un);
		} else {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_PARALLEL\n", un);
			un->un_f_is_fibre = FALSE;
		}
#else
		/*
		 * Note: This source will be implemented when a single fibre
		 * channel and parallel scsi driver is delivered. The default
		 * will be to assume that if a device does not support the
		 * "interconnect-type" property it is a parallel SCSI HBA and
		 * we will set the interconnect type for parallel scsi.
		 */
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		un->un_f_is_fibre = FALSE;
#endif
		break;
	}

	if (un->un_f_is_fibre == TRUE) {
		if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
		    SCSI_VERSION_3) {
			switch (un->un_interconnect_type) {
			case SD_INTERCONNECT_FIBRE:
			case SD_INTERCONNECT_SSA:
				un->un_node_type = DDI_NT_BLOCK_WWN;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Initialize the Request Sense command for the target
	 */
	if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
		goto alloc_rqs_failed;
	}

	/*
	 * Set un_retry_count to SD_RETRY_COUNT; this is OK for SPARC
	 * with separate binaries for sd and ssd.
	 *
	 * x86 has 1 binary, and un_retry_count is set based on connection
	 * type. The hardcoded values will go away when SPARC uses 1 binary
	 * for sd and ssd.
These hardcoded values need to match 7086 * SD_RETRY_COUNT in sddef.h. 7087 * The value used is based on the interconnect type: 7088 * fibre = 3, parallel = 5. 7089 */ 7090 #if defined(__i386) || defined(__amd64) 7091 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 7092 #else 7093 un->un_retry_count = SD_RETRY_COUNT; 7094 #endif 7095 7096 /* 7097 * Set the per disk retry count to the default number of retries 7098 * for disks and CDROMs. This value can be overridden by the 7099 * disk property list or an entry in sd.conf. 7100 */ 7101 un->un_notready_retry_count = 7102 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7103 : DISK_NOT_READY_RETRY_COUNT(un); 7104 7105 /* 7106 * Set the busy retry count to the default value of un_retry_count. 7107 * This can be overridden by entries in sd.conf or the device 7108 * config table. 7109 */ 7110 un->un_busy_retry_count = un->un_retry_count; 7111 7112 /* 7113 * Init the reset threshold for retries. This number determines 7114 * how many retries must be performed before a reset can be issued 7115 * (for certain error conditions). This can be overridden by entries 7116 * in sd.conf or the device config table. 7117 */ 7118 un->un_reset_retry_count = (un->un_retry_count / 2); 7119 7120 /* 7121 * Set the victim_retry_count to twice the default un_retry_count 7122 */ 7123 un->un_victim_retry_count = (2 * un->un_retry_count); 7124 7125 /* 7126 * Set the reservation release timeout to the default value of 7127 * 5 seconds. This can be overridden by entries in ssd.conf or the 7128 * device config table. 7129 */ 7130 un->un_reserve_release_time = 5; 7131 7132 /* 7133 * Set up the default maximum transfer size. Note that this may 7134 * get updated later in the attach, when setting up default wide 7135 * operations for disks. 7136 */ 7137 #if defined(__i386) || defined(__amd64) 7138 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7139 un->un_partial_dma_supported = 1; 7140 #else 7141 un->un_max_xfer_size = (uint_t)maxphys; 7142 #endif 7143 7144 /* 7145 * Get "allow bus device reset" property (defaults to "enabled" if 7146 * the property was not defined). This is to disable bus resets for 7147 * certain kinds of error recovery. Note: In the future when a run-time 7148 * fibre check is available the soft state flag should default to 7149 * enabled. 7150 */ 7151 if (un->un_f_is_fibre == TRUE) { 7152 un->un_f_allow_bus_device_reset = TRUE; 7153 } else { 7154 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7155 "allow-bus-device-reset", 1) != 0) { 7156 un->un_f_allow_bus_device_reset = TRUE; 7157 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7158 "sd_unit_attach: un:0x%p Bus device reset " 7159 "enabled\n", un); 7160 } else { 7161 un->un_f_allow_bus_device_reset = FALSE; 7162 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7163 "sd_unit_attach: un:0x%p Bus device reset " 7164 "disabled\n", un); 7165 } 7166 } 7167 7168 /* 7169 * Check if this is an ATAPI device. ATAPI devices use Group 1 7170 * Read/Write commands and Group 2 Mode Sense/Select commands. 7171 * 7172 * Note: The "obsolete" way of doing this is to check for the "atapi" 7173 * property. The new "variant" property with a value of "atapi" has been 7174 * introduced so that future 'variants' of standard SCSI behavior (like 7175 * atapi) could be specified by the underlying HBA drivers by supplying 7176 * a new value for the "variant" property, instead of having to define a 7177 * new property.
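 *
 * Sketch of the HBA side (hypothetical fragment; not code from this
 * file): a nexus driver would typically export the property on the
 * child device node with something like
 *
 *	(void) ndi_prop_update_string(DDI_DEV_T_NONE, child_dip,
 *	    "variant", "atapi");
 *
 * after which the lookup below succeeds and this driver switches to
 * ATAPI (Group 1 Read/Write, Group 2 Mode Sense/Select) behavior.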
7178 */ 7179 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7180 un->un_f_cfg_is_atapi = TRUE; 7181 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7182 "sd_unit_attach: un:0x%p Atapi device\n", un); 7183 } 7184 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7185 &variantp) == DDI_PROP_SUCCESS) { 7186 if (strcmp(variantp, "atapi") == 0) { 7187 un->un_f_cfg_is_atapi = TRUE; 7188 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7189 "sd_unit_attach: un:0x%p Atapi device\n", un); 7190 } 7191 ddi_prop_free(variantp); 7192 } 7193 7194 un->un_cmd_timeout = SD_IO_TIME; 7195 7196 un->un_busy_timeout = SD_BSY_TIMEOUT; 7197 7198 /* Info on current states, statuses, etc. (Updated frequently) */ 7199 un->un_state = SD_STATE_NORMAL; 7200 un->un_last_state = SD_STATE_NORMAL; 7201 7202 /* Control & status info for command throttling */ 7203 un->un_throttle = sd_max_throttle; 7204 un->un_saved_throttle = sd_max_throttle; 7205 un->un_min_throttle = sd_min_throttle; 7206 7207 if (un->un_f_is_fibre == TRUE) { 7208 un->un_f_use_adaptive_throttle = TRUE; 7209 } else { 7210 un->un_f_use_adaptive_throttle = FALSE; 7211 } 7212 7213 /* Removable media support. */ 7214 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7215 un->un_mediastate = DKIO_NONE; 7216 un->un_specified_mediastate = DKIO_NONE; 7217 7218 /* CVs for suspend/resume (PM or DR) */ 7219 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7220 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7221 7222 /* Power management support. */ 7223 un->un_power_level = SD_SPINDLE_UNINIT; 7224 7225 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 7226 un->un_f_wcc_inprog = 0; 7227 7228 /* 7229 * The open/close semaphore is used to serialize threads executing 7230 * in the driver's open & close entry point routines for a given 7231 * instance. 7232 */ 7233 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7234 7235 /* 7236 * The conf file entry and softstate variable are forceful overrides, 7237 * meaning a non-zero value must be entered to change the default. 7238 */ 7239 un->un_f_disksort_disabled = FALSE; 7240 7241 /* 7242 * Retrieve the properties from the static driver table or the driver 7243 * configuration file (.conf) for this unit and update the soft state 7244 * for the device as needed for the indicated properties. 7245 * Note: the property configuration needs to occur here as some of the 7246 * following routines may have dependencies on soft state flags set 7247 * as part of the driver property configuration. 7248 */ 7249 sd_read_unit_properties(un); 7250 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7251 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7252 7253 /* 7254 * A device is treated as hotpluggable only if it has 7255 * the "hotpluggable" property. Otherwise, it is 7256 * regarded as non-hotpluggable. 7257 */ 7258 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7259 -1) != -1) { 7260 un->un_f_is_hotpluggable = TRUE; 7261 } 7262 7263 /* 7264 * Set the unit's attributes (flags) according to "hotpluggable" and 7265 * the RMB bit in the INQUIRY data. 7266 */ 7267 sd_set_unit_attributes(un, devi); 7268 7269 /* 7270 * By default, we mark the capacity, lbasize, and geometry 7271 * as invalid. Only if we successfully read a valid capacity 7272 * will we update the un_blockcount and un_tgt_blocksize with the 7273 * valid values (the geometry will be validated later).
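 *
 * Illustrative sketch of how these validity flags gate later
 * consumers (assumes SD_MUTEX protects the flags and the cached
 * values, per the locking convention used throughout this file):
 *
 *	mutex_enter(SD_MUTEX(un));
 *	if (un->un_f_blockcount_is_valid == FALSE) {
 *		mutex_exit(SD_MUTEX(un));
 *		return (EIO);
 *	}
 *	mutex_exit(SD_MUTEX(un));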
7274 */ 7275 un->un_f_blockcount_is_valid = FALSE; 7276 un->un_f_tgt_blocksize_is_valid = FALSE; 7277 7278 /* 7279 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7280 * otherwise. 7281 */ 7282 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7283 un->un_blockcount = 0; 7284 7285 /* 7286 * Set up the per-instance info needed to determine the correct 7287 * CDBs and other info for issuing commands to the target. 7288 */ 7289 sd_init_cdb_limits(un); 7290 7291 /* 7292 * Set up the IO chains to use, based upon the target type. 7293 */ 7294 if (un->un_f_non_devbsize_supported) { 7295 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7296 } else { 7297 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7298 } 7299 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7300 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7301 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7302 7303 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7304 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7305 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7306 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7307 7308 7309 if (ISCD(un)) { 7310 un->un_additional_codes = sd_additional_codes; 7311 } else { 7312 un->un_additional_codes = NULL; 7313 } 7314 7315 /* 7316 * Create the kstats here so they can be available for attach-time 7317 * routines that send commands to the unit (either polled or via 7318 * sd_send_scsi_cmd). 7319 * 7320 * Note: This is a critical sequence that needs to be maintained: 7321 * 1) Instantiate the kstats here, before any routines using the 7322 * iopath (i.e. sd_send_scsi_cmd). 7323 * 2) Instantiate and initialize the partition stats 7324 * (sd_set_pstats). 7325 * 3) Initialize the error stats (sd_set_errstats), following 7326 * sd_validate_geometry(),sd_register_devid(), 7327 * and sd_cache_control(). 7328 */ 7329 7330 un->un_stats = kstat_create(sd_label, instance, 7331 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7332 if (un->un_stats != NULL) { 7333 un->un_stats->ks_lock = SD_MUTEX(un); 7334 kstat_install(un->un_stats); 7335 } 7336 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7337 "sd_unit_attach: un:0x%p un_stats created\n", un); 7338 7339 sd_create_errstats(un, instance); 7340 if (un->un_errstats == NULL) { 7341 goto create_errstats_failed; 7342 } 7343 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7344 "sd_unit_attach: un:0x%p errstats created\n", un); 7345 7346 /* 7347 * The following if/else code was relocated here from below as part 7348 * of the fix for bug (4430280). However with the default setup added 7349 * on entry to this routine, it's no longer absolutely necessary for 7350 * this to be before the call to sd_spin_up_unit. 
7351 */ 7352 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7353 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7354 (devp->sd_inq->inq_ansi == 5)) && 7355 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7356 7357 /* 7358 * If tagged queueing is supported by the target 7359 * and by the host adapter then we will enable it 7360 */ 7361 un->un_tagflags = 0; 7362 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7363 (un->un_f_arq_enabled == TRUE)) { 7364 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7365 1, 1) == 1) { 7366 un->un_tagflags = FLAG_STAG; 7367 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7368 "sd_unit_attach: un:0x%p tag queueing " 7369 "enabled\n", un); 7370 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7371 "untagged-qing", 0) == 1) { 7372 un->un_f_opt_queueing = TRUE; 7373 un->un_saved_throttle = un->un_throttle = 7374 min(un->un_throttle, 3); 7375 } else { 7376 un->un_f_opt_queueing = FALSE; 7377 un->un_saved_throttle = un->un_throttle = 1; 7378 } 7379 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7380 == 1) && (un->un_f_arq_enabled == TRUE)) { 7381 /* The Host Adapter supports internal queueing. */ 7382 un->un_f_opt_queueing = TRUE; 7383 un->un_saved_throttle = un->un_throttle = 7384 min(un->un_throttle, 3); 7385 } else { 7386 un->un_f_opt_queueing = FALSE; 7387 un->un_saved_throttle = un->un_throttle = 1; 7388 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7389 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7390 } 7391 7392 /* 7393 * Enable large transfers for SATA/SAS drives 7394 */ 7395 if (SD_IS_SERIAL(un)) { 7396 un->un_max_xfer_size = 7397 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7398 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7399 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7400 "sd_unit_attach: un:0x%p max transfer " 7401 "size=0x%x\n", un, un->un_max_xfer_size); 7402 7403 } 7404 7405 /* Setup or tear down default wide operations for disks */ 7406 7407 /* 7408 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7409 * and "ssd_max_xfer_size" to exist simultaneously on the same 7410 * system and be set to different values. In the future this 7411 * code may need to be updated when the ssd module is 7412 * obsoleted and removed from the system. (4299588) 7413 */ 7414 if (SD_IS_PARALLEL_SCSI(un) && 7415 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7416 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7417 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7418 1, 1) == 1) { 7419 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7420 "sd_unit_attach: un:0x%p Wide Transfer " 7421 "enabled\n", un); 7422 } 7423 7424 /* 7425 * If tagged queuing has also been enabled, then 7426 * enable large xfers 7427 */ 7428 if (un->un_saved_throttle == sd_max_throttle) { 7429 un->un_max_xfer_size = 7430 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7431 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7432 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7433 "sd_unit_attach: un:0x%p max transfer " 7434 "size=0x%x\n", un, un->un_max_xfer_size); 7435 } 7436 } else { 7437 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7438 0, 1) == 1) { 7439 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7440 "sd_unit_attach: un:0x%p " 7441 "Wide Transfer disabled\n", un); 7442 } 7443 } 7444 } else { 7445 un->un_tagflags = FLAG_STAG; 7446 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7447 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7448 } 7449 7450 /* 7451 * If this target supports LUN reset, try to enable it. 
7452 */ 7453 if (un->un_f_lun_reset_enabled) { 7454 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7455 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7456 "un:0x%p lun_reset capability set\n", un); 7457 } else { 7458 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7459 "un:0x%p lun-reset capability not set\n", un); 7460 } 7461 } 7462 7463 /* 7464 * Adjust the maximum transfer size. This is to fix 7465 * the problem of partial DMA support on SPARC. Some 7466 * HBA driver, like aac, has very small dma_attr_maxxfer 7467 * size, which requires partial DMA support on SPARC. 7468 * In the future the SPARC pci nexus driver may solve 7469 * the problem instead of this fix. 7470 */ 7471 #if defined(__sparc) 7472 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7473 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7474 un->un_max_xfer_size = max_xfer_size; 7475 un->un_partial_dma_supported = 1; 7476 } 7477 #endif 7478 7479 /* 7480 * Set PKT_DMA_PARTIAL flag. 7481 */ 7482 if (un->un_partial_dma_supported == 1) { 7483 un->un_pkt_flags = PKT_DMA_PARTIAL; 7484 } else { 7485 un->un_pkt_flags = 0; 7486 } 7487 7488 /* Initialize sd_ssc_t for internal uscsi commands */ 7489 ssc = sd_ssc_init(un); 7490 scsi_fm_init(devp); 7491 7492 /* 7493 * Allocate memory for SCSI FMA stuffs. 7494 */ 7495 un->un_fm_private = 7496 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP); 7497 sfip = (struct sd_fm_internal *)un->un_fm_private; 7498 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd; 7499 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo; 7500 sfip->fm_ssc.ssc_un = un; 7501 7502 /* 7503 * At this point in the attach, we have enough info in the 7504 * soft state to be able to issue commands to the target. 7505 * 7506 * All command paths used below MUST issue their commands as 7507 * SD_PATH_DIRECT. This is important as intermediate layers 7508 * are not all initialized yet (such as PM). 7509 */ 7510 7511 /* 7512 * Send a TEST UNIT READY command to the device. This should clear 7513 * any outstanding UNIT ATTENTION that may be present. 7514 * 7515 * Note: Don't check for success, just track if there is a reservation, 7516 * this is a throw away command to clear any unit attentions. 7517 * 7518 * Note: This MUST be the first command issued to the target during 7519 * attach to ensure power on UNIT ATTENTIONS are cleared. 7520 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7521 * with attempts at spinning up a device with no media. 7522 */ 7523 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 7524 if (status != 0) { 7525 if (status == EACCES) 7526 reservation_flag = SD_TARGET_IS_RESERVED; 7527 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7528 } 7529 7530 /* 7531 * If the device is NOT a removable media device, attempt to spin 7532 * it up (using the START_STOP_UNIT command) and read its capacity 7533 * (using the READ CAPACITY command). Note, however, that either 7534 * of these could fail and in some cases we would continue with 7535 * the attach despite the failure (see below). 7536 */ 7537 if (un->un_f_descr_format_supported) { 7538 7539 switch (sd_spin_up_unit(ssc)) { 7540 case 0: 7541 /* 7542 * Spin-up was successful; now try to read the 7543 * capacity. If successful then save the results 7544 * and mark the capacity & lbasize as valid. 
7545 */ 7546 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7547 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7548 7549 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 7550 &lbasize, SD_PATH_DIRECT); 7551 7552 switch (status) { 7553 case 0: { 7554 if (capacity > DK_MAX_BLOCKS) { 7555 #ifdef _LP64 7556 if ((capacity + 1) > 7557 SD_GROUP1_MAX_ADDRESS) { 7558 /* 7559 * Enable descriptor format 7560 * sense data so that we can 7561 * get 64 bit sense data 7562 * fields. 7563 */ 7564 sd_enable_descr_sense(ssc); 7565 } 7566 #else 7567 /* 32-bit kernels can't handle this */ 7568 scsi_log(SD_DEVINFO(un), 7569 sd_label, CE_WARN, 7570 "disk has %llu blocks, which " 7571 "is too large for a 32-bit " 7572 "kernel", capacity); 7573 7574 #if defined(__i386) || defined(__amd64) 7575 /* 7576 * 1TB disk was treated as (1T - 512)B 7577 * in the past, so that it might have 7578 * valid VTOC and solaris partitions, 7579 * we have to allow it to continue to 7580 * work. 7581 */ 7582 if (capacity -1 > DK_MAX_BLOCKS) 7583 #endif 7584 goto spinup_failed; 7585 #endif 7586 } 7587 7588 /* 7589 * Here it's not necessary to check the case: 7590 * the capacity of the device is bigger than 7591 * what the max hba cdb can support. Because 7592 * sd_send_scsi_READ_CAPACITY will retrieve 7593 * the capacity by sending USCSI command, which 7594 * is constrained by the max hba cdb. Actually, 7595 * sd_send_scsi_READ_CAPACITY will return 7596 * EINVAL when using bigger cdb than required 7597 * cdb length. Will handle this case in 7598 * "case EINVAL". 7599 */ 7600 7601 /* 7602 * The following relies on 7603 * sd_send_scsi_READ_CAPACITY never 7604 * returning 0 for capacity and/or lbasize. 7605 */ 7606 sd_update_block_info(un, lbasize, capacity); 7607 7608 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7609 "sd_unit_attach: un:0x%p capacity = %ld " 7610 "blocks; lbasize= %ld.\n", un, 7611 un->un_blockcount, un->un_tgt_blocksize); 7612 7613 break; 7614 } 7615 case EINVAL: 7616 /* 7617 * In the case where the max-cdb-length property 7618 * is smaller than the required CDB length for 7619 * a SCSI device, a target driver can fail to 7620 * attach to that device. 7621 */ 7622 scsi_log(SD_DEVINFO(un), 7623 sd_label, CE_WARN, 7624 "disk capacity is too large " 7625 "for current cdb length"); 7626 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7627 7628 goto spinup_failed; 7629 case EACCES: 7630 /* 7631 * Should never get here if the spin-up 7632 * succeeded, but code it in anyway. 7633 * From here, just continue with the attach... 7634 */ 7635 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7636 "sd_unit_attach: un:0x%p " 7637 "sd_send_scsi_READ_CAPACITY " 7638 "returned reservation conflict\n", un); 7639 reservation_flag = SD_TARGET_IS_RESERVED; 7640 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7641 break; 7642 default: 7643 /* 7644 * Likewise, should never get here if the 7645 * spin-up succeeded. Just continue with 7646 * the attach... 7647 */ 7648 if (status == EIO) 7649 sd_ssc_assessment(ssc, 7650 SD_FMT_STATUS_CHECK); 7651 else 7652 sd_ssc_assessment(ssc, 7653 SD_FMT_IGNORE); 7654 break; 7655 } 7656 break; 7657 case EACCES: 7658 /* 7659 * Device is reserved by another host. In this case 7660 * we could not spin it up or read the capacity, but 7661 * we continue with the attach anyway. 7662 */ 7663 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7664 "sd_unit_attach: un:0x%p spin-up reservation " 7665 "conflict.\n", un); 7666 reservation_flag = SD_TARGET_IS_RESERVED; 7667 break; 7668 default: 7669 /* Fail the attach if the spin-up failed. 
*/ 7670 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7671 "sd_unit_attach: un:0x%p spin-up failed.", un); 7672 goto spinup_failed; 7673 } 7674 7675 } 7676 7677 /* 7678 * Check to see if this is a MMC drive 7679 */ 7680 if (ISCD(un)) { 7681 sd_set_mmc_caps(ssc); 7682 } 7683 7684 7685 /* 7686 * Add a zero-length attribute to tell the world we support 7687 * kernel ioctls (for layered drivers) 7688 */ 7689 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7690 DDI_KERNEL_IOCTL, NULL, 0); 7691 7692 /* 7693 * Add a boolean property to tell the world we support 7694 * the B_FAILFAST flag (for layered drivers) 7695 */ 7696 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7697 "ddi-failfast-supported", NULL, 0); 7698 7699 /* 7700 * Initialize power management 7701 */ 7702 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7703 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7704 sd_setup_pm(ssc, devi); 7705 if (un->un_f_pm_is_enabled == FALSE) { 7706 /* 7707 * For performance, point to a jump table that does 7708 * not include pm. 7709 * The direct and priority chains don't change with PM. 7710 * 7711 * Note: this is currently done based on individual device 7712 * capabilities. When an interface for determining system 7713 * power enabled state becomes available, or when additional 7714 * layers are added to the command chain, these values will 7715 * have to be re-evaluated for correctness. 7716 */ 7717 if (un->un_f_non_devbsize_supported) { 7718 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7719 } else { 7720 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7721 } 7722 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7723 } 7724 7725 /* 7726 * This property is set to 0 by HA software to avoid retries 7727 * on a reserved disk. (The preferred property name is 7728 * "retry-on-reservation-conflict") (1189689) 7729 * 7730 * Note: The use of a global here can have unintended consequences. A 7731 * per instance variable is preferable to match the capabilities of 7732 * different underlying hba's (4402600) 7733 */ 7734 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7735 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7736 sd_retry_on_reservation_conflict); 7737 if (sd_retry_on_reservation_conflict != 0) { 7738 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7739 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7740 sd_retry_on_reservation_conflict); 7741 } 7742 7743 /* Set up options for QFULL handling. */ 7744 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7745 "qfull-retries", -1)) != -1) { 7746 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7747 rval, 1); 7748 } 7749 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7750 "qfull-retry-interval", -1)) != -1) { 7751 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7752 rval, 1); 7753 } 7754 7755 /* 7756 * This just prints a message that announces the existence of the 7757 * device. The message is always printed in the system logfile, but 7758 * only appears on the console if the system is booted with the 7759 * -v (verbose) argument. 
7760 */ 7761 ddi_report_dev(devi); 7762 7763 un->un_mediastate = DKIO_NONE; 7764 7765 cmlb_alloc_handle(&un->un_cmlbhandle); 7766 7767 #if defined(__i386) || defined(__amd64) 7768 /* 7769 * On x86, compensate for off-by-1 legacy error 7770 */ 7771 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7772 (lbasize == un->un_sys_blocksize)) 7773 offbyone = CMLB_OFF_BY_ONE; 7774 #endif 7775 7776 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7777 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7778 un->un_node_type, offbyone, un->un_cmlbhandle, 7779 (void *)SD_PATH_DIRECT) != 0) { 7780 goto cmlb_attach_failed; 7781 } 7782 7783 7784 /* 7785 * Read and validate the device's geometry (ie, disk label) 7786 * A new unformatted drive will not have a valid geometry, but 7787 * the driver needs to successfully attach to this device so 7788 * the drive can be formatted via ioctls. 7789 */ 7790 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7791 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 7792 7793 mutex_enter(SD_MUTEX(un)); 7794 7795 /* 7796 * Read and initialize the devid for the unit. 7797 */ 7798 if (un->un_f_devid_supported) { 7799 sd_register_devid(ssc, devi, reservation_flag); 7800 } 7801 mutex_exit(SD_MUTEX(un)); 7802 7803 #if (defined(__fibre)) 7804 /* 7805 * Register callbacks for fibre only. You can't do this solely 7806 * on the basis of the devid_type because this is hba specific. 7807 * We need to query our hba capabilities to find out whether to 7808 * register or not. 7809 */ 7810 if (un->un_f_is_fibre) { 7811 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7812 sd_init_event_callbacks(un); 7813 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7814 "sd_unit_attach: un:0x%p event callbacks inserted", 7815 un); 7816 } 7817 } 7818 #endif 7819 7820 if (un->un_f_opt_disable_cache == TRUE) { 7821 /* 7822 * Disable both read cache and write cache. This is 7823 * the historic behavior of the keywords in the config file. 7824 */ 7825 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7826 0) { 7827 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7828 "sd_unit_attach: un:0x%p Could not disable " 7829 "caching", un); 7830 goto devid_failed; 7831 } 7832 } 7833 7834 /* 7835 * Check the value of the WCE bit now and 7836 * set un_f_write_cache_enabled accordingly. 7837 */ 7838 (void) sd_get_write_cache_enabled(ssc, &wc_enabled); 7839 mutex_enter(SD_MUTEX(un)); 7840 un->un_f_write_cache_enabled = (wc_enabled != 0); 7841 mutex_exit(SD_MUTEX(un)); 7842 7843 /* 7844 * Check the value of the NV_SUP bit and set 7845 * un_f_suppress_cache_flush accordingly. 7846 */ 7847 sd_get_nv_sup(ssc); 7848 7849 /* 7850 * Find out what type of reservation this disk supports. 7851 */ 7852 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL); 7853 7854 switch (status) { 7855 case 0: 7856 /* 7857 * SCSI-3 reservations are supported. 7858 */ 7859 un->un_reservation_type = SD_SCSI3_RESERVATION; 7860 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7861 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 7862 break; 7863 case ENOTSUP: 7864 /* 7865 * The PERSISTENT RESERVE IN command would not be recognized by 7866 * a SCSI-2 device, so assume the reservation type is SCSI-2. 
7867 */ 7868 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7869 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 7870 un->un_reservation_type = SD_SCSI2_RESERVATION; 7871 7872 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7873 break; 7874 default: 7875 /* 7876 * default to SCSI-3 reservations 7877 */ 7878 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7879 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 7880 un->un_reservation_type = SD_SCSI3_RESERVATION; 7881 7882 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7883 break; 7884 } 7885 7886 /* 7887 * Set the pstat and error stat values here, so data obtained during the 7888 * previous attach-time routines is available. 7889 * 7890 * Note: This is a critical sequence that needs to be maintained: 7891 * 1) Instantiate the kstats before any routines using the iopath 7892 * (i.e. sd_send_scsi_cmd). 7893 * 2) Initialize the error stats (sd_set_errstats) and partition 7894 * stats (sd_set_pstats)here, following 7895 * cmlb_validate_geometry(), sd_register_devid(), and 7896 * sd_cache_control(). 7897 */ 7898 7899 if (un->un_f_pkstats_enabled && geom_label_valid) { 7900 sd_set_pstats(un); 7901 SD_TRACE(SD_LOG_IO_PARTITION, un, 7902 "sd_unit_attach: un:0x%p pstats created and set\n", un); 7903 } 7904 7905 sd_set_errstats(un); 7906 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7907 "sd_unit_attach: un:0x%p errstats set\n", un); 7908 7909 7910 /* 7911 * After successfully attaching an instance, we record the information 7912 * of how many luns have been attached on the relative target and 7913 * controller for parallel SCSI. This information is used when sd tries 7914 * to set the tagged queuing capability in HBA. 7915 */ 7916 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 7917 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 7918 } 7919 7920 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7921 "sd_unit_attach: un:0x%p exit success\n", un); 7922 7923 /* Uninitialize sd_ssc_t pointer */ 7924 sd_ssc_fini(ssc); 7925 7926 return (DDI_SUCCESS); 7927 7928 /* 7929 * An error occurred during the attach; clean up & return failure. 7930 */ 7931 7932 devid_failed: 7933 7934 setup_pm_failed: 7935 ddi_remove_minor_node(devi, NULL); 7936 7937 cmlb_attach_failed: 7938 /* 7939 * Cleanup from the scsi_ifsetcap() calls (437868) 7940 */ 7941 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 7942 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 7943 7944 /* 7945 * Refer to the comments of setting tagged-qing in the beginning of 7946 * sd_unit_attach. We can only disable tagged queuing when there is 7947 * no lun attached on the target. 7948 */ 7949 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7950 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7951 } 7952 7953 if (un->un_f_is_fibre == FALSE) { 7954 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7955 } 7956 7957 spinup_failed: 7958 7959 /* Uninitialize sd_ssc_t pointer */ 7960 sd_ssc_fini(ssc); 7961 7962 mutex_enter(SD_MUTEX(un)); 7963 7964 /* Deallocate SCSI FMA memory spaces */ 7965 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 7966 7967 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. 
restart */ 7968 if (un->un_direct_priority_timeid != NULL) { 7969 timeout_id_t temp_id = un->un_direct_priority_timeid; 7970 un->un_direct_priority_timeid = NULL; 7971 mutex_exit(SD_MUTEX(un)); 7972 (void) untimeout(temp_id); 7973 mutex_enter(SD_MUTEX(un)); 7974 } 7975 7976 /* Cancel any pending start/stop timeouts */ 7977 if (un->un_startstop_timeid != NULL) { 7978 timeout_id_t temp_id = un->un_startstop_timeid; 7979 un->un_startstop_timeid = NULL; 7980 mutex_exit(SD_MUTEX(un)); 7981 (void) untimeout(temp_id); 7982 mutex_enter(SD_MUTEX(un)); 7983 } 7984 7985 /* Cancel any pending reset-throttle timeouts */ 7986 if (un->un_reset_throttle_timeid != NULL) { 7987 timeout_id_t temp_id = un->un_reset_throttle_timeid; 7988 un->un_reset_throttle_timeid = NULL; 7989 mutex_exit(SD_MUTEX(un)); 7990 (void) untimeout(temp_id); 7991 mutex_enter(SD_MUTEX(un)); 7992 } 7993 7994 /* Cancel any pending retry timeouts */ 7995 if (un->un_retry_timeid != NULL) { 7996 timeout_id_t temp_id = un->un_retry_timeid; 7997 un->un_retry_timeid = NULL; 7998 mutex_exit(SD_MUTEX(un)); 7999 (void) untimeout(temp_id); 8000 mutex_enter(SD_MUTEX(un)); 8001 } 8002 8003 /* Cancel any pending delayed cv broadcast timeouts */ 8004 if (un->un_dcvb_timeid != NULL) { 8005 timeout_id_t temp_id = un->un_dcvb_timeid; 8006 un->un_dcvb_timeid = NULL; 8007 mutex_exit(SD_MUTEX(un)); 8008 (void) untimeout(temp_id); 8009 mutex_enter(SD_MUTEX(un)); 8010 } 8011 8012 mutex_exit(SD_MUTEX(un)); 8013 8014 /* There should not be any in-progress I/O so ASSERT this check */ 8015 ASSERT(un->un_ncmds_in_transport == 0); 8016 ASSERT(un->un_ncmds_in_driver == 0); 8017 8018 /* Do not free the softstate if the callback routine is active */ 8019 sd_sync_with_callback(un); 8020 8021 /* 8022 * Partition stats apparently are not used with removables. These would 8023 * not have been created during attach, so no need to clean them up... 8024 */ 8025 if (un->un_errstats != NULL) { 8026 kstat_delete(un->un_errstats); 8027 un->un_errstats = NULL; 8028 } 8029 8030 create_errstats_failed: 8031 8032 if (un->un_stats != NULL) { 8033 kstat_delete(un->un_stats); 8034 un->un_stats = NULL; 8035 } 8036 8037 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8038 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8039 8040 ddi_prop_remove_all(devi); 8041 sema_destroy(&un->un_semoclose); 8042 cv_destroy(&un->un_state_cv); 8043 8044 getrbuf_failed: 8045 8046 sd_free_rqs(un); 8047 8048 alloc_rqs_failed: 8049 8050 devp->sd_private = NULL; 8051 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8052 8053 get_softstate_failed: 8054 /* 8055 * Note: the man pages are unclear as to whether or not doing a 8056 * ddi_soft_state_free(sd_state, instance) is the right way to 8057 * clean up after the ddi_soft_state_zalloc() if the subsequent 8058 * ddi_get_soft_state() fails. The implication seems to be 8059 * that the get_soft_state cannot fail if the zalloc succeeds. 8060 */ 8061 ddi_soft_state_free(sd_state, instance); 8062 8063 probe_failed: 8064 scsi_unprobe(devp); 8065 8066 return (DDI_FAILURE); 8067 } 8068 8069 8070 /* 8071 * Function: sd_unit_detach 8072 * 8073 * Description: Performs DDI_DETACH processing for sddetach(). 
8074 * 8075 * Return Code: DDI_SUCCESS 8076 * DDI_FAILURE 8077 * 8078 * Context: Kernel thread context 8079 */ 8080 8081 static int 8082 sd_unit_detach(dev_info_t *devi) 8083 { 8084 struct scsi_device *devp; 8085 struct sd_lun *un; 8086 int i; 8087 int tgt; 8088 dev_t dev; 8089 dev_info_t *pdip = ddi_get_parent(devi); 8090 int instance = ddi_get_instance(devi); 8091 8092 mutex_enter(&sd_detach_mutex); 8093 8094 /* 8095 * Fail the detach for any of the following: 8096 * - Unable to get the sd_lun struct for the instance 8097 * - A layered driver has an outstanding open on the instance 8098 * - Another thread is already detaching this instance 8099 * - Another thread is currently performing an open 8100 */ 8101 devp = ddi_get_driver_private(devi); 8102 if ((devp == NULL) || 8103 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8104 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8105 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8106 mutex_exit(&sd_detach_mutex); 8107 return (DDI_FAILURE); 8108 } 8109 8110 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8111 8112 /* 8113 * Mark this instance as currently in a detach, to inhibit any 8114 * opens from a layered driver. 8115 */ 8116 un->un_detach_count++; 8117 mutex_exit(&sd_detach_mutex); 8118 8119 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 8120 SCSI_ADDR_PROP_TARGET, -1); 8121 8122 dev = sd_make_device(SD_DEVINFO(un)); 8123 8124 #ifndef lint 8125 _NOTE(COMPETING_THREADS_NOW); 8126 #endif 8127 8128 mutex_enter(SD_MUTEX(un)); 8129 8130 /* 8131 * Fail the detach if there are any outstanding layered 8132 * opens on this device. 8133 */ 8134 for (i = 0; i < NDKMAP; i++) { 8135 if (un->un_ocmap.lyropen[i] != 0) { 8136 goto err_notclosed; 8137 } 8138 } 8139 8140 /* 8141 * Verify there are NO outstanding commands issued to this device. 8142 * ie, un_ncmds_in_transport == 0. 8143 * It's possible to have outstanding commands through the physio 8144 * code path, even though everything's closed. 8145 */ 8146 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8147 (un->un_direct_priority_timeid != NULL) || 8148 (un->un_state == SD_STATE_RWAIT)) { 8149 mutex_exit(SD_MUTEX(un)); 8150 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8151 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8152 goto err_stillbusy; 8153 } 8154 8155 /* 8156 * If we have the device reserved, release the reservation. 8157 */ 8158 if ((un->un_resvd_status & SD_RESERVE) && 8159 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8160 mutex_exit(SD_MUTEX(un)); 8161 /* 8162 * Note: sd_reserve_release sends a command to the device 8163 * via the sd_ioctlcmd() path, and can sleep. 8164 */ 8165 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8166 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8167 "sd_dr_detach: Cannot release reservation \n"); 8168 } 8169 } else { 8170 mutex_exit(SD_MUTEX(un)); 8171 } 8172 8173 /* 8174 * Untimeout any reserve recover, throttle reset, restart unit 8175 * and delayed broadcast timeout threads. Protect the timeout pointer 8176 * from getting nulled by their callback functions. 
8177 */ 8178 mutex_enter(SD_MUTEX(un)); 8179 if (un->un_resvd_timeid != NULL) { 8180 timeout_id_t temp_id = un->un_resvd_timeid; 8181 un->un_resvd_timeid = NULL; 8182 mutex_exit(SD_MUTEX(un)); 8183 (void) untimeout(temp_id); 8184 mutex_enter(SD_MUTEX(un)); 8185 } 8186 8187 if (un->un_reset_throttle_timeid != NULL) { 8188 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8189 un->un_reset_throttle_timeid = NULL; 8190 mutex_exit(SD_MUTEX(un)); 8191 (void) untimeout(temp_id); 8192 mutex_enter(SD_MUTEX(un)); 8193 } 8194 8195 if (un->un_startstop_timeid != NULL) { 8196 timeout_id_t temp_id = un->un_startstop_timeid; 8197 un->un_startstop_timeid = NULL; 8198 mutex_exit(SD_MUTEX(un)); 8199 (void) untimeout(temp_id); 8200 mutex_enter(SD_MUTEX(un)); 8201 } 8202 8203 if (un->un_dcvb_timeid != NULL) { 8204 timeout_id_t temp_id = un->un_dcvb_timeid; 8205 un->un_dcvb_timeid = NULL; 8206 mutex_exit(SD_MUTEX(un)); 8207 (void) untimeout(temp_id); 8208 } else { 8209 mutex_exit(SD_MUTEX(un)); 8210 } 8211 8212 /* Remove any pending reservation reclaim requests for this device */ 8213 sd_rmv_resv_reclaim_req(dev); 8214 8215 mutex_enter(SD_MUTEX(un)); 8216 8217 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8218 if (un->un_direct_priority_timeid != NULL) { 8219 timeout_id_t temp_id = un->un_direct_priority_timeid; 8220 un->un_direct_priority_timeid = NULL; 8221 mutex_exit(SD_MUTEX(un)); 8222 (void) untimeout(temp_id); 8223 mutex_enter(SD_MUTEX(un)); 8224 } 8225 8226 /* Cancel any active multi-host disk watch thread requests */ 8227 if (un->un_mhd_token != NULL) { 8228 mutex_exit(SD_MUTEX(un)); 8229 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8230 if (scsi_watch_request_terminate(un->un_mhd_token, 8231 SCSI_WATCH_TERMINATE_NOWAIT)) { 8232 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8233 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8234 /* 8235 * Note: We are returning here after having removed 8236 * some driver timeouts above. This is consistent with 8237 * the legacy implementation but perhaps the watch 8238 * terminate call should be made with the wait flag set. 8239 */ 8240 goto err_stillbusy; 8241 } 8242 mutex_enter(SD_MUTEX(un)); 8243 un->un_mhd_token = NULL; 8244 } 8245 8246 if (un->un_swr_token != NULL) { 8247 mutex_exit(SD_MUTEX(un)); 8248 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8249 if (scsi_watch_request_terminate(un->un_swr_token, 8250 SCSI_WATCH_TERMINATE_NOWAIT)) { 8251 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8252 "sd_dr_detach: Cannot cancel swr watch request\n"); 8253 /* 8254 * Note: We are returning here after having removed 8255 * some driver timeouts above. This is consistent with 8256 * the legacy implementation but perhaps the watch 8257 * terminate call should be made with the wait flag set. 8258 */ 8259 goto err_stillbusy; 8260 } 8261 mutex_enter(SD_MUTEX(un)); 8262 un->un_swr_token = NULL; 8263 } 8264 8265 mutex_exit(SD_MUTEX(un)); 8266 8267 /* 8268 * Clear any scsi_reset_notifies. We clear the reset notifies 8269 * if we have not registered one. 8270 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 8271 */ 8272 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8273 sd_mhd_reset_notify_cb, (caddr_t)un); 8274 8275 /* 8276 * protect the timeout pointers from getting nulled by 8277 * their callback functions during the cancellation process. 8278 * In such a scenario untimeout can be invoked with a null value. 
8279 */ 8280 _NOTE(NO_COMPETING_THREADS_NOW); 8281 8282 mutex_enter(&un->un_pm_mutex); 8283 if (un->un_pm_idle_timeid != NULL) { 8284 timeout_id_t temp_id = un->un_pm_idle_timeid; 8285 un->un_pm_idle_timeid = NULL; 8286 mutex_exit(&un->un_pm_mutex); 8287 8288 /* 8289 * Timeout is active; cancel it. 8290 * Note that it'll never be active on a device 8291 * that does not support PM therefore we don't 8292 * have to check before calling pm_idle_component. 8293 */ 8294 (void) untimeout(temp_id); 8295 (void) pm_idle_component(SD_DEVINFO(un), 0); 8296 mutex_enter(&un->un_pm_mutex); 8297 } 8298 8299 /* 8300 * Check whether there is already a timeout scheduled for power 8301 * management. If yes then don't lower the power here, that's. 8302 * the timeout handler's job. 8303 */ 8304 if (un->un_pm_timeid != NULL) { 8305 timeout_id_t temp_id = un->un_pm_timeid; 8306 un->un_pm_timeid = NULL; 8307 mutex_exit(&un->un_pm_mutex); 8308 /* 8309 * Timeout is active; cancel it. 8310 * Note that it'll never be active on a device 8311 * that does not support PM therefore we don't 8312 * have to check before calling pm_idle_component. 8313 */ 8314 (void) untimeout(temp_id); 8315 (void) pm_idle_component(SD_DEVINFO(un), 0); 8316 8317 } else { 8318 mutex_exit(&un->un_pm_mutex); 8319 if ((un->un_f_pm_is_enabled == TRUE) && 8320 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 8321 DDI_SUCCESS)) { 8322 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8323 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8324 /* 8325 * Fix for bug: 4297749, item # 13 8326 * The above test now includes a check to see if PM is 8327 * supported by this device before call 8328 * pm_lower_power(). 8329 * Note, the following is not dead code. The call to 8330 * pm_lower_power above will generate a call back into 8331 * our sdpower routine which might result in a timeout 8332 * handler getting activated. Therefore the following 8333 * code is valid and necessary. 8334 */ 8335 mutex_enter(&un->un_pm_mutex); 8336 if (un->un_pm_timeid != NULL) { 8337 timeout_id_t temp_id = un->un_pm_timeid; 8338 un->un_pm_timeid = NULL; 8339 mutex_exit(&un->un_pm_mutex); 8340 (void) untimeout(temp_id); 8341 (void) pm_idle_component(SD_DEVINFO(un), 0); 8342 } else { 8343 mutex_exit(&un->un_pm_mutex); 8344 } 8345 } 8346 } 8347 8348 /* 8349 * Cleanup from the scsi_ifsetcap() calls (437868) 8350 * Relocated here from above to be after the call to 8351 * pm_lower_power, which was getting errors. 8352 */ 8353 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8354 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8355 8356 /* 8357 * Currently, tagged queuing is supported per target based by HBA. 8358 * Setting this per lun instance actually sets the capability of this 8359 * target in HBA, which affects those luns already attached on the 8360 * same target. So during detach, we can only disable this capability 8361 * only when this is the only lun left on this target. By doing 8362 * this, we assume a target has the same tagged queuing capability 8363 * for every lun. The condition can be removed when HBA is changed to 8364 * support per lun based tagged queuing capability. 
8365 */ 8366 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8367 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8368 } 8369 8370 if (un->un_f_is_fibre == FALSE) { 8371 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8372 } 8373 8374 /* 8375 * Remove any event callbacks, fibre only 8376 */ 8377 if (un->un_f_is_fibre == TRUE) { 8378 if ((un->un_insert_event != NULL) && 8379 (ddi_remove_event_handler(un->un_insert_cb_id) != 8380 DDI_SUCCESS)) { 8381 /* 8382 * Note: We are returning here after having done 8383 * substantial cleanup above. This is consistent 8384 * with the legacy implementation but this may not 8385 * be the right thing to do. 8386 */ 8387 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8388 "sd_dr_detach: Cannot cancel insert event\n"); 8389 goto err_remove_event; 8390 } 8391 un->un_insert_event = NULL; 8392 8393 if ((un->un_remove_event != NULL) && 8394 (ddi_remove_event_handler(un->un_remove_cb_id) != 8395 DDI_SUCCESS)) { 8396 /* 8397 * Note: We are returning here after having done 8398 * substantial cleanup above. This is consistent 8399 * with the legacy implementation but this may not 8400 * be the right thing to do. 8401 */ 8402 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8403 "sd_dr_detach: Cannot cancel remove event\n"); 8404 goto err_remove_event; 8405 } 8406 un->un_remove_event = NULL; 8407 } 8408 8409 /* Do not free the softstate if the callback routine is active */ 8410 sd_sync_with_callback(un); 8411 8412 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 8413 cmlb_free_handle(&un->un_cmlbhandle); 8414 8415 /* 8416 * Hold the detach mutex here, to make sure that no other threads ever 8417 * can access a (partially) freed soft state structure. 8418 */ 8419 mutex_enter(&sd_detach_mutex); 8420 8421 /* 8422 * Clean up the soft state struct. 8423 * Cleanup is done in reverse order of allocs/inits. 8424 * At this point there should be no competing threads anymore. 8425 */ 8426 8427 scsi_fm_fini(devp); 8428 8429 /* 8430 * Deallocate memory for SCSI FMA. 8431 */ 8432 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8433 8434 /* Unregister and free device id. */ 8435 ddi_devid_unregister(devi); 8436 if (un->un_devid) { 8437 ddi_devid_free(un->un_devid); 8438 un->un_devid = NULL; 8439 } 8440 8441 /* 8442 * Destroy wmap cache if it exists. 8443 */ 8444 if (un->un_wm_cache != NULL) { 8445 kmem_cache_destroy(un->un_wm_cache); 8446 un->un_wm_cache = NULL; 8447 } 8448 8449 /* 8450 * kstat cleanup is done in detach for all device types (4363169). 8451 * We do not want to fail detach if the device kstats are not deleted 8452 * since there is a confusion about the devo_refcnt for the device. 8453 * We just delete the kstats and let detach complete successfully. 
8454 */ 8455 if (un->un_stats != NULL) { 8456 kstat_delete(un->un_stats); 8457 un->un_stats = NULL; 8458 } 8459 if (un->un_errstats != NULL) { 8460 kstat_delete(un->un_errstats); 8461 un->un_errstats = NULL; 8462 } 8463 8464 /* Remove partition stats */ 8465 if (un->un_f_pkstats_enabled) { 8466 for (i = 0; i < NSDMAP; i++) { 8467 if (un->un_pstats[i] != NULL) { 8468 kstat_delete(un->un_pstats[i]); 8469 un->un_pstats[i] = NULL; 8470 } 8471 } 8472 } 8473 8474 /* Remove xbuf registration */ 8475 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8476 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8477 8478 /* Remove driver properties */ 8479 ddi_prop_remove_all(devi); 8480 8481 mutex_destroy(&un->un_pm_mutex); 8482 cv_destroy(&un->un_pm_busy_cv); 8483 8484 cv_destroy(&un->un_wcc_cv); 8485 8486 /* Open/close semaphore */ 8487 sema_destroy(&un->un_semoclose); 8488 8489 /* Removable media condvar. */ 8490 cv_destroy(&un->un_state_cv); 8491 8492 /* Suspend/resume condvar. */ 8493 cv_destroy(&un->un_suspend_cv); 8494 cv_destroy(&un->un_disk_busy_cv); 8495 8496 sd_free_rqs(un); 8497 8498 /* Free up soft state */ 8499 devp->sd_private = NULL; 8500 8501 bzero(un, sizeof (struct sd_lun)); 8502 ddi_soft_state_free(sd_state, instance); 8503 8504 mutex_exit(&sd_detach_mutex); 8505 8506 /* This frees up the INQUIRY data associated with the device. */ 8507 scsi_unprobe(devp); 8508 8509 /* 8510 * After successfully detaching an instance, we update the information 8511 * of how many luns have been attached in the relative target and 8512 * controller for parallel SCSI. This information is used when sd tries 8513 * to set the tagged queuing capability in HBA. 8514 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8515 * check if the device is parallel SCSI. However, we don't need to 8516 * check here because we've already checked during attach. No device 8517 * that is not parallel SCSI is in the chain. 8518 */ 8519 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8520 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8521 } 8522 8523 return (DDI_SUCCESS); 8524 8525 err_notclosed: 8526 mutex_exit(SD_MUTEX(un)); 8527 8528 err_stillbusy: 8529 _NOTE(NO_COMPETING_THREADS_NOW); 8530 8531 err_remove_event: 8532 mutex_enter(&sd_detach_mutex); 8533 un->un_detach_count--; 8534 mutex_exit(&sd_detach_mutex); 8535 8536 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8537 return (DDI_FAILURE); 8538 } 8539 8540 8541 /* 8542 * Function: sd_create_errstats 8543 * 8544 * Description: This routine instantiates the device error stats. 8545 * 8546 * Note: During attach the stats are instantiated first so they are 8547 * available for attach-time routines that utilize the driver 8548 * iopath to send commands to the device. The stats are initialized 8549 * separately so data obtained during some attach-time routines is 8550 * available. 
(4362483) 8551 * 8552 * Arguments: un - driver soft state (unit) structure 8553 * instance - driver instance 8554 * 8555 * Context: Kernel thread context 8556 */ 8557 8558 static void 8559 sd_create_errstats(struct sd_lun *un, int instance) 8560 { 8561 struct sd_errstats *stp; 8562 char kstatmodule_err[KSTAT_STRLEN]; 8563 char kstatname[KSTAT_STRLEN]; 8564 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8565 8566 ASSERT(un != NULL); 8567 8568 if (un->un_errstats != NULL) { 8569 return; 8570 } 8571 8572 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8573 "%serr", sd_label); 8574 (void) snprintf(kstatname, sizeof (kstatname), 8575 "%s%d,err", sd_label, instance); 8576 8577 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8578 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8579 8580 if (un->un_errstats == NULL) { 8581 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8582 "sd_create_errstats: Failed kstat_create\n"); 8583 return; 8584 } 8585 8586 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8587 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8588 KSTAT_DATA_UINT32); 8589 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8590 KSTAT_DATA_UINT32); 8591 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8592 KSTAT_DATA_UINT32); 8593 kstat_named_init(&stp->sd_vid, "Vendor", 8594 KSTAT_DATA_CHAR); 8595 kstat_named_init(&stp->sd_pid, "Product", 8596 KSTAT_DATA_CHAR); 8597 kstat_named_init(&stp->sd_revision, "Revision", 8598 KSTAT_DATA_CHAR); 8599 kstat_named_init(&stp->sd_serial, "Serial No", 8600 KSTAT_DATA_CHAR); 8601 kstat_named_init(&stp->sd_capacity, "Size", 8602 KSTAT_DATA_ULONGLONG); 8603 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8604 KSTAT_DATA_UINT32); 8605 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8606 KSTAT_DATA_UINT32); 8607 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8608 KSTAT_DATA_UINT32); 8609 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8610 KSTAT_DATA_UINT32); 8611 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8612 KSTAT_DATA_UINT32); 8613 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8614 KSTAT_DATA_UINT32); 8615 8616 un->un_errstats->ks_private = un; 8617 un->un_errstats->ks_update = nulldev; 8618 8619 kstat_install(un->un_errstats); 8620 } 8621 8622 8623 /* 8624 * Function: sd_set_errstats 8625 * 8626 * Description: This routine sets the value of the vendor id, product id, 8627 * revision, serial number, and capacity device error stats. 8628 * 8629 * Note: During attach the stats are instantiated first so they are 8630 * available for attach-time routines that utilize the driver 8631 * iopath to send commands to the device. The stats are initialized 8632 * separately so data obtained during some attach-time routines is 8633 * available. 
(4362483) 8634 * 8635 * Arguments: un - driver soft state (unit) structure 8636 * 8637 * Context: Kernel thread context 8638 */ 8639 8640 static void 8641 sd_set_errstats(struct sd_lun *un) 8642 { 8643 struct sd_errstats *stp; 8644 8645 ASSERT(un != NULL); 8646 ASSERT(un->un_errstats != NULL); 8647 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8648 ASSERT(stp != NULL); 8649 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 8650 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 8651 (void) strncpy(stp->sd_revision.value.c, 8652 un->un_sd->sd_inq->inq_revision, 4); 8653 8654 /* 8655 * All the errstats are persistent across detach/attach, 8656 * so reset them all here to handle hot replacement of 8657 * disk drives, except when the same Sun-qualified drive 8658 * is still present. 8659 */ 8660 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 8661 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8662 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 8663 stp->sd_softerrs.value.ui32 = 0; 8664 stp->sd_harderrs.value.ui32 = 0; 8665 stp->sd_transerrs.value.ui32 = 0; 8666 stp->sd_rq_media_err.value.ui32 = 0; 8667 stp->sd_rq_ntrdy_err.value.ui32 = 0; 8668 stp->sd_rq_nodev_err.value.ui32 = 0; 8669 stp->sd_rq_recov_err.value.ui32 = 0; 8670 stp->sd_rq_illrq_err.value.ui32 = 0; 8671 stp->sd_rq_pfa_err.value.ui32 = 0; 8672 } 8673 8674 /* 8675 * Set the "Serial No" kstat for Sun qualified drives (indicated by 8676 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 8677 * (4376302)) 8678 */ 8679 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 8680 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 8681 sizeof (SD_INQUIRY(un)->inq_serial)); 8682 } 8683 8684 if (un->un_f_blockcount_is_valid != TRUE) { 8685 /* 8686 * Set capacity error stat to 0 for no media. This ensures 8687 * a valid capacity is displayed in response to 'iostat -E' 8688 * when no media is present in the device. 8689 */ 8690 stp->sd_capacity.value.ui64 = 0; 8691 } else { 8692 /* 8693 * Multiply un_blockcount by un->un_sys_blocksize to get 8694 * capacity. 8695 * 8696 * Note: for non-512 blocksize devices "un_blockcount" has been 8697 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 8698 * (un_tgt_blocksize / un->un_sys_blocksize). 8699 */ 8700 stp->sd_capacity.value.ui64 = (uint64_t) 8701 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 8702 } 8703 } 8704 8705 8706 /* 8707 * Function: sd_set_pstats 8708 * 8709 * Description: This routine instantiates and initializes the partition 8710 * stats for each partition with more than zero blocks. 8711 * (4363169) 8712 * 8713 * Arguments: un - driver soft state (unit) structure 8714 * 8715 * Context: Kernel thread context 8716 */ 8717 8718 static void 8719 sd_set_pstats(struct sd_lun *un) 8720 { 8721 char kstatname[KSTAT_STRLEN]; 8722 int instance; 8723 int i; 8724 diskaddr_t nblks = 0; 8725 char *partname = NULL; 8726 8727 ASSERT(un != NULL); 8728 8729 instance = ddi_get_instance(SD_DEVINFO(un)); 8730 8731 /* Note: x86: is this a VTOC8/VTOC16 difference?
*/ 8732 for (i = 0; i < NSDMAP; i++) { 8733 8734 if (cmlb_partinfo(un->un_cmlbhandle, i, 8735 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 8736 continue; 8737 mutex_enter(SD_MUTEX(un)); 8738 8739 if ((un->un_pstats[i] == NULL) && 8740 (nblks != 0)) { 8741 8742 (void) snprintf(kstatname, sizeof (kstatname), 8743 "%s%d,%s", sd_label, instance, 8744 partname); 8745 8746 un->un_pstats[i] = kstat_create(sd_label, 8747 instance, kstatname, "partition", KSTAT_TYPE_IO, 8748 1, KSTAT_FLAG_PERSISTENT); 8749 if (un->un_pstats[i] != NULL) { 8750 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 8751 kstat_install(un->un_pstats[i]); 8752 } 8753 } 8754 mutex_exit(SD_MUTEX(un)); 8755 } 8756 } 8757 8758 8759 #if (defined(__fibre)) 8760 /* 8761 * Function: sd_init_event_callbacks 8762 * 8763 * Description: This routine initializes the insertion and removal event 8764 * callbacks. (fibre only) 8765 * 8766 * Arguments: un - driver soft state (unit) structure 8767 * 8768 * Context: Kernel thread context 8769 */ 8770 8771 static void 8772 sd_init_event_callbacks(struct sd_lun *un) 8773 { 8774 ASSERT(un != NULL); 8775 8776 if ((un->un_insert_event == NULL) && 8777 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 8778 &un->un_insert_event) == DDI_SUCCESS)) { 8779 /* 8780 * Add the callback for an insertion event 8781 */ 8782 (void) ddi_add_event_handler(SD_DEVINFO(un), 8783 un->un_insert_event, sd_event_callback, (void *)un, 8784 &(un->un_insert_cb_id)); 8785 } 8786 8787 if ((un->un_remove_event == NULL) && 8788 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 8789 &un->un_remove_event) == DDI_SUCCESS)) { 8790 /* 8791 * Add the callback for a removal event 8792 */ 8793 (void) ddi_add_event_handler(SD_DEVINFO(un), 8794 un->un_remove_event, sd_event_callback, (void *)un, 8795 &(un->un_remove_cb_id)); 8796 } 8797 } 8798 8799 8800 /* 8801 * Function: sd_event_callback 8802 * 8803 * Description: This routine handles insert/remove events (photon). The 8804 * state is changed to OFFLINE which can be used to suppress 8805 * error msgs. (fibre only) 8806 * 8807 * Arguments: un - driver soft state (unit) structure 8808 * 8809 * Context: Callout thread context 8810 */ 8811 /* ARGSUSED */ 8812 static void 8813 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 8814 void *bus_impldata) 8815 { 8816 struct sd_lun *un = (struct sd_lun *)arg; 8817 8818 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 8819 if (event == un->un_insert_event) { 8820 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 8821 mutex_enter(SD_MUTEX(un)); 8822 if (un->un_state == SD_STATE_OFFLINE) { 8823 if (un->un_last_state != SD_STATE_SUSPENDED) { 8824 un->un_state = un->un_last_state; 8825 } else { 8826 /* 8827 * We have gone through SUSPEND/RESUME while 8828 * we were offline. Restore the last state. 8829 */ 8830 un->un_state = un->un_save_state; 8831 } 8832 } 8833 mutex_exit(SD_MUTEX(un)); 8834 8835 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 8836 } else if (event == un->un_remove_event) { 8837 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 8838 mutex_enter(SD_MUTEX(un)); 8839 /* 8840 * We need to handle an event callback that occurs during 8841 * the suspend operation, since we don't prevent it.
8842 		 */
8843 		if (un->un_state != SD_STATE_OFFLINE) {
8844 			if (un->un_state != SD_STATE_SUSPENDED) {
8845 				New_state(un, SD_STATE_OFFLINE);
8846 			} else {
8847 				un->un_last_state = SD_STATE_OFFLINE;
8848 			}
8849 		}
8850 		mutex_exit(SD_MUTEX(un));
8851 	} else {
8852 		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
8853 		    "!Unknown event\n");
8854 	}
8855
8856 }
8857 #endif
8858
8859 /*
8860  * Function: sd_cache_control()
8861  *
8862  * Description: This routine is the driver entry point for setting
8863  *		read and write caching by modifying the WCE (write cache
8864  *		enable) and RCD (read cache disable) bits of mode
8865  *		page 8 (MODEPAGE_CACHING).
8866  *
8867  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
8868  *		structure for this target.
8869  *		rcd_flag - flag for controlling the read cache
8870  *		wce_flag - flag for controlling the write cache
8871  *
8872  * Return Code: EIO
8873  *		code returned by sd_send_scsi_MODE_SENSE and
8874  *		sd_send_scsi_MODE_SELECT
8875  *
8876  * Context: Kernel Thread
8877  */
8878
8879 static int
8880 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag)
8881 {
8882 	struct mode_caching	*mode_caching_page;
8883 	uchar_t			*header;
8884 	size_t			buflen;
8885 	int			hdrlen;
8886 	int			bd_len;
8887 	int			rval = 0;
8888 	struct mode_header_grp2	*mhp;
8889 	struct sd_lun		*un;
8890 	int			status;
8891
8892 	ASSERT(ssc != NULL);
8893 	un = ssc->ssc_un;
8894 	ASSERT(un != NULL);
8895
8896 	/*
8897 	 * Do a test unit ready; otherwise, a mode sense may not work if this
8898 	 * is the first command sent to the device after boot.
8899 	 */
8900 	status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
8901 	if (status != 0)
8902 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8903
8904 	if (un->un_f_cfg_is_atapi == TRUE) {
8905 		hdrlen = MODE_HEADER_LENGTH_GRP2;
8906 	} else {
8907 		hdrlen = MODE_HEADER_LENGTH;
8908 	}
8909
8910 	/*
8911 	 * Allocate memory for the retrieved mode page and its headers. Set
8912 	 * a pointer to the page itself. Use mode_cache_scsi3 to ensure
8913 	 * we get all of the mode sense data; otherwise, the mode select
8914 	 * will fail. mode_cache_scsi3 is a superset of mode_caching.
8915 	 */
8916 	buflen = hdrlen + MODE_BLK_DESC_LENGTH +
8917 	    sizeof (struct mode_cache_scsi3);
8918
8919 	header = kmem_zalloc(buflen, KM_SLEEP);
8920
8921 	/* Get the information from the device. */
8922 	if (un->un_f_cfg_is_atapi == TRUE) {
8923 		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
8924 		    MODEPAGE_CACHING, SD_PATH_DIRECT);
8925 	} else {
8926 		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
8927 		    MODEPAGE_CACHING, SD_PATH_DIRECT);
8928 	}
8929
8930 	if (rval != 0) {
8931 		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
8932 		    "sd_cache_control: Mode Sense Failed\n");
8933 		goto mode_sense_failed;
8934 	}
8935
8936 	/*
8937 	 * Determine size of Block Descriptors in order to locate
8938 	 * the mode page data. ATAPI devices return 0, SCSI devices
8939 	 * should return MODE_BLK_DESC_LENGTH.
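	 *
	 * The returned data is laid out as:
	 *
	 *	+---------------+---------------------+-------------------+
	 *	| mode header   | block descriptor(s) | caching mode page |
	 *	| (hdrlen)      | (bd_len)            | (page 8 data)     |
	 *	+---------------+---------------------+-------------------+
	 *
	 * so the caching page itself starts at (header + hdrlen + bd_len),
	 * which is how mode_caching_page is computed below.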
8940 */ 8941 if (un->un_f_cfg_is_atapi == TRUE) { 8942 mhp = (struct mode_header_grp2 *)header; 8943 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8944 } else { 8945 bd_len = ((struct mode_header *)header)->bdesc_length; 8946 } 8947 8948 if (bd_len > MODE_BLK_DESC_LENGTH) { 8949 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 8950 "sd_cache_control: Mode Sense returned invalid " 8951 "block descriptor length\n"); 8952 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 8953 "sd_cache_control: Mode Sense returned invalid " 8954 "block descriptor length"); 8955 rval = EIO; 8956 goto mode_sense_failed; 8957 } 8958 8959 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8960 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8961 SD_ERROR(SD_LOG_COMMON, un, "sd_cache_control: Mode Sense" 8962 " caching page code mismatch %d\n", 8963 mode_caching_page->mode_page.code); 8964 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 8965 "sd_cache_control: Mode Sense caching page code " 8966 "mismatch %d", mode_caching_page->mode_page.code); 8967 rval = EIO; 8968 goto mode_sense_failed; 8969 } 8970 8971 /* Check the relevant bits on successful mode sense. */ 8972 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8973 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8974 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8975 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8976 8977 size_t sbuflen; 8978 uchar_t save_pg; 8979 8980 /* 8981 * Construct select buffer length based on the 8982 * length of the sense data returned. 8983 */ 8984 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 8985 sizeof (struct mode_page) + 8986 (int)mode_caching_page->mode_page.length; 8987 8988 /* 8989 * Set the caching bits as requested. 8990 */ 8991 if (rcd_flag == SD_CACHE_ENABLE) 8992 mode_caching_page->rcd = 0; 8993 else if (rcd_flag == SD_CACHE_DISABLE) 8994 mode_caching_page->rcd = 1; 8995 8996 if (wce_flag == SD_CACHE_ENABLE) 8997 mode_caching_page->wce = 1; 8998 else if (wce_flag == SD_CACHE_DISABLE) 8999 mode_caching_page->wce = 0; 9000 9001 /* 9002 * Save the page if the mode sense says the 9003 * drive supports it. 9004 */ 9005 save_pg = mode_caching_page->mode_page.ps ? 9006 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 9007 9008 /* Clear reserved bits before mode select. */ 9009 mode_caching_page->mode_page.ps = 0; 9010 9011 /* 9012 * Clear out mode header for mode select. 9013 * The rest of the retrieved page will be reused. 
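		 * (In MODE SELECT the header's mode data length field is
		 * reserved, so the header is zeroed here; only the block
		 * descriptor length is re-established below.)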
9014 */ 9015 bzero(header, hdrlen); 9016 9017 if (un->un_f_cfg_is_atapi == TRUE) { 9018 mhp = (struct mode_header_grp2 *)header; 9019 mhp->bdesc_length_hi = bd_len >> 8; 9020 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 9021 } else { 9022 ((struct mode_header *)header)->bdesc_length = bd_len; 9023 } 9024 9025 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9026 9027 /* Issue mode select to change the cache settings */ 9028 if (un->un_f_cfg_is_atapi == TRUE) { 9029 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header, 9030 sbuflen, save_pg, SD_PATH_DIRECT); 9031 } else { 9032 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 9033 sbuflen, save_pg, SD_PATH_DIRECT); 9034 } 9035 9036 } 9037 9038 9039 mode_sense_failed: 9040 9041 kmem_free(header, buflen); 9042 9043 if (rval != 0) { 9044 if (rval == EIO) 9045 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9046 else 9047 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9048 } 9049 return (rval); 9050 } 9051 9052 9053 /* 9054 * Function: sd_get_write_cache_enabled() 9055 * 9056 * Description: This routine is the driver entry point for determining if 9057 * write caching is enabled. It examines the WCE (write cache 9058 * enable) bits of mode page 8 (MODEPAGE_CACHING). 9059 * 9060 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 9061 * structure for this target. 9062 * is_enabled - pointer to int where write cache enabled state 9063 * is returned (non-zero -> write cache enabled) 9064 * 9065 * 9066 * Return Code: EIO 9067 * code returned by sd_send_scsi_MODE_SENSE 9068 * 9069 * Context: Kernel Thread 9070 * 9071 * NOTE: If ioctl is added to disable write cache, this sequence should 9072 * be followed so that no locking is required for accesses to 9073 * un->un_f_write_cache_enabled: 9074 * do mode select to clear wce 9075 * do synchronize cache to flush cache 9076 * set un->un_f_write_cache_enabled = FALSE 9077 * 9078 * Conversely, an ioctl to enable the write cache should be done 9079 * in this order: 9080 * set un->un_f_write_cache_enabled = TRUE 9081 * do mode select to set wce 9082 */ 9083 9084 static int 9085 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled) 9086 { 9087 struct mode_caching *mode_caching_page; 9088 uchar_t *header; 9089 size_t buflen; 9090 int hdrlen; 9091 int bd_len; 9092 int rval = 0; 9093 struct sd_lun *un; 9094 int status; 9095 9096 ASSERT(ssc != NULL); 9097 un = ssc->ssc_un; 9098 ASSERT(un != NULL); 9099 ASSERT(is_enabled != NULL); 9100 9101 /* in case of error, flag as enabled */ 9102 *is_enabled = TRUE; 9103 9104 /* 9105 * Do a test unit ready, otherwise a mode sense may not work if this 9106 * is the first command sent to the device after boot. 9107 */ 9108 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 9109 9110 if (status != 0) 9111 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9112 9113 if (un->un_f_cfg_is_atapi == TRUE) { 9114 hdrlen = MODE_HEADER_LENGTH_GRP2; 9115 } else { 9116 hdrlen = MODE_HEADER_LENGTH; 9117 } 9118 9119 /* 9120 * Allocate memory for the retrieved mode page and its headers. Set 9121 * a pointer to the page itself. 9122 */ 9123 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9124 header = kmem_zalloc(buflen, KM_SLEEP); 9125 9126 /* Get the information from the device. 
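	 * A MODE SENSE of the caching page is issued; ATAPI devices use
	 * the 10-byte group-1 CDB form, all others the 6-byte group-0
	 * form.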
	 */
9127 	if (un->un_f_cfg_is_atapi == TRUE) {
9128 		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
9129 		    MODEPAGE_CACHING, SD_PATH_DIRECT);
9130 	} else {
9131 		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
9132 		    MODEPAGE_CACHING, SD_PATH_DIRECT);
9133 	}
9134
9135 	if (rval != 0) {
9136 		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
9137 		    "sd_get_write_cache_enabled: Mode Sense Failed\n");
9138 		goto mode_sense_failed;
9139 	}
9140
9141 	/*
9142 	 * Determine size of Block Descriptors in order to locate
9143 	 * the mode page data. ATAPI devices return 0, SCSI devices
9144 	 * should return MODE_BLK_DESC_LENGTH.
9145 	 */
9146 	if (un->un_f_cfg_is_atapi == TRUE) {
9147 		struct mode_header_grp2 *mhp;
9148 		mhp = (struct mode_header_grp2 *)header;
9149 		bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
9150 	} else {
9151 		bd_len = ((struct mode_header *)header)->bdesc_length;
9152 	}
9153
9154 	if (bd_len > MODE_BLK_DESC_LENGTH) {
9155 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
9156 		    "sd_get_write_cache_enabled: Mode Sense returned invalid "
9157 		    "block descriptor length\n");
9158 		/* FMA should make upset complain here */
9159 		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
9160 		    "sd_get_write_cache_enabled: Mode Sense returned invalid "
9161 		    "block descriptor length %d", bd_len);
9162 		rval = EIO;
9163 		goto mode_sense_failed;
9164 	}
9165
9166 	mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
9167 	if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
9168 		SD_ERROR(SD_LOG_COMMON, un, "sd_get_write_cache_enabled: "
9169 		    "Mode Sense caching page code mismatch %d\n",
9170 		    mode_caching_page->mode_page.code);
9171 		/* FMA could make upset complain here */
9172 		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA,
9173 		    "sd_get_write_cache_enabled: Mode Sense caching page "
9174 		    "code mismatch %d", mode_caching_page->mode_page.code);
9175 		rval = EIO;
9176 		goto mode_sense_failed;
9177 	}
9178 	*is_enabled = mode_caching_page->wce;
9179
9180 mode_sense_failed:
9181 	if (rval == 0) {
9182 		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
9183 	} else if (rval == EIO) {
9184 		/*
9185 		 * Some disks do not support mode sense(6); we
9186 		 * should ignore this kind of error (sense key is
9187 		 * 0x5 - illegal request).
9188 		 */
9189 		uint8_t *sensep;
9190 		int senlen;
9191
9192 		sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
9193 		senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
9194 		    ssc->ssc_uscsi_cmd->uscsi_rqresid);
9195
9196 		if (senlen > 0 &&
9197 		    scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
9198 			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
9199 		} else {
9200 			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
9201 		}
9202 	} else {
9203 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9204 	}
9205 	kmem_free(header, buflen);
9206 	return (rval);
9207 }
9208
9209 /*
9210  * Function: sd_get_nv_sup()
9211  *
9212  * Description: This routine is the driver entry point for
9213  *		determining whether non-volatile cache is supported. This
9214  *		determination process works as follows:
9215  *
9216  *		1. sd first queries sd.conf to see whether the
9217  *		suppress_cache_flush bit is set for this device.
9218  *
9219  *		2. if not there, then queries the internal disk table.
9220  *
9221  *		3. if either sd.conf or the internal disk table specifies
9222  *		that cache flush be suppressed, we don't bother checking
9223  *		the NV_SUP bit.
9224  *
9225  *		If the SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries
9226  *		the optional INQUIRY VPD page 0x86.
If the device
9227  *		supports VPD page 0x86, sd examines the NV_SUP
9228  *		(non-volatile cache support) bit in the INQUIRY VPD page
9229  *		0x86:
9230  *		o If the NV_SUP bit is set, sd assumes the device has a
9231  *		  non-volatile cache and sets un_f_sync_nv_supported to
9232  *		  TRUE.
9233  *		o Otherwise the cache is considered volatile and
9234  *		  un_f_sync_nv_supported is set to FALSE.
9235  *
9236  * Arguments: un - driver soft state (unit) structure
9237  *
9238  * Return Code:
9239  *
9240  * Context: Kernel Thread
9241  */
9242
9243 static void
9244 sd_get_nv_sup(sd_ssc_t *ssc)
9245 {
9246 	int		rval = 0;
9247 	uchar_t		*inq86 = NULL;
9248 	size_t		inq86_len = MAX_INQUIRY_SIZE;
9249 	size_t		inq86_resid = 0;
9250 	struct dk_callback *dkc;
9251 	struct sd_lun	*un;
9252
9253 	ASSERT(ssc != NULL);
9254 	un = ssc->ssc_un;
9255 	ASSERT(un != NULL);
9256
9257 	mutex_enter(SD_MUTEX(un));
9258
9259 	/*
9260 	 * Be conservative on the device's support of
9261 	 * the SYNC_NV bit: un_f_sync_nv_supported is
9262 	 * initialized to false.
9263 	 */
9264 	un->un_f_sync_nv_supported = FALSE;
9265
9266 	/*
9267 	 * If either sd.conf or the internal disk table
9268 	 * specifies that cache flush be suppressed, then
9269 	 * we don't bother checking the NV_SUP bit.
9270 	 */
9271 	if (un->un_f_suppress_cache_flush == TRUE) {
9272 		mutex_exit(SD_MUTEX(un));
9273 		return;
9274 	}
9275
9276 	if (sd_check_vpd_page_support(ssc) == 0 &&
9277 	    un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
9278 		mutex_exit(SD_MUTEX(un));
9279 		/* collect page 86 data if available */
9280 		inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
9281
9282 		rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
9283 		    0x01, 0x86, &inq86_resid);
9284
9285 		if (rval == 0 && (inq86_len - inq86_resid > 6)) {
9286 			SD_TRACE(SD_LOG_COMMON, un,
9287 			    "sd_get_nv_sup: \
9288 successfully got VPD page: %x \
9289 PAGE LENGTH: %x BYTE 6: %x\n",
9290 			    inq86[1], inq86[3], inq86[6]);
9291
9292 			mutex_enter(SD_MUTEX(un));
9293 			/*
9294 			 * Check the value of the NV_SUP bit: only if the
9295 			 * device reports the NV_SUP bit as 1 will the
9296 			 * un_f_sync_nv_supported bit be set to true.
9297 			 */
9298 			if (inq86[6] & SD_VPD_NV_SUP) {
9299 				un->un_f_sync_nv_supported = TRUE;
9300 			}
9301 			mutex_exit(SD_MUTEX(un));
9302 		} else if (rval != 0) {
9303 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9304 		}
9305
9306 		kmem_free(inq86, inq86_len);
9307 	} else {
9308 		mutex_exit(SD_MUTEX(un));
9309 	}
9310
9311 	/*
9312 	 * Send a SYNC CACHE command to check whether the
9313 	 * SYNC_NV bit is supported. un_f_sync_nv_supported
9314 	 * must already hold the correct value at this point.
9315 	 */
9316 	mutex_enter(SD_MUTEX(un));
9317 	if (un->un_f_sync_nv_supported) {
9318 		mutex_exit(SD_MUTEX(un));
9319 		dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
9320 		dkc->dkc_flag = FLUSH_VOLATILE;
9321 		(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
9322
9323 		/*
9324 		 * Send a TEST UNIT READY command to the device. This should
9325 		 * clear any outstanding UNIT ATTENTION that may be present.
9326 		 */
9327 		rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
9328 		if (rval != 0)
9329 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9330
9331 		kmem_free(dkc, sizeof (struct dk_callback));
9332 	} else {
9333 		mutex_exit(SD_MUTEX(un));
9334 	}
9335
9336 	SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \
9337 un_f_suppress_cache_flush is set to %d\n",
9338 	    un->un_f_suppress_cache_flush);
9339 }
9340
9341 /*
9342  * Function: sd_make_device
9343  *
9344  * Description: Utility routine to return the Solaris device number from
9345  *		the data in the device's dev_info structure.
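 *		The instance number occupies the high bits of the minor
 *		number (instance << SDUNIT_SHIFT) and the partition number
 *		the low bits; SDUNIT(dev) and SDPART(dev) recover them.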
9346  *
9347  * Return Code: The Solaris device number
9348  *
9349  * Context: Any
9350  */
9351
9352 static dev_t
9353 sd_make_device(dev_info_t *devi)
9354 {
9355 	return (makedevice(ddi_name_to_major(ddi_get_name(devi)),
9356 	    ddi_get_instance(devi) << SDUNIT_SHIFT));
9357 }
9358
9359
9360 /*
9361  * Function: sd_pm_entry
9362  *
9363  * Description: Called at the start of a new command to manage power
9364  *		and busy status of a device. This includes determining whether
9365  *		the current power state of the device is sufficient for
9366  *		performing the command or whether it must be changed.
9367  *		The PM framework is notified appropriately.
9368  *		Only with a return status of DDI_SUCCESS will the
9369  *		component be marked busy to the framework.
9370  *
9371  *		All callers of sd_pm_entry must check the return status
9372  *		and only call sd_pm_exit if it was DDI_SUCCESS. A status
9373  *		of DDI_FAILURE indicates the device failed to power up.
9374  *		In this case un_pm_count has been adjusted so the result
9375  *		on exit is still powered down, i.e. count is less than 0.
9376  *		Calling sd_pm_exit with this count value hits an ASSERT.
9377  *
9378  * Return Code: DDI_SUCCESS or DDI_FAILURE
9379  *
9380  * Context: Kernel thread context.
9381  */
9382
9383 static int
9384 sd_pm_entry(struct sd_lun *un)
9385 {
9386 	int return_status = DDI_SUCCESS;
9387
9388 	ASSERT(!mutex_owned(SD_MUTEX(un)));
9389 	ASSERT(!mutex_owned(&un->un_pm_mutex));
9390
9391 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");
9392
9393 	if (un->un_f_pm_is_enabled == FALSE) {
9394 		SD_TRACE(SD_LOG_IO_PM, un,
9395 		    "sd_pm_entry: exiting, PM not enabled\n");
9396 		return (return_status);
9397 	}
9398
9399 	/*
9400 	 * Just increment a counter if PM is enabled. On the transition from
9401 	 * 0 ==> 1, mark the device as busy. The iodone side will decrement
9402 	 * the count with each IO and mark the device as idle when the count
9403 	 * hits 0.
9404 	 *
9405 	 * If the count is less than 0 the device is powered down. If a powered
9406 	 * down device is successfully powered up then the count must be
9407 	 * incremented to reflect the power up. Note that it'll get incremented
9408 	 * a second time to become busy.
9409 	 *
9410 	 * Because the following has the potential to change the device state
9411 	 * and must release the un_pm_mutex to do so, only one thread can be
9412 	 * allowed through at a time.
9413 	 */
9414
9415 	mutex_enter(&un->un_pm_mutex);
9416 	while (un->un_pm_busy == TRUE) {
9417 		cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
9418 	}
9419 	un->un_pm_busy = TRUE;
9420
9421 	if (un->un_pm_count < 1) {
9422
9423 		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");
9424
9425 		/*
9426 		 * Indicate we are now busy so the framework won't attempt to
9427 		 * power down the device. This call will only fail if either
9428 		 * we passed a bad component number or the device has no
9429 		 * components. Neither of these should ever happen.
9430 		 */
9431 		mutex_exit(&un->un_pm_mutex);
9432 		return_status = pm_busy_component(SD_DEVINFO(un), 0);
9433 		ASSERT(return_status == DDI_SUCCESS);
9434
9435 		mutex_enter(&un->un_pm_mutex);
9436
9437 		if (un->un_pm_count < 0) {
9438 			mutex_exit(&un->un_pm_mutex);
9439
9440 			SD_TRACE(SD_LOG_IO_PM, un,
9441 			    "sd_pm_entry: power up component\n");
9442
9443 			/*
9444 			 * pm_raise_power will cause sdpower to be called
9445 			 * which brings the device power level to the
9446 			 * desired state, ON in this case. If successful,
9447 			 * un_pm_count and un_power_level will be updated
9448 			 * appropriately.
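			 *
			 * To summarize the un_pm_count protocol: a count
			 * below zero means the device is powered down, zero
			 * means powered up but idle, and a positive count is
			 * the number of commands currently holding the
			 * device busy.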
9449 */ 9450 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9451 SD_SPINDLE_ON); 9452 9453 mutex_enter(&un->un_pm_mutex); 9454 9455 if (return_status != DDI_SUCCESS) { 9456 /* 9457 * Power up failed. 9458 * Idle the device and adjust the count 9459 * so the result on exit is that we're 9460 * still powered down, ie. count is less than 0. 9461 */ 9462 SD_TRACE(SD_LOG_IO_PM, un, 9463 "sd_pm_entry: power up failed," 9464 " idle the component\n"); 9465 9466 (void) pm_idle_component(SD_DEVINFO(un), 0); 9467 un->un_pm_count--; 9468 } else { 9469 /* 9470 * Device is powered up, verify the 9471 * count is non-negative. 9472 * This is debug only. 9473 */ 9474 ASSERT(un->un_pm_count == 0); 9475 } 9476 } 9477 9478 if (return_status == DDI_SUCCESS) { 9479 /* 9480 * For performance, now that the device has been tagged 9481 * as busy, and it's known to be powered up, update the 9482 * chain types to use jump tables that do not include 9483 * pm. This significantly lowers the overhead and 9484 * therefore improves performance. 9485 */ 9486 9487 mutex_exit(&un->un_pm_mutex); 9488 mutex_enter(SD_MUTEX(un)); 9489 SD_TRACE(SD_LOG_IO_PM, un, 9490 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9491 un->un_uscsi_chain_type); 9492 9493 if (un->un_f_non_devbsize_supported) { 9494 un->un_buf_chain_type = 9495 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9496 } else { 9497 un->un_buf_chain_type = 9498 SD_CHAIN_INFO_DISK_NO_PM; 9499 } 9500 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9501 9502 SD_TRACE(SD_LOG_IO_PM, un, 9503 " changed uscsi_chain_type to %d\n", 9504 un->un_uscsi_chain_type); 9505 mutex_exit(SD_MUTEX(un)); 9506 mutex_enter(&un->un_pm_mutex); 9507 9508 if (un->un_pm_idle_timeid == NULL) { 9509 /* 300 ms. */ 9510 un->un_pm_idle_timeid = 9511 timeout(sd_pm_idletimeout_handler, un, 9512 (drv_usectohz((clock_t)300000))); 9513 /* 9514 * Include an extra call to busy which keeps the 9515 * device busy with-respect-to the PM layer 9516 * until the timer fires, at which time it'll 9517 * get the extra idle call. 9518 */ 9519 (void) pm_busy_component(SD_DEVINFO(un), 0); 9520 } 9521 } 9522 } 9523 un->un_pm_busy = FALSE; 9524 /* Next... */ 9525 cv_signal(&un->un_pm_busy_cv); 9526 9527 un->un_pm_count++; 9528 9529 SD_TRACE(SD_LOG_IO_PM, un, 9530 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9531 9532 mutex_exit(&un->un_pm_mutex); 9533 9534 return (return_status); 9535 } 9536 9537 9538 /* 9539 * Function: sd_pm_exit 9540 * 9541 * Description: Called at the completion of a command to manage busy 9542 * status for the device. If the device becomes idle the 9543 * PM framework is notified. 9544 * 9545 * Context: Kernel thread context 9546 */ 9547 9548 static void 9549 sd_pm_exit(struct sd_lun *un) 9550 { 9551 ASSERT(!mutex_owned(SD_MUTEX(un))); 9552 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9553 9554 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 9555 9556 /* 9557 * After attach the following flag is only read, so don't 9558 * take the penalty of acquiring a mutex for it. 
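	 *
	 * Note that sd_pm_exit must only be called after a successful
	 * sd_pm_entry; callers bracket command processing as:
	 *
	 *	if (sd_pm_entry(un) == DDI_SUCCESS) {
	 *		(issue the command)
	 *		sd_pm_exit(un);
	 *	}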
9559 */ 9560 if (un->un_f_pm_is_enabled == TRUE) { 9561 9562 mutex_enter(&un->un_pm_mutex); 9563 un->un_pm_count--; 9564 9565 SD_TRACE(SD_LOG_IO_PM, un, 9566 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9567 9568 ASSERT(un->un_pm_count >= 0); 9569 if (un->un_pm_count == 0) { 9570 mutex_exit(&un->un_pm_mutex); 9571 9572 SD_TRACE(SD_LOG_IO_PM, un, 9573 "sd_pm_exit: idle component\n"); 9574 9575 (void) pm_idle_component(SD_DEVINFO(un), 0); 9576 9577 } else { 9578 mutex_exit(&un->un_pm_mutex); 9579 } 9580 } 9581 9582 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9583 } 9584 9585 9586 /* 9587 * Function: sdopen 9588 * 9589 * Description: Driver's open(9e) entry point function. 9590 * 9591 * Arguments: dev_i - pointer to device number 9592 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9593 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9594 * cred_p - user credential pointer 9595 * 9596 * Return Code: EINVAL 9597 * ENXIO 9598 * EIO 9599 * EROFS 9600 * EBUSY 9601 * 9602 * Context: Kernel thread context 9603 */ 9604 /* ARGSUSED */ 9605 static int 9606 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9607 { 9608 struct sd_lun *un; 9609 int nodelay; 9610 int part; 9611 uint64_t partmask; 9612 int instance; 9613 dev_t dev; 9614 int rval = EIO; 9615 diskaddr_t nblks = 0; 9616 diskaddr_t label_cap; 9617 9618 /* Validate the open type */ 9619 if (otyp >= OTYPCNT) { 9620 return (EINVAL); 9621 } 9622 9623 dev = *dev_p; 9624 instance = SDUNIT(dev); 9625 mutex_enter(&sd_detach_mutex); 9626 9627 /* 9628 * Fail the open if there is no softstate for the instance, or 9629 * if another thread somewhere is trying to detach the instance. 9630 */ 9631 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 9632 (un->un_detach_count != 0)) { 9633 mutex_exit(&sd_detach_mutex); 9634 /* 9635 * The probe cache only needs to be cleared when open (9e) fails 9636 * with ENXIO (4238046). 9637 */ 9638 /* 9639 * un-conditionally clearing probe cache is ok with 9640 * separate sd/ssd binaries 9641 * x86 platform can be an issue with both parallel 9642 * and fibre in 1 binary 9643 */ 9644 sd_scsi_clear_probe_cache(); 9645 return (ENXIO); 9646 } 9647 9648 /* 9649 * The un_layer_count is to prevent another thread in specfs from 9650 * trying to detach the instance, which can happen when we are 9651 * called from a higher-layer driver instead of thru specfs. 9652 * This will not be needed when DDI provides a layered driver 9653 * interface that allows specfs to know that an instance is in 9654 * use by a layered driver & should not be detached. 9655 * 9656 * Note: the semantics for layered driver opens are exactly one 9657 * close for every open. 9658 */ 9659 if (otyp == OTYP_LYR) { 9660 un->un_layer_count++; 9661 } 9662 9663 /* 9664 * Keep a count of the current # of opens in progress. This is because 9665 * some layered drivers try to call us as a regular open. This can 9666 * cause problems that we cannot prevent, however by keeping this count 9667 * we can at least keep our open and detach routines from racing against 9668 * each other under such conditions. 9669 */ 9670 un->un_opens_in_progress++; 9671 mutex_exit(&sd_detach_mutex); 9672 9673 nodelay = (flag & (FNDELAY | FNONBLOCK)); 9674 part = SDPART(dev); 9675 partmask = 1 << part; 9676 9677 /* 9678 * We use a semaphore here in order to serialize 9679 * open and close requests on the device. 
9680 	 */
9681 	sema_p(&un->un_semoclose);
9682
9683 	mutex_enter(SD_MUTEX(un));
9684
9685 	/*
9686 	 * All device accesses go thru sdstrategy(), where we check
9687 	 * the suspend status; but there could be a scsi_poll command,
9688 	 * which bypasses sdstrategy(), so we need to check the pm
9689 	 * status here as well.
9690 	 */
9691
9692 	if (!nodelay) {
9693 		while ((un->un_state == SD_STATE_SUSPENDED) ||
9694 		    (un->un_state == SD_STATE_PM_CHANGING)) {
9695 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
9696 		}
9697
9698 		mutex_exit(SD_MUTEX(un));
9699 		if (sd_pm_entry(un) != DDI_SUCCESS) {
9700 			rval = EIO;
9701 			SD_ERROR(SD_LOG_OPEN_CLOSE, un,
9702 			    "sdopen: sd_pm_entry failed\n");
9703 			goto open_failed_with_pm;
9704 		}
9705 		mutex_enter(SD_MUTEX(un));
9706 	}
9707
9708 	/* check for previous exclusive open */
9709 	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
9710 	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
9711 	    "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
9712 	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
9713
9714 	if (un->un_exclopen & (partmask)) {
9715 		goto excl_open_fail;
9716 	}
9717
9718 	if (flag & FEXCL) {
9719 		int i;
9720 		if (un->un_ocmap.lyropen[part]) {
9721 			goto excl_open_fail;
9722 		}
9723 		for (i = 0; i < (OTYPCNT - 1); i++) {
9724 			if (un->un_ocmap.regopen[i] & (partmask)) {
9725 				goto excl_open_fail;
9726 			}
9727 		}
9728 	}
9729
9730 	/*
9731 	 * Check the write permission if this is a removable media device,
9732 	 * NDELAY has not been set, and writable permission is requested.
9733 	 *
9734 	 * Note: If NDELAY was set and this is write-protected media the WRITE
9735 	 * attempt will fail with EIO as part of the I/O processing. This is a
9736 	 * more permissive implementation that allows the open to succeed and
9737 	 * WRITE attempts to fail when appropriate.
9738 	 */
9739 	if (un->un_f_chk_wp_open) {
9740 		if ((flag & FWRITE) && (!nodelay)) {
9741 			mutex_exit(SD_MUTEX(un));
9742 			/*
9743 			 * For a writable DVD drive, defer the write-permission
9744 			 * check until sdstrategy(); do not fail the open even
9745 			 * if FWRITE is set, since whether the device is
9746 			 * writable depends on the media, and the media can
9747 			 * change after the call to open().
9748 			 */
9749 			if (un->un_f_dvdram_writable_device == FALSE) {
9750 				if (ISCD(un) || sr_check_wp(dev)) {
9751 					rval = EROFS;
9752 					mutex_enter(SD_MUTEX(un));
9753 					SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
9754 					    "write to cd or write protected media\n");
9755 					goto open_fail;
9756 				}
9757 			}
9758 			mutex_enter(SD_MUTEX(un));
9759 		}
9760 	}
9761
9762 	/*
9763 	 * If opening in NDELAY/NONBLOCK mode, just return.
9764 	 * Check if disk is ready and has a valid geometry later.
9765 	 */
9766 	if (!nodelay) {
9767 		sd_ssc_t	*ssc;
9768
9769 		mutex_exit(SD_MUTEX(un));
9770 		ssc = sd_ssc_init(un);
9771 		rval = sd_ready_and_valid(ssc, part);
9772 		sd_ssc_fini(ssc);
9773 		mutex_enter(SD_MUTEX(un));
9774 		/*
9775 		 * Fail if device is not ready or if the number of disk
9776 		 * blocks is zero or negative for non-CD devices.
9777 		 */
9778
9779 		nblks = 0;
9780
9781 		if (rval == SD_READY_VALID && (!ISCD(un))) {
9782 			/* if cmlb_partinfo fails, nblks remains 0 */
9783 			mutex_exit(SD_MUTEX(un));
9784 			(void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
9785 			    NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
9786 			mutex_enter(SD_MUTEX(un));
9787 		}
9788
9789 		if ((rval != SD_READY_VALID) ||
9790 		    (!ISCD(un) && nblks <= 0)) {
9791 			rval = un->un_f_has_removable_media ?
ENXIO : EIO; 9792 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9793 "device not ready or invalid disk block value\n"); 9794 goto open_fail; 9795 } 9796 #if defined(__i386) || defined(__amd64) 9797 } else { 9798 uchar_t *cp; 9799 /* 9800 * x86 requires special nodelay handling, so that p0 is 9801 * always defined and accessible. 9802 * Invalidate geometry only if device is not already open. 9803 */ 9804 cp = &un->un_ocmap.chkd[0]; 9805 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9806 if (*cp != (uchar_t)0) { 9807 break; 9808 } 9809 cp++; 9810 } 9811 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9812 mutex_exit(SD_MUTEX(un)); 9813 cmlb_invalidate(un->un_cmlbhandle, 9814 (void *)SD_PATH_DIRECT); 9815 mutex_enter(SD_MUTEX(un)); 9816 } 9817 9818 #endif 9819 } 9820 9821 if (otyp == OTYP_LYR) { 9822 un->un_ocmap.lyropen[part]++; 9823 } else { 9824 un->un_ocmap.regopen[otyp] |= partmask; 9825 } 9826 9827 /* Set up open and exclusive open flags */ 9828 if (flag & FEXCL) { 9829 un->un_exclopen |= (partmask); 9830 } 9831 9832 /* 9833 * If the lun is EFI labeled and lun capacity is greater than the 9834 * capacity contained in the label, log a sys-event to notify the 9835 * interested module. 9836 * To avoid an infinite loop of logging sys-event, we only log the 9837 * event when the lun is not opened in NDELAY mode. The event handler 9838 * should open the lun in NDELAY mode. 9839 */ 9840 if (!(flag & FNDELAY)) { 9841 mutex_exit(SD_MUTEX(un)); 9842 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 9843 (void*)SD_PATH_DIRECT) == 0) { 9844 mutex_enter(SD_MUTEX(un)); 9845 if (un->un_f_blockcount_is_valid && 9846 un->un_blockcount > label_cap) { 9847 mutex_exit(SD_MUTEX(un)); 9848 sd_log_lun_expansion_event(un, 9849 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 9850 mutex_enter(SD_MUTEX(un)); 9851 } 9852 } else { 9853 mutex_enter(SD_MUTEX(un)); 9854 } 9855 } 9856 9857 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9858 "open of part %d type %d\n", part, otyp); 9859 9860 mutex_exit(SD_MUTEX(un)); 9861 if (!nodelay) { 9862 sd_pm_exit(un); 9863 } 9864 9865 sema_v(&un->un_semoclose); 9866 9867 mutex_enter(&sd_detach_mutex); 9868 un->un_opens_in_progress--; 9869 mutex_exit(&sd_detach_mutex); 9870 9871 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9872 return (DDI_SUCCESS); 9873 9874 excl_open_fail: 9875 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9876 rval = EBUSY; 9877 9878 open_fail: 9879 mutex_exit(SD_MUTEX(un)); 9880 9881 /* 9882 * On a failed open we must exit the pm management. 9883 */ 9884 if (!nodelay) { 9885 sd_pm_exit(un); 9886 } 9887 open_failed_with_pm: 9888 sema_v(&un->un_semoclose); 9889 9890 mutex_enter(&sd_detach_mutex); 9891 un->un_opens_in_progress--; 9892 if (otyp == OTYP_LYR) { 9893 un->un_layer_count--; 9894 } 9895 mutex_exit(&sd_detach_mutex); 9896 9897 return (rval); 9898 } 9899 9900 9901 /* 9902 * Function: sdclose 9903 * 9904 * Description: Driver's close(9e) entry point function. 
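 *		Layered (OTYP_LYR) opens are reference counted per
 *		partition in un_ocmap.lyropen[], while block/char opens
 *		are tracked as per-otyp partition bitmasks in
 *		un_ocmap.regopen[]; the last close is detected by scanning
 *		un_ocmap.chkd[] for any remaining non-zero byte.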
9905  *
9906  * Arguments: dev - device number
9907  *		flag - file status flag, informational only
9908  *		otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
9909  *		cred_p - user credential pointer
9910  *
9911  * Return Code: ENXIO
9912  *
9913  * Context: Kernel thread context
9914  */
9915 /* ARGSUSED */
9916 static int
9917 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
9918 {
9919 	struct sd_lun	*un;
9920 	uchar_t		*cp;
9921 	int		part;
9922 	int		nodelay;
9923 	int		rval = 0;
9924
9925 	/* Validate the open type */
9926 	if (otyp >= OTYPCNT) {
9927 		return (ENXIO);
9928 	}
9929
9930 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
9931 		return (ENXIO);
9932 	}
9933
9934 	part = SDPART(dev);
9935 	nodelay = flag & (FNDELAY | FNONBLOCK);
9936
9937 	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
9938 	    "sdclose: close of part %d type %d\n", part, otyp);
9939
9940 	/*
9941 	 * We use a semaphore here in order to serialize
9942 	 * open and close requests on the device.
9943 	 */
9944 	sema_p(&un->un_semoclose);
9945
9946 	mutex_enter(SD_MUTEX(un));
9947
9948 	/* Don't proceed if power is being changed. */
9949 	while (un->un_state == SD_STATE_PM_CHANGING) {
9950 		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
9951 	}
9952
9953 	if (un->un_exclopen & (1 << part)) {
9954 		un->un_exclopen &= ~(1 << part);
9955 	}
9956
9957 	/* Update the open partition map */
9958 	if (otyp == OTYP_LYR) {
9959 		un->un_ocmap.lyropen[part] -= 1;
9960 	} else {
9961 		un->un_ocmap.regopen[otyp] &= ~(1 << part);
9962 	}
9963
9964 	cp = &un->un_ocmap.chkd[0];
9965 	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
9966 		if (*cp != (uchar_t)0) {
9967 			break;
9968 		}
9969 		cp++;
9970 	}
9971
9972 	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
9973 		SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
9974
9975 		/*
9976 		 * We avoid persistence upon the last close, and set
9977 		 * the throttle back to the maximum.
9978 		 */
9979 		un->un_throttle = un->un_saved_throttle;
9980
9981 		if (un->un_state == SD_STATE_OFFLINE) {
9982 			if (un->un_f_is_fibre == FALSE) {
9983 				scsi_log(SD_DEVINFO(un), sd_label,
9984 				    CE_WARN, "offline\n");
9985 			}
9986 			mutex_exit(SD_MUTEX(un));
9987 			cmlb_invalidate(un->un_cmlbhandle,
9988 			    (void *)SD_PATH_DIRECT);
9989 			mutex_enter(SD_MUTEX(un));
9990
9991 		} else {
9992 			/*
9993 			 * Flush any outstanding writes in the NVRAM cache.
9994 			 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
9995 			 * command; it may not work for non-Pluto devices.
9996 			 * SYNCHRONIZE CACHE is not required for removables,
9997 			 * except DVD-RAM drives.
9998 			 *
9999 			 * Also note: because SYNCHRONIZE CACHE is currently
10000 			 * the only command issued here that requires the
10001 			 * drive to be powered up, only do the power up before
10002 			 * sending the Sync Cache command. If additional
10003 			 * commands are added which require a powered up
10004 			 * drive, the following sequence may have to change.
10005 			 *
10006 			 * And finally, note that parallel SCSI on SPARC
10007 			 * only issues a Sync Cache to DVD-RAM, a newly
10008 			 * supported device.
10009 			 */
10010 #if defined(__i386) || defined(__amd64)
10011 			if ((un->un_f_sync_cache_supported &&
10012 			    un->un_f_sync_cache_required) ||
10013 			    un->un_f_dvdram_writable_device == TRUE) {
10014 #else
10015 			if (un->un_f_dvdram_writable_device == TRUE) {
10016 #endif
10017 				mutex_exit(SD_MUTEX(un));
10018 				if (sd_pm_entry(un) == DDI_SUCCESS) {
10019 					rval =
10020 					    sd_send_scsi_SYNCHRONIZE_CACHE(un,
10021 					    NULL);
10022 					/* ignore error if not supported */
10023 					if (rval == ENOTSUP) {
10024 						rval = 0;
10025 					} else if (rval != 0) {
10026 						rval = EIO;
10027 					}
10028 					sd_pm_exit(un);
10029 				} else {
10030 					rval = EIO;
10031 				}
10032 				mutex_enter(SD_MUTEX(un));
10033 			}
10034
10035 			/*
10036 			 * For devices which support DOOR_LOCK, send an ALLOW
10037 			 * MEDIA REMOVAL command, but don't get upset if it
10038 			 * fails. We need to raise the power of the drive
10039 			 * before we can call sd_send_scsi_DOORLOCK().
10040 			 */
10041 			if (un->un_f_doorlock_supported) {
10042 				mutex_exit(SD_MUTEX(un));
10043 				if (sd_pm_entry(un) == DDI_SUCCESS) {
10044 					sd_ssc_t	*ssc;
10045
10046 					ssc = sd_ssc_init(un);
10047 					rval = sd_send_scsi_DOORLOCK(ssc,
10048 					    SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
10049 					if (rval != 0)
10050 						sd_ssc_assessment(ssc,
10051 						    SD_FMT_IGNORE);
10052 					sd_ssc_fini(ssc);
10053
10054 					sd_pm_exit(un);
10055 					if (ISCD(un) && (rval != 0) &&
10056 					    (nodelay != 0)) {
10057 						rval = ENXIO;
10058 					}
10059 				} else {
10060 					rval = EIO;
10061 				}
10062 				mutex_enter(SD_MUTEX(un));
10063 			}
10064
10065 			/*
10066 			 * If a device has removable media, invalidate all
10067 			 * parameters related to media, such as geometry,
10068 			 * blocksize, and blockcount.
10069 			 */
10070 			if (un->un_f_has_removable_media) {
10071 				sr_ejected(un);
10072 			}
10073
10074 			/*
10075 			 * Destroy the cache (if it exists) which was
10076 			 * allocated for the write maps, since this is
10077 			 * the last close for this media.
10078 			 */
10079 			if (un->un_wm_cache) {
10080 				/*
10081 				 * Check if there are pending commands;
10082 				 * if there are, give a warning and
10083 				 * do not destroy the cache.
10084 				 */
10085 				if (un->un_ncmds_in_driver > 0) {
10086 					scsi_log(SD_DEVINFO(un),
10087 					    sd_label, CE_WARN,
10088 					    "Unable to clean up memory "
10089 					    "because of pending I/O\n");
10090 				} else {
10091 					kmem_cache_destroy(
10092 					    un->un_wm_cache);
10093 					un->un_wm_cache = NULL;
10094 				}
10095 			}
10096 		}
10097 	}
10098
10099 	mutex_exit(SD_MUTEX(un));
10100 	sema_v(&un->un_semoclose);
10101
10102 	if (otyp == OTYP_LYR) {
10103 		mutex_enter(&sd_detach_mutex);
10104 		/*
10105 		 * The detach routine may run when the layer count
10106 		 * drops to zero.
10107 		 */
10108 		un->un_layer_count--;
10109 		mutex_exit(&sd_detach_mutex);
10110 	}
10111
10112 	return (rval);
10113 }
10114
10115
10116 /*
10117  * Function: sd_ready_and_valid
10118  *
10119  * Description: Test if device is ready and has a valid geometry.
10120  *
10121  * Arguments: ssc - sd_ssc_t will contain un
10122  *		un - driver soft state (unit) structure
10123  *
10124  * Return Code: SD_READY_VALID		ready and valid label
10125  *		SD_NOT_READY_VALID	not ready, no label
10126  *		SD_RESERVED_BY_OTHERS	reservation conflict
10127  *
10128  * Context: Never called at interrupt context.
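 *
 *		A typical call, as made by the entry points in this file:
 *
 *			ssc = sd_ssc_init(un);
 *			rval = sd_ready_and_valid(ssc, part);
 *			sd_ssc_fini(ssc);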
10129  */
10130
10131 static int
10132 sd_ready_and_valid(sd_ssc_t *ssc, int part)
10133 {
10134 	struct sd_errstats	*stp;
10135 	uint64_t		capacity;
10136 	uint_t			lbasize;
10137 	int			rval = SD_READY_VALID;
10138 	char			name_str[48];
10139 	int			is_valid;
10140 	struct sd_lun		*un;
10141 	int			status;
10142
10143 	ASSERT(ssc != NULL);
10144 	un = ssc->ssc_un;
10145 	ASSERT(un != NULL);
10146 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10147
10148 	mutex_enter(SD_MUTEX(un));
10149 	/*
10150 	 * If a device has removable media, we must check if media is
10151 	 * ready when checking if this device is ready and valid.
10152 	 */
10153 	if (un->un_f_has_removable_media) {
10154 		mutex_exit(SD_MUTEX(un));
10155 		status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10156
10157 		if (status != 0) {
10158 			rval = SD_NOT_READY_VALID;
10159 			mutex_enter(SD_MUTEX(un));
10160
10161 			/* Ignore all failed status for removable media */
10162 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10163
10164 			goto done;
10165 		}
10166
10167 		is_valid = SD_IS_VALID_LABEL(un);
10168 		mutex_enter(SD_MUTEX(un));
10169 		if (!is_valid ||
10170 		    (un->un_f_blockcount_is_valid == FALSE) ||
10171 		    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
10172
10173 			/* capacity has to be read on every open. */
10174 			mutex_exit(SD_MUTEX(un));
10175 			status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
10176 			    &lbasize, SD_PATH_DIRECT);
10177
10178 			if (status != 0) {
10179 				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10180
10181 				cmlb_invalidate(un->un_cmlbhandle,
10182 				    (void *)SD_PATH_DIRECT);
10183 				mutex_enter(SD_MUTEX(un));
10184 				rval = SD_NOT_READY_VALID;
10185
10186 				goto done;
10187 			} else {
10188 				mutex_enter(SD_MUTEX(un));
10189 				sd_update_block_info(un, lbasize, capacity);
10190 			}
10191 		}
10192
10193 		/*
10194 		 * Check if the media in the device is writable or not.
10195 		 */
10196 		if (!is_valid && ISCD(un)) {
10197 			sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
10198 		}
10199
10200 	} else {
10201 		/*
10202 		 * Do a test unit ready to clear any unit attention from non-cd
10203 		 * devices.
10204 		 */
10205 		mutex_exit(SD_MUTEX(un));
10206
10207 		status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10208 		if (status != 0) {
10209 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10210 		}
10211
10212 		mutex_enter(SD_MUTEX(un));
10213 	}
10214
10215
10216 	/*
10217 	 * If this is a non-512 block device, allocate space for
10218 	 * the wmap cache. This is being done here since every time
10219 	 * a media is changed this routine will be called and the
10220 	 * block size is a function of media rather than device.
10221 	 */
10222 	if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) {
10223 		if (!(un->un_wm_cache)) {
10224 			(void) snprintf(name_str, sizeof (name_str),
10225 			    "%s%d_cache",
10226 			    ddi_driver_name(SD_DEVINFO(un)),
10227 			    ddi_get_instance(SD_DEVINFO(un)));
10228 			un->un_wm_cache = kmem_cache_create(
10229 			    name_str, sizeof (struct sd_w_map),
10230 			    8, sd_wm_cache_constructor,
10231 			    sd_wm_cache_destructor, NULL,
10232 			    (void *)un, NULL, 0);
10233 			if (!(un->un_wm_cache)) {
10234 				rval = ENOMEM;
10235 				goto done;
10236 			}
10237 		}
10238 	}
10239
10240 	if (un->un_state == SD_STATE_NORMAL) {
10241 		/*
10242 		 * If the target is not yet ready here (defined by a TUR
10243 		 * failure), invalidate the geometry and print an 'offline'
10244 		 * message. This is a legacy message, as the state of the
10245 		 * target is not actually changed to SD_STATE_OFFLINE.
10246 		 *
10247 		 * If the TUR fails for EACCES (Reservation Conflict),
10248 		 * SD_RESERVED_BY_OTHERS will be returned to indicate
10249 		 * reservation conflict.
If the TUR fails for other 10250 * reasons, SD_NOT_READY_VALID will be returned. 10251 */ 10252 int err; 10253 10254 mutex_exit(SD_MUTEX(un)); 10255 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10256 mutex_enter(SD_MUTEX(un)); 10257 10258 if (err != 0) { 10259 mutex_exit(SD_MUTEX(un)); 10260 cmlb_invalidate(un->un_cmlbhandle, 10261 (void *)SD_PATH_DIRECT); 10262 mutex_enter(SD_MUTEX(un)); 10263 if (err == EACCES) { 10264 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10265 "reservation conflict\n"); 10266 rval = SD_RESERVED_BY_OTHERS; 10267 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10268 } else { 10269 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10270 "drive offline\n"); 10271 rval = SD_NOT_READY_VALID; 10272 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 10273 } 10274 goto done; 10275 } 10276 } 10277 10278 if (un->un_f_format_in_progress == FALSE) { 10279 mutex_exit(SD_MUTEX(un)); 10280 10281 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL, 10282 NULL, (void *) SD_PATH_DIRECT) != 0) { 10283 rval = SD_NOT_READY_VALID; 10284 mutex_enter(SD_MUTEX(un)); 10285 10286 goto done; 10287 } 10288 if (un->un_f_pkstats_enabled) { 10289 sd_set_pstats(un); 10290 SD_TRACE(SD_LOG_IO_PARTITION, un, 10291 "sd_ready_and_valid: un:0x%p pstats created and " 10292 "set\n", un); 10293 } 10294 mutex_enter(SD_MUTEX(un)); 10295 } 10296 10297 /* 10298 * If this device supports DOOR_LOCK command, try and send 10299 * this command to PREVENT MEDIA REMOVAL, but don't get upset 10300 * if it fails. For a CD, however, it is an error 10301 */ 10302 if (un->un_f_doorlock_supported) { 10303 mutex_exit(SD_MUTEX(un)); 10304 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 10305 SD_PATH_DIRECT); 10306 10307 if ((status != 0) && ISCD(un)) { 10308 rval = SD_NOT_READY_VALID; 10309 mutex_enter(SD_MUTEX(un)); 10310 10311 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10312 10313 goto done; 10314 } else if (status != 0) 10315 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10316 mutex_enter(SD_MUTEX(un)); 10317 } 10318 10319 /* The state has changed, inform the media watch routines */ 10320 un->un_mediastate = DKIO_INSERTED; 10321 cv_broadcast(&un->un_state_cv); 10322 rval = SD_READY_VALID; 10323 10324 done: 10325 10326 /* 10327 * Initialize the capacity kstat value, if no media previously 10328 * (capacity kstat is 0) and a media has been inserted 10329 * (un_blockcount > 0). 10330 */ 10331 if (un->un_errstats != NULL) { 10332 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10333 if ((stp->sd_capacity.value.ui64 == 0) && 10334 (un->un_f_blockcount_is_valid == TRUE)) { 10335 stp->sd_capacity.value.ui64 = 10336 (uint64_t)((uint64_t)un->un_blockcount * 10337 un->un_sys_blocksize); 10338 } 10339 } 10340 10341 mutex_exit(SD_MUTEX(un)); 10342 return (rval); 10343 } 10344 10345 10346 /* 10347 * Function: sdmin 10348 * 10349 * Description: Routine to limit the size of a data transfer. Used in 10350 * conjunction with physio(9F). 10351 * 10352 * Arguments: bp - pointer to the indicated buf(9S) struct. 10353 * 10354 * Context: Kernel thread context. 10355 */ 10356 10357 static void 10358 sdmin(struct buf *bp) 10359 { 10360 struct sd_lun *un; 10361 int instance; 10362 10363 instance = SDUNIT(bp->b_edev); 10364 10365 un = ddi_get_soft_state(sd_state, instance); 10366 ASSERT(un != NULL); 10367 10368 if (bp->b_bcount > un->un_max_xfer_size) { 10369 bp->b_bcount = un->un_max_xfer_size; 10370 } 10371 } 10372 10373 10374 /* 10375 * Function: sdread 10376 * 10377 * Description: Driver's read(9e) entry point function. 
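 *		The transfer itself is carried out by physio(9F), with
 *		sdstrategy as the strategy routine and sdmin limiting each
 *		buf to at most un_max_xfer_size bytes.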
10378  *
10379  * Arguments: dev - device number
10380  *		uio - structure pointer describing where data is to be stored
10381  *		in user's space
10382  *		cred_p - user credential pointer
10383  *
10384  * Return Code: ENXIO
10385  *		EIO
10386  *		EINVAL
10387  *		value returned by physio
10388  *
10389  * Context: Kernel thread context.
10390  */
10391 /* ARGSUSED */
10392 static int
10393 sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
10394 {
10395 	struct sd_lun	*un = NULL;
10396 	int		secmask;
10397 	int		err = 0;
10398 	sd_ssc_t	*ssc;
10399
10400 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10401 		return (ENXIO);
10402 	}
10403
10404 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10405
10406
10407 	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10408 		mutex_enter(SD_MUTEX(un));
10409 		/*
10410 		 * Because the call to sd_ready_and_valid will issue I/O we
10411 		 * must wait here if either the device is suspended or
10412 		 * if its power level is changing.
10413 		 */
10414 		while ((un->un_state == SD_STATE_SUSPENDED) ||
10415 		    (un->un_state == SD_STATE_PM_CHANGING)) {
10416 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10417 		}
10418 		un->un_ncmds_in_driver++;
10419 		mutex_exit(SD_MUTEX(un));
10420
10421 		/* Initialize sd_ssc_t for internal uscsi commands */
10422 		ssc = sd_ssc_init(un);
10423 		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10424 			err = EIO;
10425 		} else {
10426 			err = 0;
10427 		}
10428 		sd_ssc_fini(ssc);
10429
10430 		mutex_enter(SD_MUTEX(un));
10431 		un->un_ncmds_in_driver--;
10432 		ASSERT(un->un_ncmds_in_driver >= 0);
10433 		mutex_exit(SD_MUTEX(un));
10434 		if (err != 0)
10435 			return (err);
10436 	}
10437
10438 	/*
10439 	 * Read requests are restricted to multiples of the system block size.
10440 	 */
10441 	secmask = un->un_sys_blocksize - 1;
10442
10443 	if (uio->uio_loffset & ((offset_t)(secmask))) {
10444 		SD_ERROR(SD_LOG_READ_WRITE, un,
10445 		    "sdread: file offset not modulo %d\n",
10446 		    un->un_sys_blocksize);
10447 		err = EINVAL;
10448 	} else if (uio->uio_iov->iov_len & (secmask)) {
10449 		SD_ERROR(SD_LOG_READ_WRITE, un,
10450 		    "sdread: transfer length not modulo %d\n",
10451 		    un->un_sys_blocksize);
10452 		err = EINVAL;
10453 	} else {
10454 		err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
10455 	}
10456
10457 	return (err);
10458 }
10459
10460
10461 /*
10462  * Function: sdwrite
10463  *
10464  * Description: Driver's write(9e) entry point function.
10465  *
10466  * Arguments: dev - device number
10467  *		uio - structure pointer describing where data is stored in
10468  *		user's space
10469  *		cred_p - user credential pointer
10470  *
10471  * Return Code: ENXIO
10472  *		EIO
10473  *		EINVAL
10474  *		value returned by physio
10475  *
10476  * Context: Kernel thread context.
10477  */
10478 /* ARGSUSED */
10479 static int
10480 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
10481 {
10482 	struct sd_lun	*un = NULL;
10483 	int		secmask;
10484 	int		err = 0;
10485 	sd_ssc_t	*ssc;
10486
10487 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10488 		return (ENXIO);
10489 	}
10490
10491 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10492
10493 	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10494 		mutex_enter(SD_MUTEX(un));
10495 		/*
10496 		 * Because the call to sd_ready_and_valid will issue I/O we
10497 		 * must wait here if either the device is suspended or
10498 		 * if its power level is changing.
10499 		 */
10500 		while ((un->un_state == SD_STATE_SUSPENDED) ||
10501 		    (un->un_state == SD_STATE_PM_CHANGING)) {
10502 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10503 		}
10504 		un->un_ncmds_in_driver++;
10505 		mutex_exit(SD_MUTEX(un));
10506
10507 		/* Initialize sd_ssc_t for internal uscsi commands */
10508 		ssc = sd_ssc_init(un);
10509 		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10510 			err = EIO;
10511 		} else {
10512 			err = 0;
10513 		}
10514 		sd_ssc_fini(ssc);
10515
10516 		mutex_enter(SD_MUTEX(un));
10517 		un->un_ncmds_in_driver--;
10518 		ASSERT(un->un_ncmds_in_driver >= 0);
10519 		mutex_exit(SD_MUTEX(un));
10520 		if (err != 0)
10521 			return (err);
10522 	}
10523
10524 	/*
10525 	 * Write requests are restricted to multiples of the system block size.
10526 	 */
10527 	secmask = un->un_sys_blocksize - 1;
10528
10529 	if (uio->uio_loffset & ((offset_t)(secmask))) {
10530 		SD_ERROR(SD_LOG_READ_WRITE, un,
10531 		    "sdwrite: file offset not modulo %d\n",
10532 		    un->un_sys_blocksize);
10533 		err = EINVAL;
10534 	} else if (uio->uio_iov->iov_len & (secmask)) {
10535 		SD_ERROR(SD_LOG_READ_WRITE, un,
10536 		    "sdwrite: transfer length not modulo %d\n",
10537 		    un->un_sys_blocksize);
10538 		err = EINVAL;
10539 	} else {
10540 		err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
10541 	}
10542
10543 	return (err);
10544 }
10545
10546
10547 /*
10548  * Function: sdaread
10549  *
10550  * Description: Driver's aread(9e) entry point function.
10551  *
10552  * Arguments: dev - device number
10553  *		aio - structure pointer describing where data is to be stored
10554  *		cred_p - user credential pointer
10555  *
10556  * Return Code: ENXIO
10557  *		EIO
10558  *		EINVAL
10559  *		value returned by aphysio
10560  *
10561  * Context: Kernel thread context.
10562  */
10563 /* ARGSUSED */
10564 static int
10565 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
10566 {
10567 	struct sd_lun	*un = NULL;
10568 	struct uio	*uio = aio->aio_uio;
10569 	int		secmask;
10570 	int		err = 0;
10571 	sd_ssc_t	*ssc;
10572
10573 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10574 		return (ENXIO);
10575 	}
10576
10577 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10578
10579 	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10580 		mutex_enter(SD_MUTEX(un));
10581 		/*
10582 		 * Because the call to sd_ready_and_valid will issue I/O we
10583 		 * must wait here if either the device is suspended or
10584 		 * if its power level is changing.
10585 		 */
10586 		while ((un->un_state == SD_STATE_SUSPENDED) ||
10587 		    (un->un_state == SD_STATE_PM_CHANGING)) {
10588 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10589 		}
10590 		un->un_ncmds_in_driver++;
10591 		mutex_exit(SD_MUTEX(un));
10592
10593 		/* Initialize sd_ssc_t for internal uscsi commands */
10594 		ssc = sd_ssc_init(un);
10595 		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10596 			err = EIO;
10597 		} else {
10598 			err = 0;
10599 		}
10600 		sd_ssc_fini(ssc);
10601
10602 		mutex_enter(SD_MUTEX(un));
10603 		un->un_ncmds_in_driver--;
10604 		ASSERT(un->un_ncmds_in_driver >= 0);
10605 		mutex_exit(SD_MUTEX(un));
10606 		if (err != 0)
10607 			return (err);
10608 	}
10609
10610 	/*
10611 	 * Read requests are restricted to multiples of the system block size.
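	 * Since un_sys_blocksize is a power of two, (x & secmask) is
	 * non-zero exactly when x is not block aligned; e.g. with
	 * 512-byte blocks, secmask is 0x1ff.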
10612 	 */
10613 	secmask = un->un_sys_blocksize - 1;
10614
10615 	if (uio->uio_loffset & ((offset_t)(secmask))) {
10616 		SD_ERROR(SD_LOG_READ_WRITE, un,
10617 		    "sdaread: file offset not modulo %d\n",
10618 		    un->un_sys_blocksize);
10619 		err = EINVAL;
10620 	} else if (uio->uio_iov->iov_len & (secmask)) {
10621 		SD_ERROR(SD_LOG_READ_WRITE, un,
10622 		    "sdaread: transfer length not modulo %d\n",
10623 		    un->un_sys_blocksize);
10624 		err = EINVAL;
10625 	} else {
10626 		err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
10627 	}
10628
10629 	return (err);
10630 }
10631
10632
10633 /*
10634  * Function: sdawrite
10635  *
10636  * Description: Driver's awrite(9e) entry point function.
10637  *
10638  * Arguments: dev - device number
10639  *		aio - structure pointer describing where data is stored
10640  *		cred_p - user credential pointer
10641  *
10642  * Return Code: ENXIO
10643  *		EIO
10644  *		EINVAL
10645  *		value returned by aphysio
10646  *
10647  * Context: Kernel thread context.
10648  */
10649 /* ARGSUSED */
10650 static int
10651 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
10652 {
10653 	struct sd_lun	*un = NULL;
10654 	struct uio	*uio = aio->aio_uio;
10655 	int		secmask;
10656 	int		err = 0;
10657 	sd_ssc_t	*ssc;
10658
10659 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10660 		return (ENXIO);
10661 	}
10662
10663 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10664
10665 	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10666 		mutex_enter(SD_MUTEX(un));
10667 		/*
10668 		 * Because the call to sd_ready_and_valid will issue I/O we
10669 		 * must wait here if either the device is suspended or
10670 		 * if its power level is changing.
10671 		 */
10672 		while ((un->un_state == SD_STATE_SUSPENDED) ||
10673 		    (un->un_state == SD_STATE_PM_CHANGING)) {
10674 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10675 		}
10676 		un->un_ncmds_in_driver++;
10677 		mutex_exit(SD_MUTEX(un));
10678
10679 		/* Initialize sd_ssc_t for internal uscsi commands */
10680 		ssc = sd_ssc_init(un);
10681 		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10682 			err = EIO;
10683 		} else {
10684 			err = 0;
10685 		}
10686 		sd_ssc_fini(ssc);
10687
10688 		mutex_enter(SD_MUTEX(un));
10689 		un->un_ncmds_in_driver--;
10690 		ASSERT(un->un_ncmds_in_driver >= 0);
10691 		mutex_exit(SD_MUTEX(un));
10692 		if (err != 0)
10693 			return (err);
10694 	}
10695
10696 	/*
10697 	 * Write requests are restricted to multiples of the system block size.
10698 */ 10699 secmask = un->un_sys_blocksize - 1; 10700 10701 if (uio->uio_loffset & ((offset_t)(secmask))) { 10702 SD_ERROR(SD_LOG_READ_WRITE, un, 10703 "sdawrite: file offset not modulo %d\n", 10704 un->un_sys_blocksize); 10705 err = EINVAL; 10706 } else if (uio->uio_iov->iov_len & (secmask)) { 10707 SD_ERROR(SD_LOG_READ_WRITE, un, 10708 "sdawrite: transfer length not modulo %d\n", 10709 un->un_sys_blocksize); 10710 err = EINVAL; 10711 } else { 10712 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10713 } 10714 10715 return (err); 10716 } 10717 10718 10719 10720 10721 10722 /* 10723 * Driver IO processing follows the following sequence: 10724 * 10725 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10726 * | | ^ 10727 * v v | 10728 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10729 * | | | | 10730 * v | | | 10731 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10732 * | | ^ ^ 10733 * v v | | 10734 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10735 * | | | | 10736 * +---+ | +------------+ +-------+ 10737 * | | | | 10738 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10739 * | v | | 10740 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10741 * | | ^ | 10742 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10743 * | v | | 10744 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10745 * | | ^ | 10746 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10747 * | v | | 10748 * | sd_checksum_iostart() sd_checksum_iodone() | 10749 * | | ^ | 10750 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10751 * | v | | 10752 * | sd_pm_iostart() sd_pm_iodone() | 10753 * | | ^ | 10754 * | | | | 10755 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10756 * | ^ 10757 * v | 10758 * sd_core_iostart() | 10759 * | | 10760 * | +------>(*destroypkt)() 10761 * +-> sd_start_cmds() <-+ | | 10762 * | | | v 10763 * | | | scsi_destroy_pkt(9F) 10764 * | | | 10765 * +->(*initpkt)() +- sdintr() 10766 * | | | | 10767 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10768 * | +-> scsi_setup_cdb(9F) | 10769 * | | 10770 * +--> scsi_transport(9F) | 10771 * | | 10772 * +----> SCSA ---->+ 10773 * 10774 * 10775 * This code is based upon the following presumptions: 10776 * 10777 * - iostart and iodone functions operate on buf(9S) structures. These 10778 * functions perform the necessary operations on the buf(9S) and pass 10779 * them along to the next function in the chain by using the macros 10780 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10781 * (for iodone side functions). 10782 * 10783 * - The iostart side functions may sleep. The iodone side functions 10784 * are called under interrupt context and may NOT sleep. Therefore 10785 * iodone side functions also may not call iostart side functions. 10786 * (NOTE: iostart side functions should NOT sleep for memory, as 10787 * this could result in deadlock.) 10788 * 10789 * - An iostart side function may call its corresponding iodone side 10790 * function directly (if necessary). 10791 * 10792 * - In the event of an error, an iostart side function can return a buf(9S) 10793 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10794 * b_error in the usual way of course). 10795 * 10796 * - The taskq mechanism may be used by the iodone side functions to dispatch 10797 * requests to the iostart side functions. The iostart side functions in 10798 * this case would be called under the context of a taskq thread, so it's 10799 * OK for them to block/sleep/spin in this case. 
10800 * 10801 * - iostart side functions may allocate "shadow" buf(9S) structs and 10802 * pass them along to the next function in the chain. The corresponding 10803 * iodone side functions must coalesce the "shadow" bufs and return 10804 * the "original" buf to the next higher layer. 10805 * 10806 * - The b_private field of the buf(9S) struct holds a pointer to 10807 * an sd_xbuf struct, which contains information needed to 10808 * construct the scsi_pkt for the command. 10809 * 10810 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 10811 * layer must acquire & release the SD_MUTEX(un) as needed. 10812 */ 10813 10814 10815 /* 10816 * Create taskq for all targets in the system. This is created at 10817 * _init(9E) and destroyed at _fini(9E). 10818 * 10819 * Note: here we set the minalloc to a reasonably high number to ensure that 10820 * we will have an adequate supply of task entries available at interrupt time. 10821 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 10822 * sd_create_taskq(). Since we do not want to sleep for allocations at 10823 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 10824 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 10825 * requests any one instant in time. 10826 */ 10827 #define SD_TASKQ_NUMTHREADS 8 10828 #define SD_TASKQ_MINALLOC 256 10829 #define SD_TASKQ_MAXALLOC 256 10830 10831 static taskq_t *sd_tq = NULL; 10832 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 10833 10834 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 10835 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 10836 10837 /* 10838 * The following task queue is being created for the write part of 10839 * read-modify-write of non-512 block size devices. 10840 * Limit the number of threads to 1 for now. This number has been chosen 10841 * considering the fact that it applies only to dvd ram drives/MO drives 10842 * currently. Performance for which is not main criteria at this stage. 10843 * Note: It needs to be explored if we can use a single taskq in future 10844 */ 10845 #define SD_WMR_TASKQ_NUMTHREADS 1 10846 static taskq_t *sd_wmr_tq = NULL; 10847 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 10848 10849 /* 10850 * Function: sd_taskq_create 10851 * 10852 * Description: Create taskq thread(s) and preallocate task entries 10853 * 10854 * Return Code: Returns a pointer to the allocated taskq_t. 10855 * 10856 * Context: Can sleep. Requires blockable context. 10857 * 10858 * Notes: - The taskq() facility currently is NOT part of the DDI. 10859 * (definitely NOT recommeded for 3rd-party drivers!) :-) 10860 * - taskq_create() will block for memory, also it will panic 10861 * if it cannot create the requested number of threads. 10862 * - Currently taskq_create() creates threads that cannot be 10863 * swapped. 
10864 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 10865 * supply of taskq entries at interrupt time (ie, so that we 10866 * do not have to sleep for memory) 10867 */ 10868 10869 static void 10870 sd_taskq_create(void) 10871 { 10872 char taskq_name[TASKQ_NAMELEN]; 10873 10874 ASSERT(sd_tq == NULL); 10875 ASSERT(sd_wmr_tq == NULL); 10876 10877 (void) snprintf(taskq_name, sizeof (taskq_name), 10878 "%s_drv_taskq", sd_label); 10879 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 10880 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10881 TASKQ_PREPOPULATE)); 10882 10883 (void) snprintf(taskq_name, sizeof (taskq_name), 10884 "%s_rmw_taskq", sd_label); 10885 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 10886 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10887 TASKQ_PREPOPULATE)); 10888 } 10889 10890 10891 /* 10892 * Function: sd_taskq_delete 10893 * 10894 * Description: Complementary cleanup routine for sd_taskq_create(). 10895 * 10896 * Context: Kernel thread context. 10897 */ 10898 10899 static void 10900 sd_taskq_delete(void) 10901 { 10902 ASSERT(sd_tq != NULL); 10903 ASSERT(sd_wmr_tq != NULL); 10904 taskq_destroy(sd_tq); 10905 taskq_destroy(sd_wmr_tq); 10906 sd_tq = NULL; 10907 sd_wmr_tq = NULL; 10908 } 10909 10910 10911 /* 10912 * Function: sdstrategy 10913 * 10914 * Description: Driver's strategy (9E) entry point function. 10915 * 10916 * Arguments: bp - pointer to buf(9S) 10917 * 10918 * Return Code: Always returns zero 10919 * 10920 * Context: Kernel thread context. 10921 */ 10922 10923 static int 10924 sdstrategy(struct buf *bp) 10925 { 10926 struct sd_lun *un; 10927 10928 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10929 if (un == NULL) { 10930 bioerror(bp, EIO); 10931 bp->b_resid = bp->b_bcount; 10932 biodone(bp); 10933 return (0); 10934 } 10935 /* As was done in the past, fail new cmds. if state is dumping. */ 10936 if (un->un_state == SD_STATE_DUMPING) { 10937 bioerror(bp, ENXIO); 10938 bp->b_resid = bp->b_bcount; 10939 biodone(bp); 10940 return (0); 10941 } 10942 10943 ASSERT(!mutex_owned(SD_MUTEX(un))); 10944 10945 /* 10946 * Commands may sneak in while we released the mutex in 10947 * DDI_SUSPEND, we should block new commands. However, old 10948 * commands that are still in the driver at this point should 10949 * still be allowed to drain. 10950 */ 10951 mutex_enter(SD_MUTEX(un)); 10952 /* 10953 * Must wait here if either the device is suspended or 10954 * if it's power level is changing. 10955 */ 10956 while ((un->un_state == SD_STATE_SUSPENDED) || 10957 (un->un_state == SD_STATE_PM_CHANGING)) { 10958 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10959 } 10960 10961 un->un_ncmds_in_driver++; 10962 10963 /* 10964 * atapi: Since we are running the CD for now in PIO mode we need to 10965 * call bp_mapin here to avoid bp_mapin called interrupt context under 10966 * the HBA's init_pkt routine. 10967 */ 10968 if (un->un_f_cfg_is_atapi == TRUE) { 10969 mutex_exit(SD_MUTEX(un)); 10970 bp_mapin(bp); 10971 mutex_enter(SD_MUTEX(un)); 10972 } 10973 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 10974 un->un_ncmds_in_driver); 10975 10976 if (bp->b_flags & B_WRITE) 10977 un->un_f_sync_cache_required = TRUE; 10978 10979 mutex_exit(SD_MUTEX(un)); 10980 10981 /* 10982 * This will (eventually) allocate the sd_xbuf area and 10983 * call sd_xbuf_strategy(). We just want to return the 10984 * result of ddi_xbuf_qstrategy so that we have an opt- 10985 * imized tail call which saves us a stack frame. 
10986 */ 10987 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 10988 } 10989 10990 10991 /* 10992 * Function: sd_xbuf_strategy 10993 * 10994 * Description: Function for initiating IO operations via the 10995 * ddi_xbuf_qstrategy() mechanism. 10996 * 10997 * Context: Kernel thread context. 10998 */ 10999 11000 static void 11001 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11002 { 11003 struct sd_lun *un = arg; 11004 11005 ASSERT(bp != NULL); 11006 ASSERT(xp != NULL); 11007 ASSERT(un != NULL); 11008 ASSERT(!mutex_owned(SD_MUTEX(un))); 11009 11010 /* 11011 * Initialize the fields in the xbuf and save a pointer to the 11012 * xbuf in bp->b_private. 11013 */ 11014 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11015 11016 /* Send the buf down the iostart chain */ 11017 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11018 } 11019 11020 11021 /* 11022 * Function: sd_xbuf_init 11023 * 11024 * Description: Prepare the given sd_xbuf struct for use. 11025 * 11026 * Arguments: un - ptr to softstate 11027 * bp - ptr to associated buf(9S) 11028 * xp - ptr to associated sd_xbuf 11029 * chain_type - IO chain type to use: 11030 * SD_CHAIN_NULL 11031 * SD_CHAIN_BUFIO 11032 * SD_CHAIN_USCSI 11033 * SD_CHAIN_DIRECT 11034 * SD_CHAIN_DIRECT_PRIORITY 11035 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11036 * initialization; may be NULL if none. 11037 * 11038 * Context: Kernel thread context 11039 */ 11040 11041 static void 11042 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11043 uchar_t chain_type, void *pktinfop) 11044 { 11045 int index; 11046 11047 ASSERT(un != NULL); 11048 ASSERT(bp != NULL); 11049 ASSERT(xp != NULL); 11050 11051 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11052 bp, chain_type); 11053 11054 xp->xb_un = un; 11055 xp->xb_pktp = NULL; 11056 xp->xb_pktinfo = pktinfop; 11057 xp->xb_private = bp->b_private; 11058 xp->xb_blkno = (daddr_t)bp->b_blkno; 11059 11060 /* 11061 * Set up the iostart and iodone chain indexes in the xbuf, based 11062 * upon the specified chain type to use. 11063 */ 11064 switch (chain_type) { 11065 case SD_CHAIN_NULL: 11066 /* 11067 * Fall thru to just use the values for the buf type, even 11068 * tho for the NULL chain these values will never be used. 11069 */ 11070 /* FALLTHRU */ 11071 case SD_CHAIN_BUFIO: 11072 index = un->un_buf_chain_type; 11073 break; 11074 case SD_CHAIN_USCSI: 11075 index = un->un_uscsi_chain_type; 11076 break; 11077 case SD_CHAIN_DIRECT: 11078 index = un->un_direct_chain_type; 11079 break; 11080 case SD_CHAIN_DIRECT_PRIORITY: 11081 index = un->un_priority_chain_type; 11082 break; 11083 default: 11084 /* We're really broken if we ever get here... */ 11085 panic("sd_xbuf_init: illegal chain type!"); 11086 /*NOTREACHED*/ 11087 } 11088 11089 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11090 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11091 11092 /* 11093 * It might be a bit easier to simply bzero the entire xbuf above, 11094 * but it turns out that since we init a fair number of members anyway, 11095 * we save a fair number cycles by doing explicit assignment of zero. 
11096 */ 11097 xp->xb_pkt_flags = 0; 11098 xp->xb_dma_resid = 0; 11099 xp->xb_retry_count = 0; 11100 xp->xb_victim_retry_count = 0; 11101 xp->xb_ua_retry_count = 0; 11102 xp->xb_nr_retry_count = 0; 11103 xp->xb_sense_bp = NULL; 11104 xp->xb_sense_status = 0; 11105 xp->xb_sense_state = 0; 11106 xp->xb_sense_resid = 0; 11107 xp->xb_ena = 0; 11108 11109 bp->b_private = xp; 11110 bp->b_flags &= ~(B_DONE | B_ERROR); 11111 bp->b_resid = 0; 11112 bp->av_forw = NULL; 11113 bp->av_back = NULL; 11114 bioerror(bp, 0); 11115 11116 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11117 } 11118 11119 11120 /* 11121 * Function: sd_uscsi_strategy 11122 * 11123 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11124 * 11125 * Arguments: bp - buf struct ptr 11126 * 11127 * Return Code: Always returns 0 11128 * 11129 * Context: Kernel thread context 11130 */ 11131 11132 static int 11133 sd_uscsi_strategy(struct buf *bp) 11134 { 11135 struct sd_lun *un; 11136 struct sd_uscsi_info *uip; 11137 struct sd_xbuf *xp; 11138 uchar_t chain_type; 11139 uchar_t cmd; 11140 11141 ASSERT(bp != NULL); 11142 11143 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11144 if (un == NULL) { 11145 bioerror(bp, EIO); 11146 bp->b_resid = bp->b_bcount; 11147 biodone(bp); 11148 return (0); 11149 } 11150 11151 ASSERT(!mutex_owned(SD_MUTEX(un))); 11152 11153 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11154 11155 /* 11156 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11157 */ 11158 ASSERT(bp->b_private != NULL); 11159 uip = (struct sd_uscsi_info *)bp->b_private; 11160 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0]; 11161 11162 mutex_enter(SD_MUTEX(un)); 11163 /* 11164 * atapi: Since we are running the CD for now in PIO mode we need to 11165 * call bp_mapin here to avoid bp_mapin called interrupt context under 11166 * the HBA's init_pkt routine. 11167 */ 11168 if (un->un_f_cfg_is_atapi == TRUE) { 11169 mutex_exit(SD_MUTEX(un)); 11170 bp_mapin(bp); 11171 mutex_enter(SD_MUTEX(un)); 11172 } 11173 un->un_ncmds_in_driver++; 11174 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11175 un->un_ncmds_in_driver); 11176 11177 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) && 11178 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1)) 11179 un->un_f_sync_cache_required = TRUE; 11180 11181 mutex_exit(SD_MUTEX(un)); 11182 11183 switch (uip->ui_flags) { 11184 case SD_PATH_DIRECT: 11185 chain_type = SD_CHAIN_DIRECT; 11186 break; 11187 case SD_PATH_DIRECT_PRIORITY: 11188 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11189 break; 11190 default: 11191 chain_type = SD_CHAIN_USCSI; 11192 break; 11193 } 11194 11195 /* 11196 * We may allocate extra buf for external USCSI commands. If the 11197 * application asks for bigger than 20-byte sense data via USCSI, 11198 * SCSA layer will allocate 252 bytes sense buf for that command. 
11199 */ 11200 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 11201 SENSE_LENGTH) { 11202 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 11203 MAX_SENSE_LENGTH, KM_SLEEP); 11204 } else { 11205 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 11206 } 11207 11208 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 11209 11210 /* Use the index obtained within xbuf_init */ 11211 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 11212 11213 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 11214 11215 return (0); 11216 } 11217 11218 /* 11219 * Function: sd_send_scsi_cmd 11220 * 11221 * Description: Runs a USCSI command for user (when called thru sdioctl), 11222 * or for the driver 11223 * 11224 * Arguments: dev - the dev_t for the device 11225 * incmd - ptr to a valid uscsi_cmd struct 11226 * flag - bit flag, indicating open settings, 32/64 bit type 11227 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11228 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11229 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11230 * to use the USCSI "direct" chain and bypass the normal 11231 * command waitq. 11232 * 11233 * Return Code: 0 - successful completion of the given command 11234 * EIO - scsi_uscsi_handle_command() failed 11235 * ENXIO - soft state not found for specified dev 11236 * EINVAL 11237 * EFAULT - copyin/copyout error 11238 * return code of scsi_uscsi_handle_command(): 11239 * EIO 11240 * ENXIO 11241 * EACCES 11242 * 11243 * Context: Waits for command to complete. Can sleep. 11244 */ 11245 11246 static int 11247 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 11248 enum uio_seg dataspace, int path_flag) 11249 { 11250 struct sd_lun *un; 11251 sd_ssc_t *ssc; 11252 int rval; 11253 11254 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 11255 if (un == NULL) { 11256 return (ENXIO); 11257 } 11258 11259 /* 11260 * Using sd_ssc_send to handle uscsi cmd 11261 */ 11262 ssc = sd_ssc_init(un); 11263 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag); 11264 sd_ssc_fini(ssc); 11265 11266 return (rval); 11267 } 11268 11269 /* 11270 * Function: sd_ssc_init 11271 * 11272 * Description: Uscsi end-user call this function to initialize necessary 11273 * fields, such as uscsi_cmd and sd_uscsi_info struct. 11274 * 11275 * The return value of sd_send_scsi_cmd will be treated as a 11276 * fault in various conditions. Even it is not Zero, some 11277 * callers may ignore the return value. That is to say, we can 11278 * not make an accurate assessment in sdintr, since if a 11279 * command is failed in sdintr it does not mean the caller of 11280 * sd_send_scsi_cmd will treat it as a real failure. 11281 * 11282 * To avoid printing too many error logs for a failed uscsi 11283 * packet that the caller may not treat it as a failure, the 11284 * sd will keep silent for handling all uscsi commands. 11285 * 11286 * During detach->attach and attach-open, for some types of 11287 * problems, the driver should be providing information about 11288 * the problem encountered. Device use USCSI_SILENT, which 11289 * suppresses all driver information. The result is that no 11290 * information about the problem is available. Being 11291 * completely silent during this time is inappropriate. The 11292 * driver needs a more selective filter than USCSI_SILENT, so 11293 * that information related to faults is provided. 
11294 * 11295 * To make the accurate accessment, the caller of 11296 * sd_send_scsi_USCSI_CMD should take the ownership and 11297 * get necessary information to print error messages. 11298 * 11299 * If we want to print necessary info of uscsi command, we need to 11300 * keep the uscsi_cmd and sd_uscsi_info till we can make the 11301 * assessment. We use sd_ssc_init to alloc necessary 11302 * structs for sending an uscsi command and we are also 11303 * responsible for free the memory by calling 11304 * sd_ssc_fini. 11305 * 11306 * The calling secquences will look like: 11307 * sd_ssc_init-> 11308 * 11309 * ... 11310 * 11311 * sd_send_scsi_USCSI_CMD-> 11312 * sd_ssc_send-> - - - sdintr 11313 * ... 11314 * 11315 * if we think the return value should be treated as a 11316 * failure, we make the accessment here and print out 11317 * necessary by retrieving uscsi_cmd and sd_uscsi_info' 11318 * 11319 * ... 11320 * 11321 * sd_ssc_fini 11322 * 11323 * 11324 * Arguments: un - pointer to driver soft state (unit) structure for this 11325 * target. 11326 * 11327 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct, it contains 11328 * uscsi_cmd and sd_uscsi_info. 11329 * NULL - if can not alloc memory for sd_ssc_t struct 11330 * 11331 * Context: Kernel Thread. 11332 */ 11333 static sd_ssc_t * 11334 sd_ssc_init(struct sd_lun *un) 11335 { 11336 sd_ssc_t *ssc; 11337 struct uscsi_cmd *ucmdp; 11338 struct sd_uscsi_info *uip; 11339 11340 ASSERT(un != NULL); 11341 ASSERT(!mutex_owned(SD_MUTEX(un))); 11342 11343 /* 11344 * Allocate sd_ssc_t structure 11345 */ 11346 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP); 11347 11348 /* 11349 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine 11350 */ 11351 ucmdp = scsi_uscsi_alloc(); 11352 11353 /* 11354 * Allocate sd_uscsi_info structure 11355 */ 11356 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11357 11358 ssc->ssc_uscsi_cmd = ucmdp; 11359 ssc->ssc_uscsi_info = uip; 11360 ssc->ssc_un = un; 11361 11362 return (ssc); 11363 } 11364 11365 /* 11366 * Function: sd_ssc_fini 11367 * 11368 * Description: To free sd_ssc_t and it's hanging off 11369 * 11370 * Arguments: ssc - struct pointer of sd_ssc_t. 11371 */ 11372 static void 11373 sd_ssc_fini(sd_ssc_t *ssc) 11374 { 11375 scsi_uscsi_free(ssc->ssc_uscsi_cmd); 11376 11377 if (ssc->ssc_uscsi_info != NULL) { 11378 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info)); 11379 ssc->ssc_uscsi_info = NULL; 11380 } 11381 11382 kmem_free(ssc, sizeof (sd_ssc_t)); 11383 ssc = NULL; 11384 } 11385 11386 /* 11387 * Function: sd_ssc_send 11388 * 11389 * Description: Runs a USCSI command for user when called through sdioctl, 11390 * or for the driver. 11391 * 11392 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11393 * sd_uscsi_info in. 11394 * incmd - ptr to a valid uscsi_cmd struct 11395 * flag - bit flag, indicating open settings, 32/64 bit type 11396 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11397 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11398 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11399 * to use the USCSI "direct" chain and bypass the normal 11400 * command waitq. 
11401 * 11402 * Return Code: 0 - successful completion of the given command 11403 * EIO - scsi_uscsi_handle_command() failed 11404 * ENXIO - soft state not found for specified dev 11405 * EINVAL 11406 * EFAULT - copyin/copyout error 11407 * return code of scsi_uscsi_handle_command(): 11408 * EIO 11409 * ENXIO 11410 * EACCES 11411 * 11412 * Context: Kernel Thread; 11413 * Waits for command to complete. Can sleep. 11414 */ 11415 static int 11416 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag, 11417 enum uio_seg dataspace, int path_flag) 11418 { 11419 struct sd_uscsi_info *uip; 11420 struct uscsi_cmd *uscmd = ssc->ssc_uscsi_cmd; 11421 struct sd_lun *un; 11422 dev_t dev; 11423 11424 int format = 0; 11425 int rval; 11426 11427 11428 ASSERT(ssc != NULL); 11429 un = ssc->ssc_un; 11430 ASSERT(un != NULL); 11431 ASSERT(!mutex_owned(SD_MUTEX(un))); 11432 ASSERT(!(ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT)); 11433 /* 11434 * We need to make sure sd_ssc_send will have sd_ssc_assessment 11435 * followed to avoid missing any point of telemetry. 11436 */ 11437 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT; 11438 11439 if (uscmd == NULL) { 11440 return (ENXIO); 11441 } 11442 11443 11444 #ifdef SDDEBUG 11445 switch (dataspace) { 11446 case UIO_USERSPACE: 11447 SD_TRACE(SD_LOG_IO, un, 11448 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 11449 break; 11450 case UIO_SYSSPACE: 11451 SD_TRACE(SD_LOG_IO, un, 11452 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 11453 break; 11454 default: 11455 SD_TRACE(SD_LOG_IO, un, 11456 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 11457 break; 11458 } 11459 #endif 11460 11461 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 11462 SD_ADDRESS(un), &uscmd); 11463 if (rval != 0) { 11464 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 11465 "scsi_uscsi_alloc_and_copyin failed\n", un); 11466 return (rval); 11467 } 11468 11469 if ((uscmd->uscsi_cdb != NULL) && 11470 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 11471 mutex_enter(SD_MUTEX(un)); 11472 un->un_f_format_in_progress = TRUE; 11473 mutex_exit(SD_MUTEX(un)); 11474 format = 1; 11475 } 11476 11477 /* 11478 * Allocate an sd_uscsi_info struct and fill it with the info 11479 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11480 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11481 * since we allocate the buf here in this function, we do not 11482 * need to preserve the prior contents of b_private. 11483 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11484 */ 11485 uip = ssc->ssc_uscsi_info; 11486 uip->ui_flags = path_flag; 11487 uip->ui_cmdp = uscmd; 11488 11489 /* 11490 * Commands sent with priority are intended for error recovery 11491 * situations, and do not have retries performed. 
11492 */ 11493 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11494 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11495 } 11496 uscmd->uscsi_flags &= ~USCSI_NOINTR; 11497 11498 dev = SD_GET_DEV(un); 11499 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 11500 sd_uscsi_strategy, NULL, uip); 11501 11502 /* 11503 * mark ssc_flags right after handle_cmd to make sure 11504 * the uscsi has been sent 11505 */ 11506 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 11507 11508 #ifdef SDDEBUG 11509 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11510 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11511 uscmd->uscsi_status, uscmd->uscsi_resid); 11512 if (uscmd->uscsi_bufaddr != NULL) { 11513 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11514 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11515 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11516 if (dataspace == UIO_SYSSPACE) { 11517 SD_DUMP_MEMORY(un, SD_LOG_IO, 11518 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11519 uscmd->uscsi_buflen, SD_LOG_HEX); 11520 } 11521 } 11522 #endif 11523 11524 if (format == 1) { 11525 mutex_enter(SD_MUTEX(un)); 11526 un->un_f_format_in_progress = FALSE; 11527 mutex_exit(SD_MUTEX(un)); 11528 } 11529 11530 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 11531 11532 return (rval); 11533 } 11534 11535 /* 11536 * Function: sd_ssc_print 11537 * 11538 * Description: Print information available to the console. 11539 * 11540 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11541 * sd_uscsi_info in. 11542 * sd_severity - log level. 11543 * Context: Kernel thread or interrupt context. 11544 */ 11545 static void 11546 sd_ssc_print(sd_ssc_t *ssc, int sd_severity) 11547 { 11548 struct uscsi_cmd *ucmdp; 11549 struct scsi_device *devp; 11550 dev_info_t *devinfo; 11551 uchar_t *sensep; 11552 int senlen; 11553 union scsi_cdb *cdbp; 11554 uchar_t com; 11555 extern struct scsi_key_strings scsi_cmds[]; 11556 11557 ASSERT(ssc != NULL); 11558 11559 ucmdp = ssc->ssc_uscsi_cmd; 11560 devp = SD_SCSI_DEVP(ssc->ssc_un); 11561 devinfo = SD_DEVINFO(ssc->ssc_un); 11562 ASSERT(ucmdp != NULL); 11563 ASSERT(devp != NULL); 11564 ASSERT(devinfo != NULL); 11565 sensep = (uint8_t *)ucmdp->uscsi_rqbuf; 11566 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid; 11567 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb; 11568 11569 /* In certain case (like DOORLOCK), the cdb could be NULL. */ 11570 if (cdbp == NULL) 11571 return; 11572 /* We don't print log if no sense data available. */ 11573 if (senlen == 0) 11574 sensep = NULL; 11575 com = cdbp->scc_cmd; 11576 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com, 11577 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL); 11578 } 11579 11580 /* 11581 * Function: sd_ssc_assessment 11582 * 11583 * Description: We use this function to make an assessment at the point 11584 * where SD driver may encounter a potential error. 11585 * 11586 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11587 * sd_uscsi_info in. 11588 * tp_assess - a hint of strategy for ereport posting. 11589 * Possible values of tp_assess include: 11590 * SD_FMT_IGNORE - we don't post any ereport because we're 11591 * sure that it is ok to ignore the underlying problems. 11592 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now 11593 * but it might be not correct to ignore the underlying hardware 11594 * error. 11595 * SD_FMT_STATUS_CHECK - we will post an ereport with the 11596 * payload driver-assessment of value "fail" or 11597 * "fatal"(depending on what information we have here). 
This 11598 * assessment value is usually set when the SD driver thinks a 11599 * potential error has occurred (typically, when the return value 11600 * of the SCSI command is EIO). 11601 * SD_FMT_STANDARD - we will post an ereport with the payload 11602 * driver-assessment of value "info". This assessment value is 11603 * set when the SCSI command returned successfully and with 11604 * sense data sent back. 11605 * 11606 * Context: Kernel thread. 11607 */ 11608 static void 11609 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess) 11610 { 11611 int senlen = 0; 11612 struct uscsi_cmd *ucmdp = NULL; 11613 struct sd_lun *un; 11614 11615 ASSERT(ssc != NULL); 11616 ASSERT(ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT); 11617 11618 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT; 11619 un = ssc->ssc_un; 11620 ASSERT(un != NULL); 11621 11622 /* 11623 * We don't handle CD-ROM or removable media 11624 */ 11625 if (ISCD(un) || un->un_f_has_removable_media) { 11626 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED; 11627 ssc->ssc_flags &= ~SSC_FLAGS_INVALID_DATA; 11628 return; 11629 } 11630 11631 /* 11632 * Only handle an issued command which is waiting for assessment. 11633 */ 11634 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) { 11635 sd_ssc_print(ssc, SCSI_ERR_INFO); 11636 return; 11637 } else 11638 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED; 11639 11640 ucmdp = ssc->ssc_uscsi_cmd; 11641 ASSERT(ucmdp != NULL); 11642 11643 /* 11644 * We will not deal with non-retryable commands here. 11645 */ 11646 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) { 11647 ssc->ssc_flags &= ~SSC_FLAGS_INVALID_DATA; 11648 return; 11649 } 11650 11651 switch (tp_assess) { 11652 case SD_FMT_IGNORE: 11653 case SD_FMT_IGNORE_COMPROMISE: 11654 ssc->ssc_flags &= ~SSC_FLAGS_INVALID_DATA; 11655 break; 11656 case SD_FMT_STATUS_CHECK: 11657 /* 11658 * For a failed command (including a command that succeeded 11659 * but sent back invalid data). 11660 */ 11661 sd_ssc_post(ssc, SD_FM_DRV_FATAL); 11662 break; 11663 case SD_FMT_STANDARD: 11664 /* 11665 * Always for commands that succeeded, possibly with sense 11666 * data sent back. 11667 * Limitation: 11668 * We can only handle a succeeded command with sense 11669 * data sent back when auto-request-sense is enabled. 11670 */ 11671 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen - 11672 ssc->ssc_uscsi_cmd->uscsi_rqresid; 11673 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) && 11674 (un->un_f_arq_enabled == TRUE) && 11675 senlen > 0 && 11676 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) { 11677 sd_ssc_post(ssc, SD_FM_DRV_NOTICE); 11678 } 11679 break; 11680 default: 11681 /* 11682 * Should be a software error. 11683 */ 11684 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 11685 "sd_ssc_assessment got wrong \ 11686 sd_type_assessment %d\n", tp_assess); 11687 break; 11688 } 11689 } 11690 11691 /* 11692 * Function: sd_ssc_post 11693 * 11694 * Description: 1. read the driver property to get the fm-scsi-log flag. 11695 * 2. print a log message if fm-scsi-log is non-zero. 11696 * 3. call sd_ssc_ereport_post to post an ereport if possible. 11697 * 11698 * Context: May be called from kernel thread or interrupt context.
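 *
 * For example (a sketch; the property defaults to 0), the log
 * messages could be enabled for all instances with an sd.conf entry
 * such as:
 *
 *	fm-scsi-log=1;
 *
 * in which case each driver assessment is mapped to a scsi_log()
 * severity below before the ereport is posted.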
11699 */ 11700 static void 11701 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess) 11702 { 11703 struct sd_lun *un; 11704 int fm_scsi_log = 0; 11705 int sd_severity; 11706 11707 ASSERT(ssc != NULL); 11708 un = ssc->ssc_un; 11709 ASSERT(un != NULL); 11710 11711 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 11712 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0); 11713 11714 if (fm_scsi_log != 0) { 11715 switch (sd_assess) { 11716 case SD_FM_DRV_FATAL: 11717 sd_severity = SCSI_ERR_FATAL; 11718 break; 11719 case SD_FM_DRV_RECOVERY: 11720 sd_severity = SCSI_ERR_RECOVERED; 11721 break; 11722 case SD_FM_DRV_RETRY: 11723 sd_severity = SCSI_ERR_RETRYABLE; 11724 break; 11725 case SD_FM_DRV_NOTICE: 11726 sd_severity = SCSI_ERR_INFO; 11727 break; 11728 default: 11729 sd_severity = SCSI_ERR_UNKNOWN; 11730 } 11731 /* print log */ 11732 sd_ssc_print(ssc, sd_severity); 11733 } 11734 11735 /* always post ereport */ 11736 sd_ssc_ereport_post(ssc, sd_assess); 11737 } 11738 11739 /* 11740 * Function: sd_ssc_set_info 11741 * 11742 * Description: Mark ssc_flags and set ssc_info which would be the 11743 * payload of uderr ereport. This function will cause 11744 * sd_ssc_ereport_post to post uderr ereport only. 11745 * 11746 * Context: Kernel thread or interrupt context 11747 */ 11748 static void 11749 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, const char *fmt, ...) 11750 { 11751 va_list ap; 11752 11753 ASSERT(ssc != NULL); 11754 11755 ssc->ssc_flags |= ssc_flags; 11756 va_start(ap, fmt); 11757 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap); 11758 va_end(ap); 11759 } 11760 11761 /* 11762 * Function: sd_buf_iodone 11763 * 11764 * Description: Frees the sd_xbuf & returns the buf to its originator. 11765 * 11766 * Context: May be called from interrupt context. 11767 */ 11768 /* ARGSUSED */ 11769 static void 11770 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 11771 { 11772 struct sd_xbuf *xp; 11773 11774 ASSERT(un != NULL); 11775 ASSERT(bp != NULL); 11776 ASSERT(!mutex_owned(SD_MUTEX(un))); 11777 11778 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 11779 11780 xp = SD_GET_XBUF(bp); 11781 ASSERT(xp != NULL); 11782 11783 mutex_enter(SD_MUTEX(un)); 11784 11785 /* 11786 * Grab time when the cmd completed. 11787 * This is used for determining if the system has been 11788 * idle long enough to make it idle to the PM framework. 11789 * This is for lowering the overhead, and therefore improving 11790 * performance per I/O operation. 11791 */ 11792 un->un_pm_idle_time = ddi_get_time(); 11793 11794 un->un_ncmds_in_driver--; 11795 ASSERT(un->un_ncmds_in_driver >= 0); 11796 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 11797 un->un_ncmds_in_driver); 11798 11799 mutex_exit(SD_MUTEX(un)); 11800 11801 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 11802 biodone(bp); /* bp is gone after this */ 11803 11804 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 11805 } 11806 11807 11808 /* 11809 * Function: sd_uscsi_iodone 11810 * 11811 * Description: Frees the sd_xbuf & returns the buf to its originator. 11812 * 11813 * Context: May be called from interrupt context. 
11814 */ 11815 /* ARGSUSED */ 11816 static void 11817 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11818 { 11819 struct sd_xbuf *xp; 11820 11821 ASSERT(un != NULL); 11822 ASSERT(bp != NULL); 11823 11824 xp = SD_GET_XBUF(bp); 11825 ASSERT(xp != NULL); 11826 ASSERT(!mutex_owned(SD_MUTEX(un))); 11827 11828 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 11829 11830 bp->b_private = xp->xb_private; 11831 11832 mutex_enter(SD_MUTEX(un)); 11833 11834 /* 11835 * Grab time when the cmd completed. 11836 * This is used for determining if the system has been 11837 * idle long enough to make it idle to the PM framework. 11838 * This is for lowering the overhead, and therefore improving 11839 * performance per I/O operation. 11840 */ 11841 un->un_pm_idle_time = ddi_get_time(); 11842 11843 un->un_ncmds_in_driver--; 11844 ASSERT(un->un_ncmds_in_driver >= 0); 11845 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 11846 un->un_ncmds_in_driver); 11847 11848 mutex_exit(SD_MUTEX(un)); 11849 11850 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 11851 SENSE_LENGTH) { 11852 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 11853 MAX_SENSE_LENGTH); 11854 } else { 11855 kmem_free(xp, sizeof (struct sd_xbuf)); 11856 } 11857 11858 biodone(bp); 11859 11860 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 11861 } 11862 11863 11864 /* 11865 * Function: sd_mapblockaddr_iostart 11866 * 11867 * Description: Verify request lies within the partition limits for 11868 * the indicated minor device. Issue "overrun" buf if 11869 * request would exceed partition range. Converts 11870 * partition-relative block address to absolute. 11871 * 11872 * Context: Can sleep 11873 * 11874 * Issues: This follows what the old code did, in terms of accessing 11875 * some of the partition info in the unit struct without holding 11876 * the mutext. This is a general issue, if the partition info 11877 * can be altered while IO is in progress... as soon as we send 11878 * a buf, its partitioning can be invalid before it gets to the 11879 * device. Probably the right fix is to move partitioning out 11880 * of the driver entirely. 11881 */ 11882 11883 static void 11884 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 11885 { 11886 diskaddr_t nblocks; /* #blocks in the given partition */ 11887 daddr_t blocknum; /* Block number specified by the buf */ 11888 size_t requested_nblocks; 11889 size_t available_nblocks; 11890 int partition; 11891 diskaddr_t partition_offset; 11892 struct sd_xbuf *xp; 11893 11894 ASSERT(un != NULL); 11895 ASSERT(bp != NULL); 11896 ASSERT(!mutex_owned(SD_MUTEX(un))); 11897 11898 SD_TRACE(SD_LOG_IO_PARTITION, un, 11899 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 11900 11901 xp = SD_GET_XBUF(bp); 11902 ASSERT(xp != NULL); 11903 11904 /* 11905 * If the geometry is not indicated as valid, attempt to access 11906 * the unit & verify the geometry/label. This can be the case for 11907 * removable-media devices, of if the device was opened in 11908 * NDELAY/NONBLOCK mode. 
11909 */ 11910 partition = SDPART(bp->b_edev); 11911 11912 if (!SD_IS_VALID_LABEL(un)) { 11913 sd_ssc_t *ssc; 11914 /* 11915 * Initialize sd_ssc_t for internal uscsi commands 11916 * In case of potential porformance issue, we need 11917 * to alloc memory only if there is invalid label 11918 */ 11919 ssc = sd_ssc_init(un); 11920 11921 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) { 11922 /* 11923 * For removable devices it is possible to start an 11924 * I/O without a media by opening the device in nodelay 11925 * mode. Also for writable CDs there can be many 11926 * scenarios where there is no geometry yet but volume 11927 * manager is trying to issue a read() just because 11928 * it can see TOC on the CD. So do not print a message 11929 * for removables. 11930 */ 11931 if (!un->un_f_has_removable_media) { 11932 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11933 "i/o to invalid geometry\n"); 11934 } 11935 bioerror(bp, EIO); 11936 bp->b_resid = bp->b_bcount; 11937 SD_BEGIN_IODONE(index, un, bp); 11938 11939 sd_ssc_fini(ssc); 11940 return; 11941 } 11942 sd_ssc_fini(ssc); 11943 } 11944 11945 nblocks = 0; 11946 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 11947 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 11948 11949 /* 11950 * blocknum is the starting block number of the request. At this 11951 * point it is still relative to the start of the minor device. 11952 */ 11953 blocknum = xp->xb_blkno; 11954 11955 /* 11956 * Legacy: If the starting block number is one past the last block 11957 * in the partition, do not set B_ERROR in the buf. 11958 */ 11959 if (blocknum == nblocks) { 11960 goto error_exit; 11961 } 11962 11963 /* 11964 * Confirm that the first block of the request lies within the 11965 * partition limits. Also the requested number of bytes must be 11966 * a multiple of the system block size. 11967 */ 11968 if ((blocknum < 0) || (blocknum >= nblocks) || 11969 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 11970 bp->b_flags |= B_ERROR; 11971 goto error_exit; 11972 } 11973 11974 /* 11975 * If the requsted # blocks exceeds the available # blocks, that 11976 * is an overrun of the partition. 11977 */ 11978 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 11979 available_nblocks = (size_t)(nblocks - blocknum); 11980 ASSERT(nblocks >= blocknum); 11981 11982 if (requested_nblocks > available_nblocks) { 11983 /* 11984 * Allocate an "overrun" buf to allow the request to proceed 11985 * for the amount of space available in the partition. The 11986 * amount not transferred will be added into the b_resid 11987 * when the operation is complete. The overrun buf 11988 * replaces the original buf here, and the original buf 11989 * is saved inside the overrun buf, for later use. 11990 */ 11991 size_t resid = SD_SYSBLOCKS2BYTES(un, 11992 (offset_t)(requested_nblocks - available_nblocks)); 11993 size_t count = bp->b_bcount - resid; 11994 /* 11995 * Note: count is an unsigned entity thus it'll NEVER 11996 * be less than 0 so ASSERT the original values are 11997 * correct. 11998 */ 11999 ASSERT(bp->b_bcount >= resid); 12000 12001 bp = sd_bioclone_alloc(bp, count, blocknum, 12002 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 12003 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12004 ASSERT(xp != NULL); 12005 } 12006 12007 /* At this point there should be no residual for this buf. */ 12008 ASSERT(bp->b_resid == 0); 12009 12010 /* Convert the block number to an absolute address. 
*/ 12011 xp->xb_blkno += partition_offset; 12012 12013 SD_NEXT_IOSTART(index, un, bp); 12014 12015 SD_TRACE(SD_LOG_IO_PARTITION, un, 12016 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12017 12018 return; 12019 12020 error_exit: 12021 bp->b_resid = bp->b_bcount; 12022 SD_BEGIN_IODONE(index, un, bp); 12023 SD_TRACE(SD_LOG_IO_PARTITION, un, 12024 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12025 } 12026 12027 12028 /* 12029 * Function: sd_mapblockaddr_iodone 12030 * 12031 * Description: Completion-side processing for partition management. 12032 * 12033 * Context: May be called under interrupt context 12034 */ 12035 12036 static void 12037 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12038 { 12039 /* int partition; */ /* Not used, see below. */ 12040 ASSERT(un != NULL); 12041 ASSERT(bp != NULL); 12042 ASSERT(!mutex_owned(SD_MUTEX(un))); 12043 12044 SD_TRACE(SD_LOG_IO_PARTITION, un, 12045 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12046 12047 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12048 /* 12049 * We have an "overrun" buf to deal with... 12050 */ 12051 struct sd_xbuf *xp; 12052 struct buf *obp; /* ptr to the original buf */ 12053 12054 xp = SD_GET_XBUF(bp); 12055 ASSERT(xp != NULL); 12056 12057 /* Retrieve the pointer to the original buf */ 12058 obp = (struct buf *)xp->xb_private; 12059 ASSERT(obp != NULL); 12060 12061 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12062 bioerror(obp, bp->b_error); 12063 12064 sd_bioclone_free(bp); 12065 12066 /* 12067 * Get back the original buf. 12068 * Note that since the restoration of xb_blkno below 12069 * was removed, the sd_xbuf is not needed. 12070 */ 12071 bp = obp; 12072 /* 12073 * xp = SD_GET_XBUF(bp); 12074 * ASSERT(xp != NULL); 12075 */ 12076 } 12077 12078 /* 12079 * Convert xp->xb_blkno back to a minor-device relative value. 12080 * Note: this has been commented out, as it is not needed in the 12081 * current implementation of the driver (ie, since this function 12082 * is at the top of the layering chains, so the info will be 12083 * discarded) and it is in the "hot" IO path. 12084 * 12085 * partition = getminor(bp->b_edev) & SDPART_MASK; 12086 * xp->xb_blkno -= un->un_offset[partition]; 12087 */ 12088 12089 SD_NEXT_IODONE(index, un, bp); 12090 12091 SD_TRACE(SD_LOG_IO_PARTITION, un, 12092 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12093 } 12094 12095 12096 /* 12097 * Function: sd_mapblocksize_iostart 12098 * 12099 * Description: Convert between system block size (un->un_sys_blocksize) 12100 * and target block size (un->un_tgt_blocksize). 12101 * 12102 * Context: Can sleep to allocate resources. 12103 * 12104 * Assumptions: A higher layer has already performed any partition validation, 12105 * and converted the xp->xb_blkno to an absolute value relative 12106 * to the start of the device. 12107 * 12108 * It is also assumed that the higher layer has implemented 12109 * an "overrun" mechanism for the case where the request would 12110 * read/write beyond the end of a partition. In this case we 12111 * assume (and ASSERT) that bp->b_resid == 0. 12112 * 12113 * Note: The implementation for this routine assumes the target 12114 * block size remains constant between allocation and transport.
12115 */ 12116 12117 static void 12118 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 12119 { 12120 struct sd_mapblocksize_info *bsp; 12121 struct sd_xbuf *xp; 12122 offset_t first_byte; 12123 daddr_t start_block, end_block; 12124 daddr_t request_bytes; 12125 ushort_t is_aligned = FALSE; 12126 12127 ASSERT(un != NULL); 12128 ASSERT(bp != NULL); 12129 ASSERT(!mutex_owned(SD_MUTEX(un))); 12130 ASSERT(bp->b_resid == 0); 12131 12132 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12133 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 12134 12135 /* 12136 * For a non-writable CD, a write request is an error 12137 */ 12138 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 12139 (un->un_f_mmc_writable_media == FALSE)) { 12140 bioerror(bp, EIO); 12141 bp->b_resid = bp->b_bcount; 12142 SD_BEGIN_IODONE(index, un, bp); 12143 return; 12144 } 12145 12146 /* 12147 * We do not need a shadow buf if the device is using 12148 * un->un_sys_blocksize as its block size or if bcount == 0. 12149 * In this case there is no layer-private data block allocated. 12150 */ 12151 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12152 (bp->b_bcount == 0)) { 12153 goto done; 12154 } 12155 12156 #if defined(__i386) || defined(__amd64) 12157 /* We do not support non-block-aligned transfers for ROD devices */ 12158 ASSERT(!ISROD(un)); 12159 #endif 12160 12161 xp = SD_GET_XBUF(bp); 12162 ASSERT(xp != NULL); 12163 12164 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12165 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 12166 un->un_tgt_blocksize, un->un_sys_blocksize); 12167 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12168 "request start block:0x%x\n", xp->xb_blkno); 12169 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12170 "request len:0x%x\n", bp->b_bcount); 12171 12172 /* 12173 * Allocate the layer-private data area for the mapblocksize layer. 12174 * Layers are allowed to use the xp_private member of the sd_xbuf 12175 * struct to store the pointer to their layer-private data block, but 12176 * each layer also has the responsibility of restoring the prior 12177 * contents of xb_private before returning the buf/xbuf to the 12178 * higher layer that sent it. 12179 * 12180 * Here we save the prior contents of xp->xb_private into the 12181 * bsp->mbs_oprivate field of our layer-private data area. This value 12182 * is restored by sd_mapblocksize_iodone() just prior to freeing up 12183 * the layer-private area and returning the buf/xbuf to the layer 12184 * that sent it. 12185 * 12186 * Note that here we use kmem_zalloc for the allocation as there are 12187 * parts of the mapblocksize code that expect certain fields to be 12188 * zero unless explicitly set to a required value. 12189 */ 12190 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12191 bsp->mbs_oprivate = xp->xb_private; 12192 xp->xb_private = bsp; 12193 12194 /* 12195 * This treats the data on the disk (target) as an array of bytes. 12196 * first_byte is the byte offset, from the beginning of the device, 12197 * to the location of the request. This is converted from a 12198 * un->un_sys_blocksize block address to a byte offset, and then back 12199 * to a block address based upon a un->un_tgt_blocksize block size. 12200 * 12201 * xp->xb_blkno should be absolute upon entry into this function, 12202 * but, but it is based upon partitions that use the "system" 12203 * block size. It must be adjusted to reflect the block size of 12204 * the target. 
12205 * 12206 * Note that end_block is actually the block that follows the last 12207 * block of the request, but that's what is needed for the computation. 12208 */ 12209 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12210 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 12211 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 12212 un->un_tgt_blocksize; 12213 12214 /* request_bytes is rounded up to a multiple of the target block size */ 12215 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 12216 12217 /* 12218 * See if the starting address of the request and the request 12219 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 12220 * then we do not need to allocate a shadow buf to handle the request. 12221 */ 12222 if (((first_byte % un->un_tgt_blocksize) == 0) && 12223 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 12224 is_aligned = TRUE; 12225 } 12226 12227 if ((bp->b_flags & B_READ) == 0) { 12228 /* 12229 * Lock the range for a write operation. An aligned request is 12230 * considered a simple write; otherwise the request must be a 12231 * read-modify-write. 12232 */ 12233 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 12234 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 12235 } 12236 12237 /* 12238 * Alloc a shadow buf if the request is not aligned. Also, this is 12239 * where the READ command is generated for a read-modify-write. (The 12240 * write phase is deferred until after the read completes.) 12241 */ 12242 if (is_aligned == FALSE) { 12243 12244 struct sd_mapblocksize_info *shadow_bsp; 12245 struct sd_xbuf *shadow_xp; 12246 struct buf *shadow_bp; 12247 12248 /* 12249 * Allocate the shadow buf and it associated xbuf. Note that 12250 * after this call the xb_blkno value in both the original 12251 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 12252 * same: absolute relative to the start of the device, and 12253 * adjusted for the target block size. The b_blkno in the 12254 * shadow buf will also be set to this value. We should never 12255 * change b_blkno in the original bp however. 12256 * 12257 * Note also that the shadow buf will always need to be a 12258 * READ command, regardless of whether the incoming command 12259 * is a READ or a WRITE. 12260 */ 12261 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 12262 xp->xb_blkno, 12263 (int (*)(struct buf *)) sd_mapblocksize_iodone); 12264 12265 shadow_xp = SD_GET_XBUF(shadow_bp); 12266 12267 /* 12268 * Allocate the layer-private data for the shadow buf. 12269 * (No need to preserve xb_private in the shadow xbuf.) 12270 */ 12271 shadow_xp->xb_private = shadow_bsp = 12272 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12273 12274 /* 12275 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 12276 * to figure out where the start of the user data is (based upon 12277 * the system block size) in the data returned by the READ 12278 * command (which will be based upon the target blocksize). Note 12279 * that this is only really used if the request is unaligned. 
12280 */ 12281 bsp->mbs_copy_offset = (ssize_t)(first_byte - 12282 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 12283 ASSERT((bsp->mbs_copy_offset >= 0) && 12284 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 12285 12286 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 12287 12288 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 12289 12290 /* Transfer the wmap (if any) to the shadow buf */ 12291 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 12292 bsp->mbs_wmp = NULL; 12293 12294 /* 12295 * The shadow buf goes on from here in place of the 12296 * original buf. 12297 */ 12298 shadow_bsp->mbs_orig_bp = bp; 12299 bp = shadow_bp; 12300 } 12301 12302 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12303 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 12304 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12305 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 12306 request_bytes); 12307 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12308 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 12309 12310 done: 12311 SD_NEXT_IOSTART(index, un, bp); 12312 12313 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12314 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 12315 } 12316 12317 12318 /* 12319 * Function: sd_mapblocksize_iodone 12320 * 12321 * Description: Completion side processing for block-size mapping. 12322 * 12323 * Context: May be called under interrupt context 12324 */ 12325 12326 static void 12327 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 12328 { 12329 struct sd_mapblocksize_info *bsp; 12330 struct sd_xbuf *xp; 12331 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 12332 struct buf *orig_bp; /* ptr to the original buf */ 12333 offset_t shadow_end; 12334 offset_t request_end; 12335 offset_t shadow_start; 12336 ssize_t copy_offset; 12337 size_t copy_length; 12338 size_t shortfall; 12339 uint_t is_write; /* TRUE if this bp is a WRITE */ 12340 uint_t has_wmap; /* TRUE is this bp has a wmap */ 12341 12342 ASSERT(un != NULL); 12343 ASSERT(bp != NULL); 12344 12345 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12346 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 12347 12348 /* 12349 * There is no shadow buf or layer-private data if the target is 12350 * using un->un_sys_blocksize as its block size or if bcount == 0. 12351 */ 12352 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12353 (bp->b_bcount == 0)) { 12354 goto exit; 12355 } 12356 12357 xp = SD_GET_XBUF(bp); 12358 ASSERT(xp != NULL); 12359 12360 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 12361 bsp = xp->xb_private; 12362 12363 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 12364 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 12365 12366 if (is_write) { 12367 /* 12368 * For a WRITE request we must free up the block range that 12369 * we have locked up. This holds regardless of whether this is 12370 * an aligned write request or a read-modify-write request. 12371 */ 12372 sd_range_unlock(un, bsp->mbs_wmp); 12373 bsp->mbs_wmp = NULL; 12374 } 12375 12376 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 12377 /* 12378 * An aligned read or write command will have no shadow buf; 12379 * there is not much else to do with it. 12380 */ 12381 goto done; 12382 } 12383 12384 orig_bp = bsp->mbs_orig_bp; 12385 ASSERT(orig_bp != NULL); 12386 orig_xp = SD_GET_XBUF(orig_bp); 12387 ASSERT(orig_xp != NULL); 12388 ASSERT(!mutex_owned(SD_MUTEX(un))); 12389 12390 if (!is_write && has_wmap) { 12391 /* 12392 * A READ with a wmap means this is the READ phase of a 12393 * read-modify-write. 
If an error occurred on the READ then 12394 * we do not proceed with the WRITE phase or copy any data. 12395 * Just release the write maps and return with an error. 12396 */ 12397 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 12398 orig_bp->b_resid = orig_bp->b_bcount; 12399 bioerror(orig_bp, bp->b_error); 12400 sd_range_unlock(un, bsp->mbs_wmp); 12401 goto freebuf_done; 12402 } 12403 } 12404 12405 /* 12406 * Here is where we set up to copy the data from the shadow buf 12407 * into the space associated with the original buf. 12408 * 12409 * To deal with the conversion between block sizes, these 12410 * computations treat the data as an array of bytes, with the 12411 * first byte (byte 0) corresponding to the first byte in the 12412 * first block on the disk. 12413 */ 12414 12415 /* 12416 * shadow_start and shadow_len indicate the location and size of 12417 * the data returned with the shadow IO request. 12418 */ 12419 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12420 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 12421 12422 /* 12423 * copy_offset gives the offset (in bytes) from the start of the first 12424 * block of the READ request to the beginning of the data. We retrieve 12425 * this value from the shadow buf's layer-private data area 12426 * (mbs_copy_offset), where it was saved by sd_mapblocksize_iostart(). 12427 * copy_length gives the amount of data to be copied (in bytes). 12428 */ 12429 copy_offset = bsp->mbs_copy_offset; 12430 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 12431 copy_length = orig_bp->b_bcount; 12432 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 12433 12434 /* 12435 * Set up the resid and error fields of orig_bp as appropriate. 12436 */ 12437 if (shadow_end >= request_end) { 12438 /* We got all the requested data; set resid to zero */ 12439 orig_bp->b_resid = 0; 12440 } else { 12441 /* 12442 * We failed to get enough data to fully satisfy the original 12443 * request. Just copy back whatever data we got and set 12444 * up the residual and error code as required. 12445 * 12446 * 'shortfall' is the amount by which the data received with the 12447 * shadow buf has "fallen short" of the requested amount. 12448 */ 12449 shortfall = (size_t)(request_end - shadow_end); 12450 12451 if (shortfall > orig_bp->b_bcount) { 12452 /* 12453 * We did not get enough data to even partially 12454 * fulfill the original request. The residual is 12455 * equal to the amount requested. 12456 */ 12457 orig_bp->b_resid = orig_bp->b_bcount; 12458 } else { 12459 /* 12460 * We did not get all the data that we requested 12461 * from the device, but we will try to return what 12462 * portion we did get. 12463 */ 12464 orig_bp->b_resid = shortfall; 12465 } 12466 ASSERT(copy_length >= orig_bp->b_resid); 12467 copy_length -= orig_bp->b_resid; 12468 } 12469 12470 /* Propagate the error code from the shadow buf to the original buf */ 12471 bioerror(orig_bp, bp->b_error); 12472 12473 if (is_write) { 12474 goto freebuf_done; /* No data copying for a WRITE */ 12475 } 12476 12477 if (has_wmap) { 12478 /* 12479 * This is a READ command from the READ phase of a 12480 * read-modify-write request. We have to copy the data given 12481 * by the user OVER the data returned by the READ command, 12482 * then convert the command from a READ to a WRITE and send 12483 * it back to the target.
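 *
 * Continuing the hypothetical example from sd_mapblocksize_iostart():
 * the user's 1024 bytes are copied over bytes 1536..2559 of the
 * 4096-byte shadow buffer (copy_offset == 1536), and the subsequent
 * WRITE pushes back two complete 2048-byte target blocks in which
 * only the user's byte range has changed.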
12484 */ 12485 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 12486 copy_length); 12487 12488 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 12489 12490 /* 12491 * Dispatch the WRITE command to the taskq thread, which 12492 * will in turn send the command to the target. When the 12493 * WRITE command completes, we (sd_mapblocksize_iodone()) 12494 * will get called again as part of the iodone chain 12495 * processing for it. Note that we will still be dealing 12496 * with the shadow buf at that point. 12497 */ 12498 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 12499 KM_NOSLEEP) != 0) { 12500 /* 12501 * Dispatch was successful so we are done. Return 12502 * without going any higher up the iodone chain. Do 12503 * not free up any layer-private data until after the 12504 * WRITE completes. 12505 */ 12506 return; 12507 } 12508 12509 /* 12510 * Dispatch of the WRITE command failed; set up the error 12511 * condition and send this IO back up the iodone chain. 12512 */ 12513 bioerror(orig_bp, EIO); 12514 orig_bp->b_resid = orig_bp->b_bcount; 12515 12516 } else { 12517 /* 12518 * This is a regular READ request (ie, not a RMW). Copy the 12519 * data from the shadow buf into the original buf. The 12520 * copy_offset compensates for any "misalignment" between the 12521 * shadow buf (with its un->un_tgt_blocksize blocks) and the 12522 * original buf (with its un->un_sys_blocksize blocks). 12523 */ 12524 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 12525 copy_length); 12526 } 12527 12528 freebuf_done: 12529 12530 /* 12531 * At this point we still have both the shadow buf AND the original 12532 * buf to deal with, as well as the layer-private data area in each. 12533 * Local variables are as follows: 12534 * 12535 * bp -- points to shadow buf 12536 * xp -- points to xbuf of shadow buf 12537 * bsp -- points to layer-private data area of shadow buf 12538 * orig_bp -- points to original buf 12539 * 12540 * First free the shadow buf and its associated xbuf, then free the 12541 * layer-private data area from the shadow buf. There is no need to 12542 * restore xb_private in the shadow xbuf. 12543 */ 12544 sd_shadow_buf_free(bp); 12545 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12546 12547 /* 12548 * Now update the local variables to point to the original buf, xbuf, 12549 * and layer-private area. 12550 */ 12551 bp = orig_bp; 12552 xp = SD_GET_XBUF(bp); 12553 ASSERT(xp != NULL); 12554 ASSERT(xp == orig_xp); 12555 bsp = xp->xb_private; 12556 ASSERT(bsp != NULL); 12557 12558 done: 12559 /* 12560 * Restore xb_private to whatever it was set to by the next higher 12561 * layer in the chain, then free the layer-private data area. 12562 */ 12563 xp->xb_private = bsp->mbs_oprivate; 12564 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12565 12566 exit: 12567 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 12568 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 12569 12570 SD_NEXT_IODONE(index, un, bp); 12571 } 12572 12573 12574 /* 12575 * Function: sd_checksum_iostart 12576 * 12577 * Description: A stub function for a layer that's currently not used. 12578 * For now just a placeholder. 
12579 * 12580 * Context: Kernel thread context 12581 */ 12582 12583 static void 12584 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 12585 { 12586 ASSERT(un != NULL); 12587 ASSERT(bp != NULL); 12588 ASSERT(!mutex_owned(SD_MUTEX(un))); 12589 SD_NEXT_IOSTART(index, un, bp); 12590 } 12591 12592 12593 /* 12594 * Function: sd_checksum_iodone 12595 * 12596 * Description: A stub function for a layer that's currently not used. 12597 * For now just a placeholder. 12598 * 12599 * Context: May be called under interrupt context 12600 */ 12601 12602 static void 12603 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 12604 { 12605 ASSERT(un != NULL); 12606 ASSERT(bp != NULL); 12607 ASSERT(!mutex_owned(SD_MUTEX(un))); 12608 SD_NEXT_IODONE(index, un, bp); 12609 } 12610 12611 12612 /* 12613 * Function: sd_checksum_uscsi_iostart 12614 * 12615 * Description: A stub function for a layer that's currently not used. 12616 * For now just a placeholder. 12617 * 12618 * Context: Kernel thread context 12619 */ 12620 12621 static void 12622 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 12623 { 12624 ASSERT(un != NULL); 12625 ASSERT(bp != NULL); 12626 ASSERT(!mutex_owned(SD_MUTEX(un))); 12627 SD_NEXT_IOSTART(index, un, bp); 12628 } 12629 12630 12631 /* 12632 * Function: sd_checksum_uscsi_iodone 12633 * 12634 * Description: A stub function for a layer that's currently not used. 12635 * For now just a placeholder. 12636 * 12637 * Context: May be called under interrupt context 12638 */ 12639 12640 static void 12641 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 12642 { 12643 ASSERT(un != NULL); 12644 ASSERT(bp != NULL); 12645 ASSERT(!mutex_owned(SD_MUTEX(un))); 12646 SD_NEXT_IODONE(index, un, bp); 12647 } 12648 12649 12650 /* 12651 * Function: sd_pm_iostart 12652 * 12653 * Description: iostart-side routine for power management. 12654 * 12655 * Context: Kernel thread context 12656 */ 12657 12658 static void 12659 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 12660 { 12661 ASSERT(un != NULL); 12662 ASSERT(bp != NULL); 12663 ASSERT(!mutex_owned(SD_MUTEX(un))); 12664 ASSERT(!mutex_owned(&un->un_pm_mutex)); 12665 12666 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 12667 12668 if (sd_pm_entry(un) != DDI_SUCCESS) { 12669 /* 12670 * Set up to return the failed buf back up the 'iodone' 12671 * side of the calling chain. 12672 */ 12673 bioerror(bp, EIO); 12674 bp->b_resid = bp->b_bcount; 12675 12676 SD_BEGIN_IODONE(index, un, bp); 12677 12678 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 12679 return; 12680 } 12681 12682 SD_NEXT_IOSTART(index, un, bp); 12683 12684 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 12685 } 12686 12687 12688 /* 12689 * Function: sd_pm_iodone 12690 * 12691 * Description: iodone-side routine for power management. 12692 * 12693 * Context: may be called from interrupt context 12694 */ 12695 12696 static void 12697 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 12698 { 12699 ASSERT(un != NULL); 12700 ASSERT(bp != NULL); 12701 ASSERT(!mutex_owned(&un->un_pm_mutex)); 12702 12703 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 12704 12705 /* 12706 * After attach the following flag is only read, so don't 12707 * take the penalty of acquiring a mutex for it.
12708 */ 12709 if (un->un_f_pm_is_enabled == TRUE) { 12710 sd_pm_exit(un); 12711 } 12712 12713 SD_NEXT_IODONE(index, un, bp); 12714 12715 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 12716 } 12717 12718 12719 /* 12720 * Function: sd_core_iostart 12721 * 12722 * Description: Primary driver function for enqueuing buf(9S) structs from 12723 * the system and initiating IO to the target device 12724 * 12725 * Context: Kernel thread context. Can sleep. 12726 * 12727 * Assumptions: - The given xp->xb_blkno is absolute 12728 * (ie, relative to the start of the device). 12729 * - The IO is to be done using the native blocksize of 12730 * the device, as specified in un->un_tgt_blocksize. 12731 */ 12732 /* ARGSUSED */ 12733 static void 12734 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 12735 { 12736 struct sd_xbuf *xp; 12737 12738 ASSERT(un != NULL); 12739 ASSERT(bp != NULL); 12740 ASSERT(!mutex_owned(SD_MUTEX(un))); 12741 ASSERT(bp->b_resid == 0); 12742 12743 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 12744 12745 xp = SD_GET_XBUF(bp); 12746 ASSERT(xp != NULL); 12747 12748 mutex_enter(SD_MUTEX(un)); 12749 12750 /* 12751 * If we are currently in the failfast state, fail any new IO 12752 * that has B_FAILFAST set, then return. 12753 */ 12754 if ((bp->b_flags & B_FAILFAST) && 12755 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 12756 mutex_exit(SD_MUTEX(un)); 12757 bioerror(bp, EIO); 12758 bp->b_resid = bp->b_bcount; 12759 SD_BEGIN_IODONE(index, un, bp); 12760 return; 12761 } 12762 12763 if (SD_IS_DIRECT_PRIORITY(xp)) { 12764 /* 12765 * Priority command -- transport it immediately. 12766 * 12767 * Note: We may want to assert that USCSI_DIAGNOSE is set, 12768 * because all direct priority commands should be associated 12769 * with error recovery actions which we don't want to retry. 12770 */ 12771 sd_start_cmds(un, bp); 12772 } else { 12773 /* 12774 * Normal command -- add it to the wait queue, then start 12775 * transporting commands from the wait queue. 12776 */ 12777 sd_add_buf_to_waitq(un, bp); 12778 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 12779 sd_start_cmds(un, NULL); 12780 } 12781 12782 mutex_exit(SD_MUTEX(un)); 12783 12784 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 12785 } 12786 12787 12788 /* 12789 * Function: sd_init_cdb_limits 12790 * 12791 * Description: This is to handle scsi_pkt initialization differences 12792 * between the driver platforms. 12793 * 12794 * Legacy behaviors: 12795 * 12796 * If the block number or the sector count exceeds the 12797 * capabilities of a Group 0 command, shift over to a 12798 * Group 1 command. We don't blindly use Group 1 12799 * commands because a) some drives (CDC Wren IVs) get a 12800 * bit confused, and b) there is probably a fair amount 12801 * of speed difference for a target to receive and decode 12802 * a 10 byte command instead of a 6 byte command. 12803 * 12804 * The xfer time difference of 6 vs 10 byte CDBs is 12805 * still significant so this code is still worthwhile. 12806 * 10 byte CDBs are very inefficient with the fas HBA driver 12807 * and older disks. Each CDB byte took 1 usec with some 12808 * popular disks. 
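 *
 * For reference, the standard SCSI limits behind this choice
 * (noted here for clarity): a Group 0 (6-byte) READ/WRITE
 * carries a 21-bit LBA and an 8-bit transfer length (at most
 * 256 blocks); a Group 1 (10-byte) CDB carries a 32-bit LBA
 * and a 16-bit transfer length; the Group 5 (12-byte) and
 * Group 4 (16-byte) CDBs widen the transfer length to 32 bits,
 * with Group 4 also widening the LBA to 64 bits.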
12809 * 12810 * Context: Must be called at attach time 12811 */ 12812 12813 static void 12814 sd_init_cdb_limits(struct sd_lun *un) 12815 { 12816 int hba_cdb_limit; 12817 12818 /* 12819 * Use CDB_GROUP1 commands for most devices except for 12820 * parallel SCSI fixed drives in which case we get better 12821 * performance using CDB_GROUP0 commands (where applicable). 12822 */ 12823 un->un_mincdb = SD_CDB_GROUP1; 12824 #if !defined(__fibre) 12825 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 12826 !un->un_f_has_removable_media) { 12827 un->un_mincdb = SD_CDB_GROUP0; 12828 } 12829 #endif 12830 12831 /* 12832 * Try to read the max-cdb-length supported by the HBA. (The 12833 * CDB_GROUP0/1/5/4 lengths are 6, 10, 12 and 16 bytes respectively, so the tests below are ordered by ascending CDB length.) */ 12834 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 12835 if (0 >= un->un_max_hba_cdb) { 12836 un->un_max_hba_cdb = CDB_GROUP4; 12837 hba_cdb_limit = SD_CDB_GROUP4; 12838 } else if (0 < un->un_max_hba_cdb && 12839 un->un_max_hba_cdb < CDB_GROUP1) { 12840 hba_cdb_limit = SD_CDB_GROUP0; 12841 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 12842 un->un_max_hba_cdb < CDB_GROUP5) { 12843 hba_cdb_limit = SD_CDB_GROUP1; 12844 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 12845 un->un_max_hba_cdb < CDB_GROUP4) { 12846 hba_cdb_limit = SD_CDB_GROUP5; 12847 } else { 12848 hba_cdb_limit = SD_CDB_GROUP4; 12849 } 12850 12851 /* 12852 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 12853 * commands for fixed disks unless we are building for a 32 bit 12854 * kernel. 12855 */ 12856 #ifdef _LP64 12857 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 12858 min(hba_cdb_limit, SD_CDB_GROUP4); 12859 #else 12860 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 12861 min(hba_cdb_limit, SD_CDB_GROUP1); 12862 #endif 12863 12864 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 12865 ? sizeof (struct scsi_arq_status) : 1); 12866 un->un_cmd_timeout = (ushort_t)sd_io_time; 12867 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 12868 } 12869 12870 12871 /* 12872 * Function: sd_initpkt_for_buf 12873 * 12874 * Description: Allocate and initialize for transport a scsi_pkt struct, 12875 * based upon the info specified in the given buf struct. 12876 * 12877 * Assumes the xb_blkno in the request is absolute (ie, 12878 * relative to the start of the device, NOT the partition). 12879 * Also assumes that the request is using the native block 12880 * size of the device (as returned by the READ CAPACITY 12881 * command). 12882 * 12883 * Return Code: SD_PKT_ALLOC_SUCCESS 12884 * SD_PKT_ALLOC_FAILURE 12885 * SD_PKT_ALLOC_FAILURE_NO_DMA 12886 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12887 * 12888 * Context: Kernel thread and may be called from software interrupt context 12889 * as part of a sdrunout callback.
This function may not block or 12890 * call routines that block 12891 */ 12892 12893 static int 12894 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 12895 { 12896 struct sd_xbuf *xp; 12897 struct scsi_pkt *pktp = NULL; 12898 struct sd_lun *un; 12899 size_t blockcount; 12900 daddr_t startblock; 12901 int rval; 12902 int cmd_flags; 12903 12904 ASSERT(bp != NULL); 12905 ASSERT(pktpp != NULL); 12906 xp = SD_GET_XBUF(bp); 12907 ASSERT(xp != NULL); 12908 un = SD_GET_UN(bp); 12909 ASSERT(un != NULL); 12910 ASSERT(mutex_owned(SD_MUTEX(un))); 12911 ASSERT(bp->b_resid == 0); 12912 12913 SD_TRACE(SD_LOG_IO_CORE, un, 12914 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 12915 12916 mutex_exit(SD_MUTEX(un)); 12917 12918 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12919 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 12920 /* 12921 * Already have a scsi_pkt -- just need DMA resources. 12922 * We must recompute the CDB in case the mapping returns 12923 * a nonzero pkt_resid. 12924 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 12925 * that is being retried, the unmap/remap of the DMA resources 12926 * will result in the entire transfer starting over again 12927 * from the very first block. 12928 */ 12929 ASSERT(xp->xb_pktp != NULL); 12930 pktp = xp->xb_pktp; 12931 } else { 12932 pktp = NULL; 12933 } 12934 #endif /* __i386 || __amd64 */ 12935 12936 startblock = xp->xb_blkno; /* Absolute block num. */ 12937 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 12938 12939 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 12940 12941 /* 12942 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 12943 * call scsi_init_pkt, and build the CDB. 12944 */ 12945 rval = sd_setup_rw_pkt(un, &pktp, bp, 12946 cmd_flags, sdrunout, (caddr_t)un, 12947 startblock, blockcount); 12948 12949 if (rval == 0) { 12950 /* 12951 * Success. 12952 * 12953 * If partial DMA is being used and required for this transfer, 12954 * set it up here. 12955 */ 12956 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 12957 (pktp->pkt_resid != 0)) { 12958 12959 /* 12960 * Save the CDB length and pkt_resid for the 12961 * next xfer 12962 */ 12963 xp->xb_dma_resid = pktp->pkt_resid; 12964 12965 /* rezero resid */ 12966 pktp->pkt_resid = 0; 12967 12968 } else { 12969 xp->xb_dma_resid = 0; 12970 } 12971 12972 pktp->pkt_flags = un->un_tagflags; 12973 pktp->pkt_time = un->un_cmd_timeout; 12974 pktp->pkt_comp = sdintr; 12975 12976 pktp->pkt_private = bp; 12977 *pktpp = pktp; 12978 12979 SD_TRACE(SD_LOG_IO_CORE, un, 12980 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 12981 12982 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12983 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 12984 #endif 12985 12986 mutex_enter(SD_MUTEX(un)); 12987 return (SD_PKT_ALLOC_SUCCESS); 12988 12989 } 12990 12991 /* 12992 * SD_PKT_ALLOC_FAILURE is the only expected failure code 12993 * from sd_setup_rw_pkt. 12994 */ 12995 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 12996 12997 if (rval == SD_PKT_ALLOC_FAILURE) { 12998 *pktpp = NULL; 12999 /* 13000 * Set the driver state to RWAIT to indicate the driver 13001 * is waiting on resource allocations. The driver will not 13002 * suspend, pm_suspend, or detach while the state is RWAIT. 13003 */ 13004 mutex_enter(SD_MUTEX(un)); 13005 New_state(un, SD_STATE_RWAIT); 13006 13007 SD_ERROR(SD_LOG_IO_CORE, un, 13008 "sd_initpkt_for_buf: No pktp.
exit bp:0x%p\n", bp); 13009 13010 if ((bp->b_flags & B_ERROR) != 0) { 13011 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13012 } 13013 return (SD_PKT_ALLOC_FAILURE); 13014 } else { 13015 /* 13016 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13017 * 13018 * This should never happen. Maybe someone messed with the 13019 * kernel's minphys? 13020 */ 13021 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13022 "Request rejected: too large for CDB: " 13023 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 13024 SD_ERROR(SD_LOG_IO_CORE, un, 13025 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 13026 mutex_enter(SD_MUTEX(un)); 13027 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13028 13029 } 13030 } 13031 13032 13033 /* 13034 * Function: sd_destroypkt_for_buf 13035 * 13036 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 13037 * 13038 * Context: Kernel thread or interrupt context 13039 */ 13040 13041 static void 13042 sd_destroypkt_for_buf(struct buf *bp) 13043 { 13044 ASSERT(bp != NULL); 13045 ASSERT(SD_GET_UN(bp) != NULL); 13046 13047 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13048 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 13049 13050 ASSERT(SD_GET_PKTP(bp) != NULL); 13051 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13052 13053 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13054 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 13055 } 13056 13057 /* 13058 * Function: sd_setup_rw_pkt 13059 * 13060 * Description: Determines appropriate CDB group for the requested LBA 13061 * and transfer length, calls scsi_init_pkt, and builds 13062 * the CDB. Do not use for partial DMA transfers except 13063 * for the initial transfer since the CDB size must 13064 * remain constant. 13065 * 13066 * Context: Kernel thread and may be called from software interrupt 13067 * context as part of a sdrunout callback. This function may not 13068 * block or call routines that block 13069 */ 13070 13071 13072 int 13073 sd_setup_rw_pkt(struct sd_lun *un, 13074 struct scsi_pkt **pktpp, struct buf *bp, int flags, 13075 int (*callback)(caddr_t), caddr_t callback_arg, 13076 diskaddr_t lba, uint32_t blockcount) 13077 { 13078 struct scsi_pkt *return_pktp; 13079 union scsi_cdb *cdbp; 13080 struct sd_cdbinfo *cp = NULL; 13081 int i; 13082 13083 /* 13084 * See which size CDB to use, based upon the request. 13085 */ 13086 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 13087 13088 /* 13089 * Check lba and block count against sd_cdbtab limits. 13090 * In the partial DMA case, we have to use the same size 13091 * CDB for all the transfers. Check lba + blockcount 13092 * against the max LBA so we know that segment of the 13093 * transfer can use the CDB we select. 13094 */ 13095 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 13096 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 13097 13098 /* 13099 * The command will fit into the CDB type 13100 * specified by sd_cdbtab[i]. 13101 */ 13102 cp = sd_cdbtab + i; 13103 13104 /* 13105 * Call scsi_init_pkt so we can fill in the 13106 * CDB. 13107 */ 13108 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 13109 bp, cp->sc_grpcode, un->un_status_len, 0, 13110 flags, callback, callback_arg); 13111 13112 if (return_pktp != NULL) { 13113 13114 /* 13115 * Return new value of pkt 13116 */ 13117 *pktpp = return_pktp; 13118 13119 /* 13120 * To be safe, zero the CDB, ensuring there is 13121 * no leftover data from a previous command.
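 * As an illustration of what the FORMG* macros below construct
 * (standard SCSI CDB layout, not specific to this driver): a
 * Group 1 READ of 16 blocks at LBA 0x1234 yields the 10-byte
 * CDB 28 00 00 00 12 34 00 00 10 00, i.e. opcode 0x28
 * (SCMD_READ with the Group 1 bits), a big-endian 4-byte LBA
 * in bytes 2-5, and a big-endian 2-byte transfer length in
 * bytes 7-8.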
13122 */ 13123 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 13124 13125 /* 13126 * Handle partial DMA mapping 13127 */ 13128 if (return_pktp->pkt_resid != 0) { 13129 13130 /* 13131 * Not going to xfer as many blocks as 13132 * originally expected 13133 */ 13134 blockcount -= 13135 SD_BYTES2TGTBLOCKS(un, 13136 return_pktp->pkt_resid); 13137 } 13138 13139 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 13140 13141 /* 13142 * Set command byte based on the CDB 13143 * type we matched. 13144 */ 13145 cdbp->scc_cmd = cp->sc_grpmask | 13146 ((bp->b_flags & B_READ) ? 13147 SCMD_READ : SCMD_WRITE); 13148 13149 SD_FILL_SCSI1_LUN(un, return_pktp); 13150 13151 /* 13152 * Fill in LBA and length 13153 */ 13154 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 13155 (cp->sc_grpcode == CDB_GROUP4) || 13156 (cp->sc_grpcode == CDB_GROUP0) || 13157 (cp->sc_grpcode == CDB_GROUP5)); 13158 13159 if (cp->sc_grpcode == CDB_GROUP1) { 13160 FORMG1ADDR(cdbp, lba); 13161 FORMG1COUNT(cdbp, blockcount); 13162 return (0); 13163 } else if (cp->sc_grpcode == CDB_GROUP4) { 13164 FORMG4LONGADDR(cdbp, lba); 13165 FORMG4COUNT(cdbp, blockcount); 13166 return (0); 13167 } else if (cp->sc_grpcode == CDB_GROUP0) { 13168 FORMG0ADDR(cdbp, lba); 13169 FORMG0COUNT(cdbp, blockcount); 13170 return (0); 13171 } else if (cp->sc_grpcode == CDB_GROUP5) { 13172 FORMG5ADDR(cdbp, lba); 13173 FORMG5COUNT(cdbp, blockcount); 13174 return (0); 13175 } 13176 13177 /* 13178 * It should be impossible to not match one 13179 * of the CDB types above, so we should never 13180 * reach this point. Set the CDB command byte 13181 * to test-unit-ready to avoid writing 13182 * to somewhere we don't intend. 13183 */ 13184 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 13185 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13186 } else { 13187 /* 13188 * Couldn't get scsi_pkt 13189 */ 13190 return (SD_PKT_ALLOC_FAILURE); 13191 } 13192 } 13193 } 13194 13195 /* 13196 * None of the available CDB types were suitable. This really 13197 * should never happen: on a 64 bit system we support 13198 * READ16/WRITE16 which will hold an entire 64 bit disk address 13199 * and on a 32 bit system we will refuse to bind to a device 13200 * larger than 2TB so addresses will never be larger than 32 bits. 13201 */ 13202 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13203 } 13204 13205 /* 13206 * Function: sd_setup_next_rw_pkt 13207 * 13208 * Description: Setup packet for partial DMA transfers, except for the 13209 * initial transfer. sd_setup_rw_pkt should be used for 13210 * the initial transfer. 13211 * 13212 * Context: Kernel thread and may be called from interrupt context. 13213 */ 13214 13215 int 13216 sd_setup_next_rw_pkt(struct sd_lun *un, 13217 struct scsi_pkt *pktp, struct buf *bp, 13218 diskaddr_t lba, uint32_t blockcount) 13219 { 13220 uchar_t com; 13221 union scsi_cdb *cdbp; 13222 uchar_t cdb_group_id; 13223 13224 ASSERT(pktp != NULL); 13225 ASSERT(pktp->pkt_cdbp != NULL); 13226 13227 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 13228 com = cdbp->scc_cmd; 13229 cdb_group_id = CDB_GROUPID(com); 13230 13231 ASSERT((cdb_group_id == CDB_GROUPID_0) || 13232 (cdb_group_id == CDB_GROUPID_1) || 13233 (cdb_group_id == CDB_GROUPID_4) || 13234 (cdb_group_id == CDB_GROUPID_5)); 13235 13236 /* 13237 * Move pkt to the next portion of the xfer. 13238 * func is NULL_FUNC so we do not have to release 13239 * the disk mutex here. 13240 */ 13241 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 13242 NULL_FUNC, NULL) == pktp) { 13243 /* Success. 
Handle partial DMA */ 13244 if (pktp->pkt_resid != 0) { 13245 blockcount -= 13246 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 13247 } 13248 13249 cdbp->scc_cmd = com; 13250 SD_FILL_SCSI1_LUN(un, pktp); 13251 if (cdb_group_id == CDB_GROUPID_1) { 13252 FORMG1ADDR(cdbp, lba); 13253 FORMG1COUNT(cdbp, blockcount); 13254 return (0); 13255 } else if (cdb_group_id == CDB_GROUPID_4) { 13256 FORMG4LONGADDR(cdbp, lba); 13257 FORMG4COUNT(cdbp, blockcount); 13258 return (0); 13259 } else if (cdb_group_id == CDB_GROUPID_0) { 13260 FORMG0ADDR(cdbp, lba); 13261 FORMG0COUNT(cdbp, blockcount); 13262 return (0); 13263 } else if (cdb_group_id == CDB_GROUPID_5) { 13264 FORMG5ADDR(cdbp, lba); 13265 FORMG5COUNT(cdbp, blockcount); 13266 return (0); 13267 } 13268 13269 /* Unreachable */ 13270 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13271 } 13272 13273 /* 13274 * Error setting up next portion of cmd transfer. 13275 * Something is definitely very wrong and this 13276 * should not happen. 13277 */ 13278 return (SD_PKT_ALLOC_FAILURE); 13279 } 13280 13281 /* 13282 * Function: sd_initpkt_for_uscsi 13283 * 13284 * Description: Allocate and initialize for transport a scsi_pkt struct, 13285 * based upon the info specified in the given uscsi_cmd struct. 13286 * 13287 * Return Code: SD_PKT_ALLOC_SUCCESS 13288 * SD_PKT_ALLOC_FAILURE 13289 * SD_PKT_ALLOC_FAILURE_NO_DMA 13290 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13291 * 13292 * Context: Kernel thread and may be called from software interrupt context 13293 * as part of a sdrunout callback. This function may not block or 13294 * call routines that block 13295 */ 13296 13297 static int 13298 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 13299 { 13300 struct uscsi_cmd *uscmd; 13301 struct sd_xbuf *xp; 13302 struct scsi_pkt *pktp; 13303 struct sd_lun *un; 13304 uint32_t flags = 0; 13305 13306 ASSERT(bp != NULL); 13307 ASSERT(pktpp != NULL); 13308 xp = SD_GET_XBUF(bp); 13309 ASSERT(xp != NULL); 13310 un = SD_GET_UN(bp); 13311 ASSERT(un != NULL); 13312 ASSERT(mutex_owned(SD_MUTEX(un))); 13313 13314 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 13315 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 13316 ASSERT(uscmd != NULL); 13317 13318 SD_TRACE(SD_LOG_IO_CORE, un, 13319 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 13320 13321 /* 13322 * Allocate the scsi_pkt for the command. 13323 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 13324 * during scsi_init_pkt time and will continue to use the 13325 * same path as long as the same scsi_pkt is used without 13326 * intervening scsi_dma_free(). Since uscsi commands do 13327 * not call scsi_dmafree() before retrying a failed command, it 13328 * is necessary to make sure the PKT_DMA_PARTIAL flag is NOT 13329 * set so that scsi_vhci can use another available path for the 13330 * retry. Besides, uscsi commands do not allow DMA breakup, 13331 * so there is no need to set the PKT_DMA_PARTIAL flag. 13332 */ 13333 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 13334 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 13335 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 13336 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status) 13337 - sizeof (struct scsi_extended_sense)), 0, 13338 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ, 13339 sdrunout, (caddr_t)un); 13340 } else { 13341 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 13342 ((bp->b_bcount != 0) ?
bp : NULL), uscmd->uscsi_cdblen, 13343 sizeof (struct scsi_arq_status), 0, 13344 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 13345 sdrunout, (caddr_t)un); 13346 } 13347 13348 if (pktp == NULL) { 13349 *pktpp = NULL; 13350 /* 13351 * Set the driver state to RWAIT to indicate the driver 13352 * is waiting on resource allocations. The driver will not 13353 * suspend, pm_suspend, or detach while the state is RWAIT. 13354 */ 13355 New_state(un, SD_STATE_RWAIT); 13356 13357 SD_ERROR(SD_LOG_IO_CORE, un, 13358 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 13359 13360 if ((bp->b_flags & B_ERROR) != 0) { 13361 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13362 } 13363 return (SD_PKT_ALLOC_FAILURE); 13364 } 13365 13366 /* 13367 * We do not do DMA breakup for USCSI commands, so return failure 13368 * here if all the needed DMA resources were not allocated. 13369 */ 13370 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 13371 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 13372 scsi_destroy_pkt(pktp); 13373 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 13374 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 13375 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 13376 } 13377 13378 /* Init the cdb from the given uscsi struct */ 13379 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 13380 uscmd->uscsi_cdb[0], 0, 0, 0); 13381 13382 SD_FILL_SCSI1_LUN(un, pktp); 13383 13384 /* 13385 * Set up the optional USCSI flags. See the uscsi (7I) man page 13386 * for a listing of the supported flags. 13387 */ 13388 13389 if (uscmd->uscsi_flags & USCSI_SILENT) { 13390 flags |= FLAG_SILENT; 13391 } 13392 13393 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 13394 flags |= FLAG_DIAGNOSE; 13395 } 13396 13397 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 13398 flags |= FLAG_ISOLATE; 13399 } 13400 13401 if (un->un_f_is_fibre == FALSE) { 13402 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 13403 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 13404 } 13405 } 13406 13407 /* 13408 * Set the pkt flags here so we save time later. 13409 * Note: These flags are NOT in the uscsi man page!!! 13410 */ 13411 if (uscmd->uscsi_flags & USCSI_HEAD) { 13412 flags |= FLAG_HEAD; 13413 } 13414 13415 if (uscmd->uscsi_flags & USCSI_NOINTR) { 13416 flags |= FLAG_NOINTR; 13417 } 13418 13419 /* 13420 * For tagged queueing, things get a bit complicated. 13421 * Check first for head of queue and last for ordered queue. 13422 * If neither head nor ordered, use the default driver tag flags. 13423 */ 13424 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 13425 if (uscmd->uscsi_flags & USCSI_HTAG) { 13426 flags |= FLAG_HTAG; 13427 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 13428 flags |= FLAG_OTAG; 13429 } else { 13430 flags |= un->un_tagflags & FLAG_TAGMASK; 13431 } 13432 } 13433 13434 if (uscmd->uscsi_flags & USCSI_NODISCON) { 13435 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 13436 } 13437 13438 pktp->pkt_flags = flags; 13439 13440 /* Transfer uscsi information to scsi_pkt */ 13441 (void) scsi_uscsi_pktinit(uscmd, pktp); 13442 13443 /* Copy the caller's CDB into the pkt...
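 * For example (a hypothetical caller, not driver code), a
 * 6-byte INQUIRY would arrive here as uscsi_cdb[] =
 * { 0x12, 0x00, 0x00, 0x00, 0x24, 0x00 } with uscsi_cdblen set
 * to 6; the bcopy below moves those bytes verbatim into
 * pkt_cdbp.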
*/ 13444 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 13445 13446 if (uscmd->uscsi_timeout == 0) { 13447 pktp->pkt_time = un->un_uscsi_timeout; 13448 } else { 13449 pktp->pkt_time = uscmd->uscsi_timeout; 13450 } 13451 13452 /* need it later to identify USCSI request in sdintr */ 13453 xp->xb_pkt_flags |= SD_XB_USCSICMD; 13454 13455 xp->xb_sense_resid = uscmd->uscsi_rqresid; 13456 13457 pktp->pkt_private = bp; 13458 pktp->pkt_comp = sdintr; 13459 *pktpp = pktp; 13460 13461 SD_TRACE(SD_LOG_IO_CORE, un, 13462 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 13463 13464 return (SD_PKT_ALLOC_SUCCESS); 13465 } 13466 13467 13468 /* 13469 * Function: sd_destroypkt_for_uscsi 13470 * 13471 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 13472 * IOs. Also saves relevant info into the associated uscsi_cmd 13473 * struct. 13474 * 13475 * Context: May be called under interrupt context 13476 */ 13477 13478 static void 13479 sd_destroypkt_for_uscsi(struct buf *bp) 13480 { 13481 struct uscsi_cmd *uscmd; 13482 struct sd_xbuf *xp; 13483 struct scsi_pkt *pktp; 13484 struct sd_lun *un; 13485 struct sd_uscsi_info *suip; 13486 13487 ASSERT(bp != NULL); 13488 xp = SD_GET_XBUF(bp); 13489 ASSERT(xp != NULL); 13490 un = SD_GET_UN(bp); 13491 ASSERT(un != NULL); 13492 ASSERT(!mutex_owned(SD_MUTEX(un))); 13493 pktp = SD_GET_PKTP(bp); 13494 ASSERT(pktp != NULL); 13495 13496 SD_TRACE(SD_LOG_IO_CORE, un, 13497 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 13498 13499 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 13500 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 13501 ASSERT(uscmd != NULL); 13502 13503 /* Save the status and the residual into the uscsi_cmd struct */ 13504 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 13505 uscmd->uscsi_resid = bp->b_resid; 13506 13507 /* Transfer scsi_pkt information to uscsi */ 13508 (void) scsi_uscsi_pktfini(pktp, uscmd); 13509 13510 /* 13511 * If enabled, copy any saved sense data into the area specified 13512 * by the uscsi command. 13513 */ 13514 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 13515 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 13516 /* 13517 * Note: uscmd->uscsi_rqbuf should always point to a buffer 13518 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 13519 */ 13520 uscmd->uscsi_rqstatus = xp->xb_sense_status; 13521 uscmd->uscsi_rqresid = xp->xb_sense_resid; 13522 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 13523 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 13524 MAX_SENSE_LENGTH); 13525 } else { 13526 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 13527 SENSE_LENGTH); 13528 } 13529 } 13530 /* 13531 * The following assignments are for SCSI FMA. 13532 */ 13533 ASSERT(xp->xb_private != NULL); 13534 suip = (struct sd_uscsi_info *)xp->xb_private; 13535 suip->ui_pkt_reason = pktp->pkt_reason; 13536 suip->ui_pkt_state = pktp->pkt_state; 13537 suip->ui_pkt_statistics = pktp->pkt_statistics; 13538 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp); 13539 13540 /* We are done with the scsi_pkt; free it now */ 13541 ASSERT(SD_GET_PKTP(bp) != NULL); 13542 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13543 13544 SD_TRACE(SD_LOG_IO_CORE, un, 13545 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 13546 } 13547 13548 13549 /* 13550 * Function: sd_bioclone_alloc 13551 * 13552 * Description: Allocate a buf(9S) and init it as per the given buf 13553 * and the various arguments. The associated sd_xbuf 13554 * struct is (nearly) duplicated.
The struct buf *bp 13555 * argument is saved in new_xp->xb_private. 13556 * 13557 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 13558 * datalen - size of data area for the shadow bp 13559 * blkno - starting LBA 13560 * func - function pointer for b_iodone in the shadow buf. (May 13561 * be NULL if none.) 13562 * 13563 * Return Code: Pointer to the allocated buf(9S) struct 13564 * 13565 * Context: Can sleep. 13566 */ 13567 13568 static struct buf * 13569 sd_bioclone_alloc(struct buf *bp, size_t datalen, 13570 daddr_t blkno, int (*func)(struct buf *)) 13571 { 13572 struct sd_lun *un; 13573 struct sd_xbuf *xp; 13574 struct sd_xbuf *new_xp; 13575 struct buf *new_bp; 13576 13577 ASSERT(bp != NULL); 13578 xp = SD_GET_XBUF(bp); 13579 ASSERT(xp != NULL); 13580 un = SD_GET_UN(bp); 13581 ASSERT(un != NULL); 13582 ASSERT(!mutex_owned(SD_MUTEX(un))); 13583 13584 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 13585 NULL, KM_SLEEP); 13586 13587 new_bp->b_lblkno = blkno; 13588 13589 /* 13590 * Allocate an xbuf for the shadow bp and copy the contents of the 13591 * original xbuf into it. 13592 */ 13593 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 13594 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 13595 13596 /* 13597 * The given bp is automatically saved in the xb_private member 13598 * of the new xbuf. Callers are allowed to depend on this. 13599 */ 13600 new_xp->xb_private = bp; 13601 13602 new_bp->b_private = new_xp; 13603 13604 return (new_bp); 13605 } 13606 13607 /* 13608 * Function: sd_shadow_buf_alloc 13609 * 13610 * Description: Allocate a buf(9S) and init it as per the given buf 13611 * and the various arguments. The associated sd_xbuf 13612 * struct is (nearly) duplicated. The struct buf *bp 13613 * argument is saved in new_xp->xb_private. 13614 * 13615 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 13616 * datalen - size of data area for the shadow bp 13617 * bflags - B_READ or B_WRITE (pseudo flag) 13618 * blkno - starting LBA 13619 * func - function pointer for b_iodone in the shadow buf. (May 13620 * be NULL if none.) 13621 * 13622 * Return Code: Pointer to the allocated buf(9S) struct 13623 * 13624 * Context: Can sleep.
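 *
 * Illustrative call sequence (a sketch only; the variable
 * names used here are assumed for the example and are not
 * part of this interface):
 *
 *	new_bp = sd_shadow_buf_alloc(bp, shadow_len, B_READ,
 *	    shadow_blkno, (int (*)(struct buf *))iodone_func);
 *	(transport new_bp; when it completes, copy the data out
 *	and release it with sd_shadow_buf_free(new_bp))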
13625 */ 13626 13627 static struct buf * 13628 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 13629 daddr_t blkno, int (*func)(struct buf *)) 13630 { 13631 struct sd_lun *un; 13632 struct sd_xbuf *xp; 13633 struct sd_xbuf *new_xp; 13634 struct buf *new_bp; 13635 13636 ASSERT(bp != NULL); 13637 xp = SD_GET_XBUF(bp); 13638 ASSERT(xp != NULL); 13639 un = SD_GET_UN(bp); 13640 ASSERT(un != NULL); 13641 ASSERT(!mutex_owned(SD_MUTEX(un))); 13642 13643 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 13644 bp_mapin(bp); 13645 } 13646 13647 bflags &= (B_READ | B_WRITE); 13648 #if defined(__i386) || defined(__amd64) 13649 new_bp = getrbuf(KM_SLEEP); 13650 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 13651 new_bp->b_bcount = datalen; 13652 new_bp->b_flags = bflags | 13653 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 13654 #else 13655 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 13656 datalen, bflags, SLEEP_FUNC, NULL); 13657 #endif 13658 new_bp->av_forw = NULL; 13659 new_bp->av_back = NULL; 13660 new_bp->b_dev = bp->b_dev; 13661 new_bp->b_blkno = blkno; 13662 new_bp->b_iodone = func; 13663 new_bp->b_edev = bp->b_edev; 13664 new_bp->b_resid = 0; 13665 13666 /* We need to preserve the B_FAILFAST flag */ 13667 if (bp->b_flags & B_FAILFAST) { 13668 new_bp->b_flags |= B_FAILFAST; 13669 } 13670 13671 /* 13672 * Allocate an xbuf for the shadow bp and copy the contents of the 13673 * original xbuf into it. 13674 */ 13675 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 13676 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 13677 13678 /* Need later to copy data between the shadow buf & original buf! */ 13679 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 13680 13681 /* 13682 * The given bp is automatically saved in the xb_private member 13683 * of the new xbuf. Callers are allowed to depend on this. 13684 */ 13685 new_xp->xb_private = bp; 13686 13687 new_bp->b_private = new_xp; 13688 13689 return (new_bp); 13690 } 13691 13692 /* 13693 * Function: sd_bioclone_free 13694 * 13695 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 13696 * in the larger than partition operation. 13697 * 13698 * Context: May be called under interrupt context 13699 */ 13700 13701 static void 13702 sd_bioclone_free(struct buf *bp) 13703 { 13704 struct sd_xbuf *xp; 13705 13706 ASSERT(bp != NULL); 13707 xp = SD_GET_XBUF(bp); 13708 ASSERT(xp != NULL); 13709 13710 /* 13711 * Call bp_mapout() before freeing the buf, in case a lower 13712 * layer or HBA had done a bp_mapin(). We must do this here 13713 * as we are the "originator" of the shadow buf. 13714 */ 13715 bp_mapout(bp); 13716 13717 /* 13718 * Null out b_iodone before freeing the bp, to ensure that the driver 13719 * never gets confused by a stale value in this field. (Just a little 13720 * extra defensiveness here.) 13721 */ 13722 bp->b_iodone = NULL; 13723 13724 freerbuf(bp); 13725 13726 kmem_free(xp, sizeof (struct sd_xbuf)); 13727 } 13728 13729 /* 13730 * Function: sd_shadow_buf_free 13731 * 13732 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 13733 * 13734 * Context: May be called under interrupt context 13735 */ 13736 13737 static void 13738 sd_shadow_buf_free(struct buf *bp) 13739 { 13740 struct sd_xbuf *xp; 13741 13742 ASSERT(bp != NULL); 13743 xp = SD_GET_XBUF(bp); 13744 ASSERT(xp != NULL); 13745 13746 #if defined(__sparc) 13747 /* 13748 * Call bp_mapout() before freeing the buf, in case a lower 13749 * layer or HBA had done a bp_mapin().
We must do this here 13750 * as we are the "originator" of the shadow buf. 13751 */ 13752 bp_mapout(bp); 13753 #endif 13754 13755 /* 13756 * Null out b_iodone before freeing the bp, to ensure that the driver 13757 * never gets confused by a stale value in this field. (Just a little 13758 * extra defensiveness here.) 13759 */ 13760 bp->b_iodone = NULL; 13761 13762 #if defined(__i386) || defined(__amd64) 13763 kmem_free(bp->b_un.b_addr, bp->b_bcount); 13764 freerbuf(bp); 13765 #else 13766 scsi_free_consistent_buf(bp); 13767 #endif 13768 13769 kmem_free(xp, sizeof (struct sd_xbuf)); 13770 } 13771 13772 13773 /* 13774 * Function: sd_print_transport_rejected_message 13775 * 13776 * Description: This implements the ludicrously complex rules for printing 13777 * a "transport rejected" message. This is to address the 13778 * specific problem of having a flood of this error message 13779 * produced when a failover occurs. 13780 * 13781 * Context: Any. 13782 */ 13783 13784 static void 13785 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 13786 int code) 13787 { 13788 ASSERT(un != NULL); 13789 ASSERT(mutex_owned(SD_MUTEX(un))); 13790 ASSERT(xp != NULL); 13791 13792 /* 13793 * Print the "transport rejected" message under the following 13794 * conditions: 13795 * 13796 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 13797 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 13798 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 13799 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 13800 * scsi_transport(9F) (which indicates that the target might have 13801 * gone off-line). This uses the un->un_tran_fatal_count 13802 * counter, which is incremented whenever a TRAN_FATAL_ERROR is 13803 * received, and reset to zero whenever a TRAN_ACCEPT is returned 13804 * from scsi_transport(). 13805 * 13806 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 13807 * the preceding cases in order for the message to be printed. 13808 */ 13809 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 13810 if ((sd_level_mask & SD_LOGMASK_DIAG) || 13811 (code != TRAN_FATAL_ERROR) || 13812 (un->un_tran_fatal_count == 1)) { 13813 switch (code) { 13814 case TRAN_BADPKT: 13815 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13816 "transport rejected bad packet\n"); 13817 break; 13818 case TRAN_FATAL_ERROR: 13819 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13820 "transport rejected fatal error\n"); 13821 break; 13822 default: 13823 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13824 "transport rejected (%d)\n", code); 13825 break; 13826 } 13827 } 13828 } 13829 } 13830 13831 13832 /* 13833 * Function: sd_add_buf_to_waitq 13834 * 13835 * Description: Add the given buf(9S) struct to the wait queue for the 13836 * instance. If sorting is enabled, then the buf is added 13837 * to the queue via an elevator sort algorithm (a la 13838 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 13839 * If sorting is not enabled, then the buf is just added 13840 * to the end of the wait queue. 13841 * 13842 * Return Code: void 13843 * 13844 * Context: Does not sleep/block, therefore technically can be called 13845 * from any context. However, if sorting is enabled, then the 13846 * execution time is indeterminate, and may take a long time if 13847 * the wait queue grows large.
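 *
 * Worked example of the sort (illustrative block numbers
 * only): if the queue currently holds requests for blocks
 * 120, 180, 40, 60, the inversion from 180 to 40 marks the
 * start of the second (wrapped) list. A new request for
 * block 50 is then inserted between 40 and 60 on the second
 * list, while a new request for block 150 goes between 120
 * and 180 on the first list.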
13848 */ 13849 13850 static void 13851 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 13852 { 13853 struct buf *ap; 13854 13855 ASSERT(bp != NULL); 13856 ASSERT(un != NULL); 13857 ASSERT(mutex_owned(SD_MUTEX(un))); 13858 13859 /* If the queue is empty, add the buf as the only entry & return. */ 13860 if (un->un_waitq_headp == NULL) { 13861 ASSERT(un->un_waitq_tailp == NULL); 13862 un->un_waitq_headp = un->un_waitq_tailp = bp; 13863 bp->av_forw = NULL; 13864 return; 13865 } 13866 13867 ASSERT(un->un_waitq_tailp != NULL); 13868 13869 /* 13870 * If sorting is disabled, just add the buf to the tail end of 13871 * the wait queue and return. 13872 */ 13873 if (un->un_f_disksort_disabled) { 13874 un->un_waitq_tailp->av_forw = bp; 13875 un->un_waitq_tailp = bp; 13876 bp->av_forw = NULL; 13877 return; 13878 } 13879 13880 /* 13881 * Sort through the list of requests currently on the wait queue 13882 * and add the new buf request at the appropriate position. 13883 * 13884 * The un->un_waitq_headp is an activity chain pointer on which 13885 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 13886 * first queue holds those requests which are positioned after 13887 * the current SD_GET_BLKNO() (in the first request); the second holds 13888 * requests which came in after their SD_GET_BLKNO() number was passed. 13889 * Thus we implement a one-way scan, retracting after reaching 13890 * the end of the drive to the first request on the second 13891 * queue, at which time it becomes the first queue. 13892 * A one-way scan is natural because of the way UNIX read-ahead 13893 * blocks are allocated. 13894 * 13895 * If we lie after the first request, then we must locate the 13896 * second request list and add ourselves to it. 13897 */ 13898 ap = un->un_waitq_headp; 13899 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 13900 while (ap->av_forw != NULL) { 13901 /* 13902 * Look for an "inversion" in the (normally 13903 * ascending) block numbers. This indicates 13904 * the start of the second request list. 13905 */ 13906 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 13907 /* 13908 * Search the second request list for the 13909 * first request at a larger block number. 13910 * We go before that; however if there is 13911 * no such request, we go at the end. 13912 */ 13913 do { 13914 if (SD_GET_BLKNO(bp) < 13915 SD_GET_BLKNO(ap->av_forw)) { 13916 goto insert; 13917 } 13918 ap = ap->av_forw; 13919 } while (ap->av_forw != NULL); 13920 goto insert; /* after last */ 13921 } 13922 ap = ap->av_forw; 13923 } 13924 13925 /* 13926 * No inversions... we will go after the last, and 13927 * be the first request in the second request list. 13928 */ 13929 goto insert; 13930 } 13931 13932 /* 13933 * Request is at/after the current request... 13934 * sort in the first request list. 13935 */ 13936 while (ap->av_forw != NULL) { 13937 /* 13938 * We want to go after the current request (1) if 13939 * there is an inversion after it (i.e. it is the end 13940 * of the first request list), or (2) if the next 13941 * request is a larger block no. than our request. 13942 */ 13943 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 13944 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 13945 goto insert; 13946 } 13947 ap = ap->av_forw; 13948 } 13949 13950 /* 13951 * Neither a second list nor a larger request, therefore 13952 * we go at the end of the first list (which is the same 13953 * as the end of the whole shebang).
13954 */ 13955 insert: 13956 bp->av_forw = ap->av_forw; 13957 ap->av_forw = bp; 13958 13959 /* 13960 * If we inserted onto the tail end of the waitq, make sure the 13961 * tail pointer is updated. 13962 */ 13963 if (ap == un->un_waitq_tailp) { 13964 un->un_waitq_tailp = bp; 13965 } 13966 } 13967 13968 13969 /* 13970 * Function: sd_start_cmds 13971 * 13972 * Description: Remove and transport cmds from the driver queues. 13973 * 13974 * Arguments: un - pointer to the unit (soft state) struct for the target. 13975 * 13976 * immed_bp - ptr to a buf to be transported immediately. Only 13977 * the immed_bp is transported; bufs on the waitq are not 13978 * processed and the un_retry_bp is not checked. If immed_bp is 13979 * NULL, then normal queue processing is performed. 13980 * 13981 * Context: May be called from kernel thread context, interrupt context, 13982 * or runout callback context. This function may not block or 13983 * call routines that block. 13984 */ 13985 13986 static void 13987 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 13988 { 13989 struct sd_xbuf *xp; 13990 struct buf *bp; 13991 void (*statp)(kstat_io_t *); 13992 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13993 void (*saved_statp)(kstat_io_t *); 13994 #endif 13995 int rval; 13996 struct sd_fm_internal *sfip = NULL; 13997 13998 ASSERT(un != NULL); 13999 ASSERT(mutex_owned(SD_MUTEX(un))); 14000 ASSERT(un->un_ncmds_in_transport >= 0); 14001 ASSERT(un->un_throttle >= 0); 14002 14003 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 14004 14005 do { 14006 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14007 saved_statp = NULL; 14008 #endif 14009 14010 /* 14011 * If we are syncing or dumping, fail the command to 14012 * avoid recursively calling back into scsi_transport(). 14013 * The dump I/O itself uses a separate code path so this 14014 * only prevents non-dump I/O from being sent while dumping. 14015 * File system sync takes place before dumping begins. 14016 * During panic, filesystem I/O is allowed provided 14017 * un_in_callback is <= 1. This is to prevent recursion 14018 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14019 * sd_start_cmds and so on. See panic.c for more information 14020 * about the states the system can be in during panic. 14021 */ 14022 if ((un->un_state == SD_STATE_DUMPING) || 14023 (ddi_in_panic() && (un->un_in_callback > 1))) { 14024 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14025 "sd_start_cmds: panicking\n"); 14026 goto exit; 14027 } 14028 14029 if ((bp = immed_bp) != NULL) { 14030 /* 14031 * We have a bp that must be transported immediately. 14032 * It's OK to transport the immed_bp here without doing 14033 * the throttle limit check because the immed_bp is 14034 * always used in a retry/recovery case. This means 14035 * that we know we are not at the throttle limit by 14036 * virtue of the fact that to get here we must have 14037 * already gotten a command back via sdintr(). This also 14038 * relies on (1) the command on un_retry_bp preventing 14039 * further commands from the waitq from being issued; 14040 * and (2) the code in sd_retry_command checking the 14041 * throttle limit before issuing a delayed or immediate 14042 * retry. This holds even if the throttle limit is 14043 * currently ratcheted down from its maximum value. 
14044 */ 14045 statp = kstat_runq_enter; 14046 if (bp == un->un_retry_bp) { 14047 ASSERT((un->un_retry_statp == NULL) || 14048 (un->un_retry_statp == kstat_waitq_enter) || 14049 (un->un_retry_statp == 14050 kstat_runq_back_to_waitq)); 14051 /* 14052 * If the waitq kstat was incremented when 14053 * sd_set_retry_bp() queued this bp for a retry, 14054 * then we must set up statp so that the waitq 14055 * count will get decremented correctly below. 14056 * Also we must clear un->un_retry_statp to 14057 * ensure that we do not act on a stale value 14058 * in this field. 14059 */ 14060 if ((un->un_retry_statp == kstat_waitq_enter) || 14061 (un->un_retry_statp == 14062 kstat_runq_back_to_waitq)) { 14063 statp = kstat_waitq_to_runq; 14064 } 14065 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14066 saved_statp = un->un_retry_statp; 14067 #endif 14068 un->un_retry_statp = NULL; 14069 14070 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14071 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 14072 "un_throttle:%d un_ncmds_in_transport:%d\n", 14073 un, un->un_retry_bp, un->un_throttle, 14074 un->un_ncmds_in_transport); 14075 } else { 14076 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 14077 "processing priority bp:0x%p\n", bp); 14078 } 14079 14080 } else if ((bp = un->un_waitq_headp) != NULL) { 14081 /* 14082 * A command on the waitq is ready to go, but do not 14083 * send it if: 14084 * 14085 * (1) the throttle limit has been reached, or 14086 * (2) a retry is pending, or 14087 * (3) a START_STOP_UNIT callback is pending, or 14088 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 14089 * command is pending. 14090 * 14091 * For all of these conditions, IO processing will 14092 * restart after the condition is cleared. 14093 */ 14094 if (un->un_ncmds_in_transport >= un->un_throttle) { 14095 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14096 "sd_start_cmds: exiting, " 14097 "throttle limit reached!\n"); 14098 goto exit; 14099 } 14100 if (un->un_retry_bp != NULL) { 14101 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14102 "sd_start_cmds: exiting, retry pending!\n"); 14103 goto exit; 14104 } 14105 if (un->un_startstop_timeid != NULL) { 14106 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14107 "sd_start_cmds: exiting, " 14108 "START_STOP pending!\n"); 14109 goto exit; 14110 } 14111 if (un->un_direct_priority_timeid != NULL) { 14112 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14113 "sd_start_cmds: exiting, " 14114 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 14115 goto exit; 14116 } 14117 14118 /* Dequeue the command */ 14119 un->un_waitq_headp = bp->av_forw; 14120 if (un->un_waitq_headp == NULL) { 14121 un->un_waitq_tailp = NULL; 14122 } 14123 bp->av_forw = NULL; 14124 statp = kstat_waitq_to_runq; 14125 SD_TRACE(SD_LOG_IO_CORE, un, 14126 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 14127 14128 } else { 14129 /* No work to do so bail out now */ 14130 SD_TRACE(SD_LOG_IO_CORE, un, 14131 "sd_start_cmds: no more work, exiting!\n"); 14132 goto exit; 14133 } 14134 14135 /* 14136 * Reset the state to normal. This is the mechanism by which 14137 * the state transitions from either SD_STATE_RWAIT or 14138 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 14139 * If state is SD_STATE_PM_CHANGING then this command is 14140 * part of the device power control and the state must 14141 * not be put back to normal. Doing so would 14142 * allow new commands to proceed when they shouldn't, since 14143 * the device may be going off.
14144 */ 14145 if ((un->un_state != SD_STATE_SUSPENDED) && 14146 (un->un_state != SD_STATE_PM_CHANGING)) { 14147 New_state(un, SD_STATE_NORMAL); 14148 } 14149 14150 xp = SD_GET_XBUF(bp); 14151 ASSERT(xp != NULL); 14152 14153 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14154 /* 14155 * Allocate the scsi_pkt if we need one, or attach DMA 14156 * resources if we have a scsi_pkt that needs them. The 14157 * latter should only occur for commands that are being 14158 * retried. 14159 */ 14160 if ((xp->xb_pktp == NULL) || 14161 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14162 #else 14163 if (xp->xb_pktp == NULL) { 14164 #endif 14165 /* 14166 * There is no scsi_pkt allocated for this buf. Call 14167 * the initpkt function to allocate & init one. 14168 * 14169 * The scsi_init_pkt runout callback functionality is 14170 * implemented as follows: 14171 * 14172 * 1) The initpkt function always calls 14173 * scsi_init_pkt(9F) with sdrunout specified as the 14174 * callback routine. 14175 * 2) A successful packet allocation is initialized and 14176 * the I/O is transported. 14177 * 3) The I/O associated with an allocation resource 14178 * failure is left on its queue to be retried via 14179 * runout or the next I/O. 14180 * 4) The I/O associated with a DMA error is removed 14181 * from the queue and failed with EIO. Processing of 14182 * the transport queues is also halted to be 14183 * restarted via runout or the next I/O. 14184 * 5) The I/O associated with a CDB size or packet 14185 * size error is removed from the queue and failed 14186 * with EIO. Processing of the transport queues is 14187 * continued. 14188 * 14189 * Note: there is no interface for canceling a runout 14190 * callback. To prevent the driver from detaching or 14191 * suspending while a runout is pending, the driver 14192 * state is set to SD_STATE_RWAIT. 14193 * 14194 * Note: using the scsi_init_pkt callback facility can 14195 * result in an I/O request persisting at the head of 14196 * the list which cannot be satisfied even after 14197 * multiple retries. In the future the driver may 14198 * implement some kind of maximum runout count before 14199 * failing an I/O. 14200 * 14201 * Note: the use of funcp below may seem superfluous, 14202 * but it helps warlock figure out the correct 14203 * initpkt function calls (see [s]sd.wlcmd). 14204 */ 14205 struct scsi_pkt *pktp; 14206 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14207 14208 ASSERT(bp != un->un_rqs_bp); 14209 14210 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14211 switch ((*funcp)(bp, &pktp)) { 14212 case SD_PKT_ALLOC_SUCCESS: 14213 xp->xb_pktp = pktp; 14214 SD_TRACE(SD_LOG_IO_CORE, un, 14215 "sd_start_cmds: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14216 pktp); 14217 goto got_pkt; 14218 14219 case SD_PKT_ALLOC_FAILURE: 14220 /* 14221 * Temporary (hopefully) resource depletion. 14222 * Since retries and RQS commands always have a 14223 * scsi_pkt allocated, these cases should never 14224 * get here. So the only cases this needs to 14225 * handle are a bp from the waitq (which we put 14226 * back onto the waitq for sdrunout), or a bp 14227 * sent as an immed_bp (which we just fail). 14228 */ 14229 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14230 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14231 14232 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14233 14234 if (bp == immed_bp) { 14235 /* 14236 * If SD_XB_DMA_FREED is clear, then 14237 * this is a failure to allocate a 14238 * scsi_pkt, and we must fail the 14239 * command.
14240 */ 14241 if ((xp->xb_pkt_flags & 14242 SD_XB_DMA_FREED) == 0) { 14243 break; 14244 } 14245 14246 /* 14247 * If this immediate command is NOT our 14248 * un_retry_bp, then we must fail it. 14249 */ 14250 if (bp != un->un_retry_bp) { 14251 break; 14252 } 14253 14254 /* 14255 * We get here if this cmd is our 14256 * un_retry_bp that was DMAFREED, but 14257 * scsi_init_pkt() failed to reallocate 14258 * DMA resources when we attempted to 14259 * retry it. This can happen when an 14260 * mpxio failover is in progress, but 14261 * we don't want to just fail the 14262 * command in this case. 14263 * 14264 * Use timeout(9F) to restart it after 14265 * a 100ms delay. We don't want to 14266 * let sdrunout() restart it, because 14267 * sdrunout() is just supposed to start 14268 * commands that are sitting on the 14269 * wait queue. The un_retry_bp stays 14270 * set until the command completes, but 14271 * sdrunout can be called many times 14272 * before that happens. Since sdrunout 14273 * cannot tell if the un_retry_bp is 14274 * already in the transport, it could 14275 * end up calling scsi_transport() for 14276 * the un_retry_bp multiple times. 14277 * 14278 * Also: don't schedule the callback 14279 * if some other callback is already 14280 * pending. 14281 */ 14282 if (un->un_retry_statp == NULL) { 14283 /* 14284 * restore the kstat pointer to 14285 * keep kstat counts coherent 14286 * when we do retry the command. 14287 */ 14288 un->un_retry_statp = 14289 saved_statp; 14290 } 14291 14292 if ((un->un_startstop_timeid == NULL) && 14293 (un->un_retry_timeid == NULL) && 14294 (un->un_direct_priority_timeid == 14295 NULL)) { 14296 14297 un->un_retry_timeid = 14298 timeout( 14299 sd_start_retry_command, 14300 un, SD_RESTART_TIMEOUT); 14301 } 14302 goto exit; 14303 } 14304 14305 #else 14306 if (bp == immed_bp) { 14307 break; /* Just fail the command */ 14308 } 14309 #endif 14310 14311 /* Add the buf back to the head of the waitq */ 14312 bp->av_forw = un->un_waitq_headp; 14313 un->un_waitq_headp = bp; 14314 if (un->un_waitq_tailp == NULL) { 14315 un->un_waitq_tailp = bp; 14316 } 14317 goto exit; 14318 14319 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14320 /* 14321 * HBA DMA resource failure. Fail the command 14322 * and continue processing of the queues. 14323 */ 14324 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14325 "sd_start_cmds: " 14326 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14327 break; 14328 14329 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14330 /* 14331 * Note:x86: Partial DMA mapping not supported 14332 * for USCSI commands, and all the needed DMA 14333 * resources were not allocated. 14334 */ 14335 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14336 "sd_start_cmds: " 14337 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14338 break; 14339 14340 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14341 /* 14342 * Note:x86: Request cannot fit into CDB based 14343 * on lba and len. 14344 */ 14345 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14346 "sd_start_cmds: " 14347 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14348 break; 14349 14350 default: 14351 /* Should NEVER get here! */ 14352 panic("scsi_initpkt error"); 14353 /*NOTREACHED*/ 14354 } 14355 14356 /* 14357 * Fatal error in allocating a scsi_pkt for this buf. 14358 * Update kstats & return the buf with an error code. 14359 * We must use sd_return_failed_command_no_restart() to 14360 * avoid a recursive call back into sd_start_cmds(). 14361 * However this also means that we must keep processing 14362 * the waitq here in order to avoid stalling. 
14363 */ 14364 if (statp == kstat_waitq_to_runq) { 14365 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14366 } 14367 sd_return_failed_command_no_restart(un, bp, EIO); 14368 if (bp == immed_bp) { 14369 /* immed_bp is gone by now, so clear this */ 14370 immed_bp = NULL; 14371 } 14372 continue; 14373 } 14374 got_pkt: 14375 if (bp == immed_bp) { 14376 /* goto the head of the class.... */ 14377 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14378 } 14379 14380 un->un_ncmds_in_transport++; 14381 SD_UPDATE_KSTATS(un, statp, bp); 14382 14383 /* 14384 * Call scsi_transport() to send the command to the target. 14385 * According to SCSA architecture, we must drop the mutex here 14386 * before calling scsi_transport() in order to avoid deadlock. 14387 * Note that the scsi_pkt's completion routine can be executed 14388 * (from interrupt context) even before the call to 14389 * scsi_transport() returns. 14390 */ 14391 SD_TRACE(SD_LOG_IO_CORE, un, 14392 "sd_start_cmds: calling scsi_transport()\n"); 14393 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14394 14395 mutex_exit(SD_MUTEX(un)); 14396 rval = scsi_transport(xp->xb_pktp); 14397 mutex_enter(SD_MUTEX(un)); 14398 14399 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14400 "sd_start_cmds: scsi_transport() returned %d\n", rval); 14401 14402 switch (rval) { 14403 case TRAN_ACCEPT: 14404 /* Clear this with every pkt accepted by the HBA */ 14405 un->un_tran_fatal_count = 0; 14406 break; /* Success; try the next cmd (if any) */ 14407 14408 case TRAN_BUSY: 14409 un->un_ncmds_in_transport--; 14410 ASSERT(un->un_ncmds_in_transport >= 0); 14411 14412 /* 14413 * Don't retry request sense, the sense data 14414 * is lost when another request is sent. 14415 * Free up the rqs buf and retry 14416 * the original failed cmd. Update kstat. 14417 */ 14418 if (bp == un->un_rqs_bp) { 14419 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14420 bp = sd_mark_rqs_idle(un, xp); 14421 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 14422 NULL, NULL, EIO, un->un_busy_timeout / 500, 14423 kstat_waitq_enter); 14424 goto exit; 14425 } 14426 14427 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14428 /* 14429 * Free the DMA resources for the scsi_pkt. This will 14430 * allow mpxio to select another path the next time 14431 * we call scsi_transport() with this scsi_pkt. 14432 * See sdintr() for the rationalization behind this. 14433 */ 14434 if ((un->un_f_is_fibre == TRUE) && 14435 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14436 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 14437 scsi_dmafree(xp->xb_pktp); 14438 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14439 } 14440 #endif 14441 14442 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 14443 /* 14444 * Commands that are SD_PATH_DIRECT_PRIORITY 14445 * are for error recovery situations. These do 14446 * not use the normal command waitq, so if they 14447 * get a TRAN_BUSY we cannot put them back onto 14448 * the waitq for later retry. One possible 14449 * problem is that there could already be some 14450 * other command on un_retry_bp that is waiting 14451 * for this one to complete, so we would be 14452 * deadlocked if we put this command back onto 14453 * the waitq for later retry (since un_retry_bp 14454 * must complete before the driver gets back to 14455 * commands on the waitq). 14456 * 14457 * To avoid deadlock we must schedule a callback 14458 * that will restart this command after a set 14459 * interval. 
This should keep retrying for as 14460 * long as the underlying transport keeps 14461 * returning TRAN_BUSY (just like for other 14462 * commands). Use the same timeout interval as 14463 * for the ordinary TRAN_BUSY retry. 14464 */ 14465 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14466 "sd_start_cmds: scsi_transport() returned " 14467 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 14468 14469 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14470 un->un_direct_priority_timeid = 14471 timeout(sd_start_direct_priority_command, 14472 bp, un->un_busy_timeout / 500); 14473 14474 goto exit; 14475 } 14476 14477 /* 14478 * For TRAN_BUSY, we want to reduce the throttle value, 14479 * unless we are retrying a command. 14480 */ 14481 if (bp != un->un_retry_bp) { 14482 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 14483 } 14484 14485 /* 14486 * Set up the bp to be tried again 10 ms later. 14487 * Note:x86: Is there a timeout value in the sd_lun 14488 * for this condition? 14489 */ 14490 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 14491 kstat_runq_back_to_waitq); 14492 goto exit; 14493 14494 case TRAN_FATAL_ERROR: 14495 un->un_tran_fatal_count++; 14496 /* FALLTHRU */ 14497 14498 case TRAN_BADPKT: 14499 default: 14500 un->un_ncmds_in_transport--; 14501 ASSERT(un->un_ncmds_in_transport >= 0); 14502 14503 /* 14504 * If this is our REQUEST SENSE command with a 14505 * transport error, we must get back the pointers 14506 * to the original buf, and mark the REQUEST 14507 * SENSE command as "available". 14508 */ 14509 if (bp == un->un_rqs_bp) { 14510 bp = sd_mark_rqs_idle(un, xp); 14511 xp = SD_GET_XBUF(bp); 14512 } else { 14513 /* 14514 * Legacy behavior: do not update transport 14515 * error count for request sense commands. 14516 */ 14517 SD_UPDATE_ERRSTATS(un, sd_transerrs); 14518 } 14519 14520 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14521 sd_print_transport_rejected_message(un, xp, rval); 14522 14523 /* 14524 * This command will be terminated by SD driver due 14525 * to a fatal transport error. We should post 14526 * ereport.io.scsi.cmd.disk.tran with driver-assessment 14527 * of "fail" for any command to indicate this 14528 * situation. 14529 */ 14530 if (xp->xb_ena > 0) { 14531 ASSERT(un->un_fm_private != NULL); 14532 sfip = un->un_fm_private; 14533 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT; 14534 sd_ssc_extract_info(&sfip->fm_ssc, un, 14535 xp->xb_pktp, bp, xp); 14536 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 14537 } 14538 14539 /* 14540 * We must use sd_return_failed_command_no_restart() to 14541 * avoid a recursive call back into sd_start_cmds(). 14542 * However this also means that we must keep processing 14543 * the waitq here in order to avoid stalling. 14544 */ 14545 sd_return_failed_command_no_restart(un, bp, EIO); 14546 14547 /* 14548 * Notify any threads waiting in sd_ddi_suspend() that 14549 * a command completion has occurred. 14550 */ 14551 if (un->un_state == SD_STATE_SUSPENDED) { 14552 cv_broadcast(&un->un_disk_busy_cv); 14553 } 14554 14555 if (bp == immed_bp) { 14556 /* immed_bp is gone by now, so clear this */ 14557 immed_bp = NULL; 14558 } 14559 break; 14560 } 14561 14562 } while (immed_bp == NULL); 14563 14564 exit: 14565 ASSERT(mutex_owned(SD_MUTEX(un))); 14566 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 14567 } 14568 14569 14570 /* 14571 * Function: sd_return_command 14572 * 14573 * Description: Returns a command to its originator (with or without an 14574 * error). Also starts commands waiting to be transported 14575 * to the target. 
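* (Added observation, taken from the function body rather than the original header: SD_MUTEX must be held on entry; it is dropped around the iodone processing near the end of the routine and re-acquired before returning to the caller.)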
14576 * 14577 * Context: May be called from interrupt, kernel, or timeout context 14578 */ 14579 14580 static void 14581 sd_return_command(struct sd_lun *un, struct buf *bp) 14582 { 14583 struct sd_xbuf *xp; 14584 struct scsi_pkt *pktp; 14585 struct sd_fm_internal *sfip; 14586 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON | 14587 SSC_FLAGS_INVALID_STATUS | 14588 SSC_FLAGS_INVALID_SENSE; 14589 14590 ASSERT(bp != NULL); 14591 ASSERT(un != NULL); 14592 ASSERT(mutex_owned(SD_MUTEX(un))); 14593 ASSERT(bp != un->un_rqs_bp); 14594 xp = SD_GET_XBUF(bp); 14595 ASSERT(xp != NULL); 14596 14597 pktp = SD_GET_PKTP(bp); 14598 sfip = (struct sd_fm_internal *)un->un_fm_private; 14599 ASSERT(sfip != NULL); 14600 14601 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 14602 14603 /* 14604 * Note: check for the "sdrestart failed" case. 14605 */ 14606 if ((un->un_partial_dma_supported == 1) && 14607 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 14608 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 14609 (xp->xb_pktp->pkt_resid == 0)) { 14610 14611 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 14612 /* 14613 * Successfully set up next portion of cmd 14614 * transfer, try sending it 14615 */ 14616 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14617 NULL, NULL, 0, (clock_t)0, NULL); 14618 sd_start_cmds(un, NULL); 14619 return; /* Note:x86: need a return here? */ 14620 } 14621 } 14622 14623 /* 14624 * If this is the failfast bp, clear it from un_failfast_bp. This 14625 * can happen if upon being re-tried the failfast bp either 14626 * succeeded or encountered another error (possibly even a different 14627 * error than the one that precipitated the failfast state, but in 14628 * that case it would have had to exhaust retries as well). Regardless, 14629 * this should not occur while the instance is in the active 14630 * failfast state. 14631 */ 14632 if (bp == un->un_failfast_bp) { 14633 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14634 un->un_failfast_bp = NULL; 14635 } 14636 14637 /* 14638 * Clear the failfast state upon successful completion of ANY cmd. 14639 */ 14640 if (bp->b_error == 0) { 14641 un->un_failfast_state = SD_FAILFAST_INACTIVE; 14642 /* 14643 * If this is a successful command that was previously retried, 14644 * we will take it as a recovered command and post an 14645 * ereport with driver-assessment of "recovered". 14646 */ 14647 if (xp->xb_ena > 0) { 14648 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 14649 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY); 14650 } 14651 } else { 14652 /* 14653 * If this is a failed non-USCSI command or it is a command 14654 * which encountered invalid data (pkt-reason, stat-code, 14655 * sense-data) during its last execution, we will post an 14656 * ereport with driver-assessment set accordingly ("fail" or 14657 * "fatal"). 14658 */ 14659 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD) || 14660 (sfip->fm_ssc.ssc_flags & ssc_invalid_flags)) { 14661 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 14662 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 14663 } 14664 } 14665 14666 /* 14667 * This is used if the command was retried one or more times. Show that 14668 * we are done with it, and allow processing of the waitq to resume.
14669 */ 14670 if (bp == un->un_retry_bp) { 14671 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14672 "sd_return_command: un:0x%p: " 14673 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14674 un->un_retry_bp = NULL; 14675 un->un_retry_statp = NULL; 14676 } 14677 14678 SD_UPDATE_RDWR_STATS(un, bp); 14679 SD_UPDATE_PARTITION_STATS(un, bp); 14680 14681 switch (un->un_state) { 14682 case SD_STATE_SUSPENDED: 14683 /* 14684 * Notify any threads waiting in sd_ddi_suspend() that 14685 * a command completion has occurred. 14686 */ 14687 cv_broadcast(&un->un_disk_busy_cv); 14688 break; 14689 default: 14690 sd_start_cmds(un, NULL); 14691 break; 14692 } 14693 14694 /* Return this command up the iodone chain to its originator. */ 14695 mutex_exit(SD_MUTEX(un)); 14696 14697 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14698 xp->xb_pktp = NULL; 14699 14700 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14701 14702 ASSERT(!mutex_owned(SD_MUTEX(un))); 14703 mutex_enter(SD_MUTEX(un)); 14704 14705 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 14706 } 14707 14708 14709 /* 14710 * Function: sd_return_failed_command 14711 * 14712 * Description: Command completion when an error occurred. 14713 * 14714 * Context: May be called from interrupt context 14715 */ 14716 14717 static void 14718 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 14719 { 14720 ASSERT(bp != NULL); 14721 ASSERT(un != NULL); 14722 ASSERT(mutex_owned(SD_MUTEX(un))); 14723 14724 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14725 "sd_return_failed_command: entry\n"); 14726 14727 /* 14728 * b_resid could already be nonzero due to a partial data 14729 * transfer, so do not change it here. 14730 */ 14731 SD_BIOERROR(bp, errcode); 14732 14733 sd_return_command(un, bp); 14734 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14735 "sd_return_failed_command: exit\n"); 14736 } 14737 14738 14739 /* 14740 * Function: sd_return_failed_command_no_restart 14741 * 14742 * Description: Same as sd_return_failed_command, but ensures that no 14743 * call back into sd_start_cmds will be issued. 14744 * 14745 * Context: May be called from interrupt context 14746 */ 14747 14748 static void 14749 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 14750 int errcode) 14751 { 14752 struct sd_xbuf *xp; 14753 14754 ASSERT(bp != NULL); 14755 ASSERT(un != NULL); 14756 ASSERT(mutex_owned(SD_MUTEX(un))); 14757 xp = SD_GET_XBUF(bp); 14758 ASSERT(xp != NULL); 14759 ASSERT(errcode != 0); 14760 14761 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14762 "sd_return_failed_command_no_restart: entry\n"); 14763 14764 /* 14765 * b_resid could already be nonzero due to a partial data 14766 * transfer, so do not change it here. 14767 */ 14768 SD_BIOERROR(bp, errcode); 14769 14770 /* 14771 * If this is the failfast bp, clear it. This can happen if the 14772 * failfast bp encountered a fatal error when we attempted to 14773 * re-try it (such as a scsi_transport(9F) failure). However 14774 * we should NOT be in an active failfast state if the failfast 14775 * bp is not NULL. 14776 */ 14777 if (bp == un->un_failfast_bp) { 14778 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14779 un->un_failfast_bp = NULL; 14780 } 14781 14782 if (bp == un->un_retry_bp) { 14783 /* 14784 * This command was retried one or more times. Show that we are 14785 * done with it, and allow processing of the waitq to resume.
14786 */ 14787 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14788 "sd_return_failed_command_no_restart: " 14789 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14790 un->un_retry_bp = NULL; 14791 un->un_retry_statp = NULL; 14792 } 14793 14794 SD_UPDATE_RDWR_STATS(un, bp); 14795 SD_UPDATE_PARTITION_STATS(un, bp); 14796 14797 mutex_exit(SD_MUTEX(un)); 14798 14799 if (xp->xb_pktp != NULL) { 14800 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14801 xp->xb_pktp = NULL; 14802 } 14803 14804 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14805 14806 mutex_enter(SD_MUTEX(un)); 14807 14808 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14809 "sd_return_failed_command_no_restart: exit\n"); 14810 } 14811 14812 14813 /* 14814 * Function: sd_retry_command 14815 * 14816 * Description: Queue up a command for retry, or (optionally) fail it 14817 * if retry counts are exhausted. 14818 * 14819 * Arguments: un - Pointer to the sd_lun struct for the target. 14820 * 14821 * bp - Pointer to the buf for the command to be retried. 14822 * 14823 * retry_check_flag - Flag to see which (if any) of the retry 14824 * counts should be decremented/checked. If the indicated 14825 * retry count is exhausted, then the command will not be 14826 * retried; it will be failed instead. This should use a 14827 * value equal to one of the following: 14828 * 14829 * SD_RETRIES_NOCHECK 14830 * SD_RETRIES_STANDARD 14831 * SD_RETRIES_VICTIM 14832 * 14833 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 14834 * if the check should be made to see if FLAG_ISOLATE is set 14835 * in the pkt. If FLAG_ISOLATE is set, then the command is 14836 * not retried; it is simply failed. 14837 * 14838 * user_funcp - Ptr to function to call before dispatching the 14839 * command. May be NULL if no action needs to be performed. 14840 * (Primarily intended for printing messages.) 14841 * 14842 * user_arg - Optional argument to be passed along to 14843 * the user_funcp call. 14844 * 14845 * failure_code - errno return code to set in the bp if the 14846 * command is going to be failed. 14847 * 14848 * retry_delay - Retry delay interval in (clock_t) units. May 14849 * be zero which indicates that the command should be retried 14850 * immediately (ie, without an intervening delay). 14851 * 14852 * statp - Ptr to kstat function to be updated if the command 14853 * is queued for a delayed retry. May be NULL if no kstat 14854 * update is desired. 14855 * 14856 * Context: May be called from interrupt context.
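*
* Illustrative example (an editor's sketch, not part of the
* original source): a delayed retry after a BUSY status could be
* requested along the lines of
*
*	sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, NULL,
*	    EIO, un->un_busy_timeout, kstat_waitq_enter);
*
* which checks and increments the busy retry count, arranges for
* EIO to be set in the bp should retries be exhausted, and
* schedules the retry after un_busy_timeout ticks. Actual callers
* and argument choices vary throughout the driver.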
14857 */ 14858 14859 static void 14860 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 14861 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 14862 code), void *user_arg, int failure_code, clock_t retry_delay, 14863 void (*statp)(kstat_io_t *)) 14864 { 14865 struct sd_xbuf *xp; 14866 struct scsi_pkt *pktp; 14867 struct sd_fm_internal *sfip; 14868 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON | 14869 SSC_FLAGS_INVALID_STATUS | 14870 SSC_FLAGS_INVALID_SENSE; 14871 14872 ASSERT(un != NULL); 14873 ASSERT(mutex_owned(SD_MUTEX(un))); 14874 ASSERT(bp != NULL); 14875 xp = SD_GET_XBUF(bp); 14876 ASSERT(xp != NULL); 14877 pktp = SD_GET_PKTP(bp); 14878 ASSERT(pktp != NULL); 14879 14880 sfip = (struct sd_fm_internal *)un->un_fm_private; 14881 ASSERT(sfip != NULL); 14882 14883 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14884 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 14885 14886 /* 14887 * If we are syncing or dumping, fail the command to avoid 14888 * recursively calling back into scsi_transport(). 14889 */ 14890 if (ddi_in_panic()) { 14891 goto fail_command_no_log; 14892 } 14893 14894 /* 14895 * We should never be retrying a command with FLAG_DIAGNOSE set, so 14896 * log an error and fail the command. 14897 */ 14898 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14899 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 14900 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 14901 sd_dump_memory(un, SD_LOG_IO, "CDB", 14902 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 14903 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 14904 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 14905 goto fail_command; 14906 } 14907 14908 /* 14909 * If we are suspended, then put the command onto the head of the 14910 * wait queue since we don't want to start more commands, and 14911 * clear the un_retry_bp. When we are resumed, we will 14912 * handle the commands in the wait queue. 14913 */ 14914 switch (un->un_state) { 14915 case SD_STATE_SUSPENDED: 14916 case SD_STATE_DUMPING: 14917 bp->av_forw = un->un_waitq_headp; 14918 un->un_waitq_headp = bp; 14919 if (un->un_waitq_tailp == NULL) { 14920 un->un_waitq_tailp = bp; 14921 } 14922 if (bp == un->un_retry_bp) { 14923 un->un_retry_bp = NULL; 14924 un->un_retry_statp = NULL; 14925 } 14926 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 14927 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 14928 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 14929 return; 14930 default: 14931 break; 14932 } 14933 14934 /* 14935 * If the caller wants us to check FLAG_ISOLATE, then see if that 14936 * is set; if it is then we do not want to retry the command. 14937 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 14938 */ 14939 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 14940 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 14941 goto fail_command; 14942 } 14943 } 14944 14945 14946 /* 14947 * If SD_RETRIES_FAILFAST is set, it indicates that either a 14948 * command timeout or a selection timeout has occurred. This means 14949 * that we were unable to establish any kind of communication with 14950 * the target, and subsequent retries and/or commands are likely 14951 * to encounter similar results and take a long time to complete. 14952 * 14953 * If this is a failfast error condition, we need to update the 14954 * failfast state, even if this bp does not have B_FAILFAST set.
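*
* (Editorial summary of the state machine implemented below, not
* original text: the driver is normally SD_FAILFAST_INACTIVE. The
* first failfast error records the offending bp in un_failfast_bp,
* the "failfast pending" stage. A second failfast error on that
* same bp enters SD_FAILFAST_ACTIVE and flushes the queues via
* sd_failfast_flushq(). A non-failfast error condition drops the
* state back to SD_FAILFAST_INACTIVE without touching the pending
* bp.)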
14955 */ 14956 if (retry_check_flag & SD_RETRIES_FAILFAST) { 14957 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 14958 ASSERT(un->un_failfast_bp == NULL); 14959 /* 14960 * If we are already in the active failfast state, and 14961 * another failfast error condition has been detected, 14962 * then fail this command if it has B_FAILFAST set. 14963 * If B_FAILFAST is clear, then maintain the legacy 14964 * behavior of retrying heroically, even though this will 14965 * take a lot more time to fail the command. 14966 */ 14967 if (bp->b_flags & B_FAILFAST) { 14968 goto fail_command; 14969 } 14970 } else { 14971 /* 14972 * We're not in the active failfast state, but we 14973 * have a failfast error condition, so we must begin 14974 * transition to the next state. We do this regardless 14975 * of whether or not this bp has B_FAILFAST set. 14976 */ 14977 if (un->un_failfast_bp == NULL) { 14978 /* 14979 * This is the first bp to meet a failfast 14980 * condition so save it on un_failfast_bp & 14981 * do normal retry processing. Do not enter 14982 * active failfast state yet. This marks 14983 * entry into the "failfast pending" state. 14984 */ 14985 un->un_failfast_bp = bp; 14986 14987 } else if (un->un_failfast_bp == bp) { 14988 /* 14989 * This is the second time *this* bp has 14990 * encountered a failfast error condition, 14991 * so enter active failfast state & flush 14992 * queues as appropriate. 14993 */ 14994 un->un_failfast_state = SD_FAILFAST_ACTIVE; 14995 un->un_failfast_bp = NULL; 14996 sd_failfast_flushq(un); 14997 14998 /* 14999 * Fail this bp now if B_FAILFAST set; 15000 * otherwise continue with retries. (It would 15001 * be pretty ironic if this bp succeeded on a 15002 * subsequent retry after we just flushed all 15003 * the queues). 15004 */ 15005 if (bp->b_flags & B_FAILFAST) { 15006 goto fail_command; 15007 } 15008 15009 #if !defined(lint) && !defined(__lint) 15010 } else { 15011 /* 15012 * If neither of the preceding conditionals 15013 * was true, it means that there is some 15014 * *other* bp that has met an initial failfast 15015 * condition and is currently either being 15016 * retried or is waiting to be retried. In 15017 * that case we should perform normal retry 15018 * processing on *this* bp, since there is a 15019 * chance that the current failfast condition 15020 * is transient and recoverable. If that does 15021 * not turn out to be the case, then retries 15022 * will be cleared when the wait queue is 15023 * flushed anyway. 15024 */ 15025 #endif 15026 } 15027 } 15028 } else { 15029 /* 15030 * SD_RETRIES_FAILFAST is clear, which indicates that we 15031 * likely were able to at least establish some level of 15032 * communication with the target and subsequent commands 15033 * and/or retries are likely to get through to the target. 15034 * In this case we want to be aggressive about clearing 15035 * the failfast state. Note that this does not affect 15036 * the "failfast pending" condition. 15037 */ 15038 un->un_failfast_state = SD_FAILFAST_INACTIVE; 15039 } 15040 15041 15042 /* 15043 * Check the specified retry count to see if we can still do 15044 * any retries with this pkt before we should fail it. 15045 */ 15046 switch (retry_check_flag & SD_RETRIES_MASK) { 15047 case SD_RETRIES_VICTIM: 15048 /* 15049 * Check the victim retry count. If exhausted, then fall 15050 * thru & check against the standard retry count.
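* (Added note, derived from the code below: exhausting the victim
* retry count does not by itself fail the command; the case falls
* through, so the command must also exhaust the standard retry
* count before it is actually failed.)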
15051 */ 15052 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 15053 /* Increment count & proceed with the retry */ 15054 xp->xb_victim_retry_count++; 15055 break; 15056 } 15057 /* Victim retries exhausted, fall back to std. retries... */ 15058 /* FALLTHRU */ 15059 15060 case SD_RETRIES_STANDARD: 15061 if (xp->xb_retry_count >= un->un_retry_count) { 15062 /* Retries exhausted, fail the command */ 15063 SD_TRACE(SD_LOG_IO_CORE, un, 15064 "sd_retry_command: retries exhausted!\n"); 15065 /* 15066 * update b_resid for failed SCMD_READ & SCMD_WRITE 15067 * commands with nonzero pkt_resid. 15068 */ 15069 if ((pktp->pkt_reason == CMD_CMPLT) && 15070 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 15071 (pktp->pkt_resid != 0)) { 15072 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 15073 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 15074 SD_UPDATE_B_RESID(bp, pktp); 15075 } 15076 } 15077 goto fail_command; 15078 } 15079 xp->xb_retry_count++; 15080 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15081 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15082 break; 15083 15084 case SD_RETRIES_UA: 15085 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 15086 /* Retries exhausted, fail the command */ 15087 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15088 "Unit Attention retries exhausted. " 15089 "Check the target.\n"); 15090 goto fail_command; 15091 } 15092 xp->xb_ua_retry_count++; 15093 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15094 "sd_retry_command: retry count:%d\n", 15095 xp->xb_ua_retry_count); 15096 break; 15097 15098 case SD_RETRIES_BUSY: 15099 if (xp->xb_retry_count >= un->un_busy_retry_count) { 15100 /* Retries exhausted, fail the command */ 15101 SD_TRACE(SD_LOG_IO_CORE, un, 15102 "sd_retry_command: retries exhausted!\n"); 15103 goto fail_command; 15104 } 15105 xp->xb_retry_count++; 15106 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15107 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15108 break; 15109 15110 case SD_RETRIES_NOCHECK: 15111 default: 15112 /* No retry count to check. Just proceed with the retry */ 15113 break; 15114 } 15115 15116 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15117 15118 /* 15119 * If this is a non-USCSI command being retried or it is a command 15120 * which encountered invalid data (pkt-reason, stat-code, sense-data) 15121 * during its last execution, we should post an ereport with 15122 * driver-assessment of the value "retry". 15123 */ 15124 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD) || 15125 (sfip->fm_ssc.ssc_flags & ssc_invalid_flags)) { 15126 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15127 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY); 15128 } 15129 15130 /* 15131 * If we were given a zero timeout, we must attempt to retry the 15132 * command immediately (ie, without a delay). 15133 */ 15134 if (retry_delay == 0) { 15135 /* 15136 * Check some limiting conditions to see if we can actually 15137 * do the immediate retry. If we cannot, then we must 15138 * fall back to queueing up a delayed retry. 15139 */ 15140 if (un->un_ncmds_in_transport >= un->un_throttle) { 15141 /* 15142 * We are at the throttle limit for the target, 15143 * fall back to delayed retry. 15144 */ 15145 retry_delay = un->un_busy_timeout; 15146 statp = kstat_waitq_enter; 15147 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15148 "sd_retry_command: immed. retry hit " 15149 "throttle!\n"); 15150 } else { 15151 /* 15152 * We're clear to proceed with the immediate retry.
15153 * First call the user-provided function (if any) 15154 */ 15155 if (user_funcp != NULL) { 15156 (*user_funcp)(un, bp, user_arg, 15157 SD_IMMEDIATE_RETRY_ISSUED); 15158 #ifdef __lock_lint 15159 sd_print_incomplete_msg(un, bp, user_arg, 15160 SD_IMMEDIATE_RETRY_ISSUED); 15161 sd_print_cmd_incomplete_msg(un, bp, user_arg, 15162 SD_IMMEDIATE_RETRY_ISSUED); 15163 sd_print_sense_failed_msg(un, bp, user_arg, 15164 SD_IMMEDIATE_RETRY_ISSUED); 15165 #endif 15166 } 15167 15168 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15169 "sd_retry_command: issuing immediate retry\n"); 15170 15171 /* 15172 * Call sd_start_cmds() to transport the command to 15173 * the target. 15174 */ 15175 sd_start_cmds(un, bp); 15176 15177 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15178 "sd_retry_command exit\n"); 15179 return; 15180 } 15181 } 15182 15183 /* 15184 * Set up to retry the command after a delay. 15185 * First call the user-provided function (if any) 15186 */ 15187 if (user_funcp != NULL) { 15188 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 15189 } 15190 15191 sd_set_retry_bp(un, bp, retry_delay, statp); 15192 15193 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15194 return; 15195 15196 fail_command: 15197 15198 if (user_funcp != NULL) { 15199 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 15200 } 15201 15202 fail_command_no_log: 15203 15204 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15205 "sd_retry_command: returning failed command\n"); 15206 15207 sd_return_failed_command(un, bp, failure_code); 15208 15209 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15210 } 15211 15212 15213 /* 15214 * Function: sd_set_retry_bp 15215 * 15216 * Description: Set up the given bp for retry. 15217 * 15218 * Arguments: un - ptr to associated softstate 15219 * bp - ptr to buf(9S) for the command 15220 * retry_delay - time interval before issuing retry (may be 0) 15221 * statp - optional pointer to kstat function 15222 * 15223 * Context: May be called under interrupt context 15224 */ 15225 15226 static void 15227 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 15228 void (*statp)(kstat_io_t *)) 15229 { 15230 ASSERT(un != NULL); 15231 ASSERT(mutex_owned(SD_MUTEX(un))); 15232 ASSERT(bp != NULL); 15233 15234 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15235 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 15236 15237 /* 15238 * Indicate that the command is being retried. This will not allow any 15239 * other commands on the wait queue to be transported to the target 15240 * until this command has been completed (success or failure). The 15241 * "retry command" is not transported to the target until the given 15242 * time delay expires, unless the user specified a 0 retry_delay. 15243 * 15244 * Note: the timeout(9F) callback routine is what actually calls 15245 * sd_start_cmds() to transport the command, with the exception of a 15246 * zero retry_delay. The only current implementor of a zero retry delay 15247 * is the case where a START_STOP_UNIT is sent to spin-up a device. 15248 */ 15249 if (un->un_retry_bp == NULL) { 15250 ASSERT(un->un_retry_statp == NULL); 15251 un->un_retry_bp = bp; 15252 15253 /* 15254 * If the user has not specified a delay the command should 15255 * be queued and no timeout should be scheduled. 
15256 */ 15257 if (retry_delay == 0) { 15258 /* 15259 * Save the kstat pointer that will be used in the 15260 * call to SD_UPDATE_KSTATS() below, so that 15261 * sd_start_cmds() can correctly decrement the waitq 15262 * count when it is time to transport this command. 15263 */ 15264 un->un_retry_statp = statp; 15265 goto done; 15266 } 15267 } 15268 15269 if (un->un_retry_bp == bp) { 15270 /* 15271 * Save the kstat pointer that will be used in the call to 15272 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 15273 * correctly decrement the waitq count when it is time to 15274 * transport this command. 15275 */ 15276 un->un_retry_statp = statp; 15277 15278 /* 15279 * Schedule a timeout if: 15280 * 1) The user has specified a delay. 15281 * 2) There is not a START_STOP_UNIT callback pending. 15282 * 15283 * If no delay has been specified, then it is up to the caller 15284 * to ensure that IO processing continues without stalling. 15285 * Effectively, this means that the caller will issue the 15286 * required call to sd_start_cmds(). The START_STOP_UNIT 15287 * callback does this after the START STOP UNIT command has 15288 * completed. In either of these cases we should not schedule 15289 * a timeout callback here. Also don't schedule the timeout if 15290 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 15291 */ 15292 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 15293 (un->un_direct_priority_timeid == NULL)) { 15294 un->un_retry_timeid = 15295 timeout(sd_start_retry_command, un, retry_delay); 15296 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15297 "sd_set_retry_bp: setting timeout: un: 0x%p" 15298 " bp:0x%p un_retry_timeid:0x%p\n", 15299 un, bp, un->un_retry_timeid); 15300 } 15301 } else { 15302 /* 15303 * We only get in here if there is already another command 15304 * waiting to be retried. In this case, we just put the 15305 * given command onto the wait queue, so it can be transported 15306 * after the current retry command has completed. 15307 * 15308 * Also we have to make sure that if the command at the head 15309 * of the wait queue is the un_failfast_bp, that we do not 15310 * put ahead of it any other commands that are to be retried. 15311 */ 15312 if ((un->un_failfast_bp != NULL) && 15313 (un->un_failfast_bp == un->un_waitq_headp)) { 15314 /* 15315 * Enqueue this command AFTER the first command on 15316 * the wait queue (which is also un_failfast_bp). 15317 */ 15318 bp->av_forw = un->un_waitq_headp->av_forw; 15319 un->un_waitq_headp->av_forw = bp; 15320 if (un->un_waitq_headp == un->un_waitq_tailp) { 15321 un->un_waitq_tailp = bp; 15322 } 15323 } else { 15324 /* Enqueue this command at the head of the waitq. */ 15325 bp->av_forw = un->un_waitq_headp; 15326 un->un_waitq_headp = bp; 15327 if (un->un_waitq_tailp == NULL) { 15328 un->un_waitq_tailp = bp; 15329 } 15330 } 15331 15332 if (statp == NULL) { 15333 statp = kstat_waitq_enter; 15334 } 15335 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15336 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 15337 } 15338 15339 done: 15340 if (statp != NULL) { 15341 SD_UPDATE_KSTATS(un, statp, bp); 15342 } 15343 15344 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15345 "sd_set_retry_bp: exit un:0x%p\n", un); 15346 } 15347 15348 15349 /* 15350 * Function: sd_start_retry_command 15351 * 15352 * Description: Start the command that has been waiting on the target's 15353 * retry queue. Called from timeout(9F) context after the 15354 * retry delay interval has expired. 
15355 * 15356 * Arguments: arg - pointer to associated softstate for the device. 15357 * 15358 * Context: timeout(9F) thread context. May not sleep. 15359 */ 15360 15361 static void 15362 sd_start_retry_command(void *arg) 15363 { 15364 struct sd_lun *un = arg; 15365 15366 ASSERT(un != NULL); 15367 ASSERT(!mutex_owned(SD_MUTEX(un))); 15368 15369 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15370 "sd_start_retry_command: entry\n"); 15371 15372 mutex_enter(SD_MUTEX(un)); 15373 15374 un->un_retry_timeid = NULL; 15375 15376 if (un->un_retry_bp != NULL) { 15377 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15378 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 15379 un, un->un_retry_bp); 15380 sd_start_cmds(un, un->un_retry_bp); 15381 } 15382 15383 mutex_exit(SD_MUTEX(un)); 15384 15385 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15386 "sd_start_retry_command: exit\n"); 15387 } 15388 15389 15390 /* 15391 * Function: sd_start_direct_priority_command 15392 * 15393 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 15394 * received TRAN_BUSY when we called scsi_transport() to send it 15395 * to the underlying HBA. This function is called from timeout(9F) 15396 * context after the delay interval has expired. 15397 * 15398 * Arguments: arg - pointer to associated buf(9S) to be restarted. 15399 * 15400 * Context: timeout(9F) thread context. May not sleep. 15401 */ 15402 15403 static void 15404 sd_start_direct_priority_command(void *arg) 15405 { 15406 struct buf *priority_bp = arg; 15407 struct sd_lun *un; 15408 15409 ASSERT(priority_bp != NULL); 15410 un = SD_GET_UN(priority_bp); 15411 ASSERT(un != NULL); 15412 ASSERT(!mutex_owned(SD_MUTEX(un))); 15413 15414 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15415 "sd_start_direct_priority_command: entry\n"); 15416 15417 mutex_enter(SD_MUTEX(un)); 15418 un->un_direct_priority_timeid = NULL; 15419 sd_start_cmds(un, priority_bp); 15420 mutex_exit(SD_MUTEX(un)); 15421 15422 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15423 "sd_start_direct_priority_command: exit\n"); 15424 } 15425 15426 15427 /* 15428 * Function: sd_send_request_sense_command 15429 * 15430 * Description: Sends a REQUEST SENSE command to the target 15431 * 15432 * Context: May be called from interrupt context. 15433 */ 15434 15435 static void 15436 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 15437 struct scsi_pkt *pktp) 15438 { 15439 ASSERT(bp != NULL); 15440 ASSERT(un != NULL); 15441 ASSERT(mutex_owned(SD_MUTEX(un))); 15442 15443 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 15444 "entry: buf:0x%p\n", bp); 15445 15446 /* 15447 * If we are syncing or dumping, then fail the command to avoid a 15448 * recursive callback into scsi_transport(). Also fail the command 15449 * if we are suspended (legacy behavior). 15450 */ 15451 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 15452 (un->un_state == SD_STATE_DUMPING)) { 15453 sd_return_failed_command(un, bp, EIO); 15454 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15455 "sd_send_request_sense_command: syncing/dumping, exit\n"); 15456 return; 15457 } 15458 15459 /* 15460 * Retry the failed command and don't issue the request sense if: 15461 * 1) the sense buf is busy 15462 * 2) we have 1 or more outstanding commands on the target 15463 * (the sense data will be cleared or invalidated anyway) 15464 * 15465 * Note: There could be an issue with not checking a retry limit here; 15466 * the problem is determining which retry limit to check.
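*
* (Editorial background, hedged: under SCSI-2 contingent
* allegiance rules, sense data describes only the most recent
* check condition and is cleared once another command from the
* same initiator reaches the device. That is why, with other
* commands outstanding, retrying the failed command is preferred
* over issuing a REQUEST SENSE whose data would likely be stale.)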
15467 */ 15468 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 15469 /* Don't retry if the command is flagged as non-retryable */ 15470 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15471 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 15472 NULL, NULL, 0, un->un_busy_timeout, 15473 kstat_waitq_enter); 15474 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15475 "sd_send_request_sense_command: " 15476 "at full throttle, retrying exit\n"); 15477 } else { 15478 sd_return_failed_command(un, bp, EIO); 15479 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15480 "sd_send_request_sense_command: " 15481 "at full throttle, non-retryable exit\n"); 15482 } 15483 return; 15484 } 15485 15486 sd_mark_rqs_busy(un, bp); 15487 sd_start_cmds(un, un->un_rqs_bp); 15488 15489 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15490 "sd_send_request_sense_command: exit\n"); 15491 } 15492 15493 15494 /* 15495 * Function: sd_mark_rqs_busy 15496 * 15497 * Description: Indicate that the request sense bp for this instance is 15498 * in use. 15499 * 15500 * Context: May be called under interrupt context 15501 */ 15502 15503 static void 15504 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 15505 { 15506 struct sd_xbuf *sense_xp; 15507 15508 ASSERT(un != NULL); 15509 ASSERT(bp != NULL); 15510 ASSERT(mutex_owned(SD_MUTEX(un))); 15511 ASSERT(un->un_sense_isbusy == 0); 15512 15513 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 15514 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 15515 15516 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 15517 ASSERT(sense_xp != NULL); 15518 15519 SD_INFO(SD_LOG_IO, un, 15520 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 15521 15522 ASSERT(sense_xp->xb_pktp != NULL); 15523 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 15524 == (FLAG_SENSING | FLAG_HEAD)); 15525 15526 un->un_sense_isbusy = 1; 15527 un->un_rqs_bp->b_resid = 0; 15528 sense_xp->xb_pktp->pkt_resid = 0; 15529 sense_xp->xb_pktp->pkt_reason = 0; 15530 15531 /* So we can get back the bp at interrupt time! */ 15532 sense_xp->xb_sense_bp = bp; 15533 15534 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 15535 15536 /* 15537 * Mark this buf as awaiting sense data. (This is already set in 15538 * the pkt_flags for the RQS packet.) 15539 */ 15540 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 15541 15542 /* Request sense down same path */ 15543 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) && 15544 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance) 15545 sense_xp->xb_pktp->pkt_path_instance = 15546 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance; 15547 15548 sense_xp->xb_retry_count = 0; 15549 sense_xp->xb_victim_retry_count = 0; 15550 sense_xp->xb_ua_retry_count = 0; 15551 sense_xp->xb_nr_retry_count = 0; 15552 sense_xp->xb_dma_resid = 0; 15553 15554 /* Clean up the fields for auto-request sense */ 15555 sense_xp->xb_sense_status = 0; 15556 sense_xp->xb_sense_state = 0; 15557 sense_xp->xb_sense_resid = 0; 15558 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 15559 15560 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 15561 } 15562 15563 15564 /* 15565 * Function: sd_mark_rqs_idle 15566 * 15567 * Description: SD_MUTEX must be held continuously through this routine 15568 * to prevent reuse of the rqs struct before the caller can 15569 * complete its processing.
15570 * 15571 * Return Code: Pointer to the RQS buf 15572 * 15573 * Context: May be called under interrupt context 15574 */ 15575 15576 static struct buf * 15577 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 15578 { 15579 struct buf *bp; 15580 ASSERT(un != NULL); 15581 ASSERT(sense_xp != NULL); 15582 ASSERT(mutex_owned(SD_MUTEX(un))); 15583 ASSERT(un->un_sense_isbusy != 0); 15584 15585 un->un_sense_isbusy = 0; 15586 bp = sense_xp->xb_sense_bp; 15587 sense_xp->xb_sense_bp = NULL; 15588 15589 /* This pkt is no longer interested in getting sense data */ 15590 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 15591 15592 return (bp); 15593 } 15594 15595 15596 15597 /* 15598 * Function: sd_alloc_rqs 15599 * 15600 * Description: Set up the unit to receive auto request sense data 15601 * 15602 * Return Code: DDI_SUCCESS or DDI_FAILURE 15603 * 15604 * Context: Called under attach(9E) context 15605 */ 15606 15607 static int 15608 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 15609 { 15610 struct sd_xbuf *xp; 15611 15612 ASSERT(un != NULL); 15613 ASSERT(!mutex_owned(SD_MUTEX(un))); 15614 ASSERT(un->un_rqs_bp == NULL); 15615 ASSERT(un->un_rqs_pktp == NULL); 15616 15617 /* 15618 * First allocate the required buf and scsi_pkt structs, then set up 15619 * the CDB in the scsi_pkt for a REQUEST SENSE command. 15620 */ 15621 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 15622 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 15623 if (un->un_rqs_bp == NULL) { 15624 return (DDI_FAILURE); 15625 } 15626 15627 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 15628 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 15629 15630 if (un->un_rqs_pktp == NULL) { 15631 sd_free_rqs(un); 15632 return (DDI_FAILURE); 15633 } 15634 15635 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 15636 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 15637 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 15638 15639 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 15640 15641 /* Set up the other needed members in the ARQ scsi_pkt. */ 15642 un->un_rqs_pktp->pkt_comp = sdintr; 15643 un->un_rqs_pktp->pkt_time = sd_io_time; 15644 un->un_rqs_pktp->pkt_flags |= 15645 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 15646 15647 /* 15648 * Allocate & init the sd_xbuf struct for the RQS command. Do not 15649 * provide any intpkt, destroypkt routines as we take care of 15650 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 15651 */ 15652 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 15653 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 15654 xp->xb_pktp = un->un_rqs_pktp; 15655 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15656 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 15657 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 15658 15659 /* 15660 * Save the pointer to the request sense private bp so it can 15661 * be retrieved in sdintr. 15662 */ 15663 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 15664 ASSERT(un->un_rqs_bp->b_private == xp); 15665 15666 /* 15667 * See if the HBA supports auto-request sense for the specified 15668 * target/lun. If it does, then try to enable it (if not already 15669 * enabled). 15670 * 15671 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 15672 * failure, while for other HBAs (pln) scsi_ifsetcap will always 15673 * return success. However, in both of these cases ARQ is always 15674 * enabled and scsi_ifgetcap will always return true. 
The best approach 15675 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 15676 * 15677 * In the 3rd case, the HBA (adp) always returns enabled on 15678 * scsi_ifgetcap even when ARQ is not enabled; the best approach 15679 * is to issue a scsi_ifsetcap and then a scsi_ifgetcap. 15680 * Note: this case is to circumvent the Adaptec bug. (x86 only) 15681 */ 15682 15683 if (un->un_f_is_fibre == TRUE) { 15684 un->un_f_arq_enabled = TRUE; 15685 } else { 15686 #if defined(__i386) || defined(__amd64) 15687 /* 15688 * Circumvent the Adaptec bug, remove this code when 15689 * the bug is fixed 15690 */ 15691 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 15692 #endif 15693 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 15694 case 0: 15695 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15696 "sd_alloc_rqs: HBA supports ARQ\n"); 15697 /* 15698 * ARQ is supported by this HBA but currently is not 15699 * enabled. Attempt to enable it and if successful then 15700 * mark this instance as ARQ enabled. 15701 */ 15702 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 15703 == 1) { 15704 /* Successfully enabled ARQ in the HBA */ 15705 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15706 "sd_alloc_rqs: ARQ enabled\n"); 15707 un->un_f_arq_enabled = TRUE; 15708 } else { 15709 /* Could not enable ARQ in the HBA */ 15710 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15711 "sd_alloc_rqs: failed ARQ enable\n"); 15712 un->un_f_arq_enabled = FALSE; 15713 } 15714 break; 15715 case 1: 15716 /* 15717 * ARQ is supported by this HBA and is already enabled. 15718 * Just mark ARQ as enabled for this instance. 15719 */ 15720 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15721 "sd_alloc_rqs: ARQ already enabled\n"); 15722 un->un_f_arq_enabled = TRUE; 15723 break; 15724 default: 15725 /* 15726 * ARQ is not supported by this HBA; disable it for this 15727 * instance. 15728 */ 15729 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15730 "sd_alloc_rqs: HBA does not support ARQ\n"); 15731 un->un_f_arq_enabled = FALSE; 15732 break; 15733 } 15734 } 15735 15736 return (DDI_SUCCESS); 15737 } 15738 15739 15740 /* 15741 * Function: sd_free_rqs 15742 * 15743 * Description: Cleanup for the per-instance RQS command. 15744 * 15745 * Context: Kernel thread context 15746 */ 15747 15748 static void 15749 sd_free_rqs(struct sd_lun *un) 15750 { 15751 ASSERT(un != NULL); 15752 15753 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 15754 15755 /* 15756 * If consistent memory is bound to a scsi_pkt, the pkt 15757 * has to be destroyed *before* freeing the consistent memory. 15758 * Don't change the sequence of these operations: 15759 * scsi_destroy_pkt() might otherwise access memory that was 15760 * already freed by scsi_free_consistent_buf(), which isn't allowed. 15761 */ 15762 if (un->un_rqs_pktp != NULL) { 15763 scsi_destroy_pkt(un->un_rqs_pktp); 15764 un->un_rqs_pktp = NULL; 15765 } 15766 15767 if (un->un_rqs_bp != NULL) { 15768 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 15769 if (xp != NULL) { 15770 kmem_free(xp, sizeof (struct sd_xbuf)); 15771 } 15772 scsi_free_consistent_buf(un->un_rqs_bp); 15773 un->un_rqs_bp = NULL; 15774 } 15775 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 15776 } 15777 15778 15779 15780 /* 15781 * Function: sd_reduce_throttle 15782 * 15783 * Description: Reduces the maximum # of outstanding commands on a 15784 * target to the current number of outstanding commands. 15785 * Queues a timeout(9F) callback to restore the limit 15786 * after a specified interval has elapsed.
15787 * Typically used when we get a TRAN_BUSY return code 15788 * back from scsi_transport(). 15789 * 15790 * Arguments: un - ptr to the sd_lun softstate struct 15791 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 15792 * 15793 * Context: May be called from interrupt context 15794 */ 15795 15796 static void 15797 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 15798 { 15799 ASSERT(un != NULL); 15800 ASSERT(mutex_owned(SD_MUTEX(un))); 15801 ASSERT(un->un_ncmds_in_transport >= 0); 15802 15803 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15804 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 15805 un, un->un_throttle, un->un_ncmds_in_transport); 15806 15807 if (un->un_throttle > 1) { 15808 if (un->un_f_use_adaptive_throttle == TRUE) { 15809 switch (throttle_type) { 15810 case SD_THROTTLE_TRAN_BUSY: 15811 if (un->un_busy_throttle == 0) { 15812 un->un_busy_throttle = un->un_throttle; 15813 } 15814 break; 15815 case SD_THROTTLE_QFULL: 15816 un->un_busy_throttle = 0; 15817 break; 15818 default: 15819 ASSERT(FALSE); 15820 } 15821 15822 if (un->un_ncmds_in_transport > 0) { 15823 un->un_throttle = un->un_ncmds_in_transport; 15824 } 15825 15826 } else { 15827 if (un->un_ncmds_in_transport == 0) { 15828 un->un_throttle = 1; 15829 } else { 15830 un->un_throttle = un->un_ncmds_in_transport; 15831 } 15832 } 15833 } 15834 15835 /* Reschedule the timeout if none is currently active */ 15836 if (un->un_reset_throttle_timeid == NULL) { 15837 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 15838 un, SD_THROTTLE_RESET_INTERVAL); 15839 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15840 "sd_reduce_throttle: timeout scheduled!\n"); 15841 } 15842 15843 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15844 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15845 } 15846 15847 15848 15849 /* 15850 * Function: sd_restore_throttle 15851 * 15852 * Description: Callback function for timeout(9F). Resets the current 15853 * value of un->un_throttle to its default. 15854 * 15855 * Arguments: arg - pointer to associated softstate for the device. 15856 * 15857 * Context: May be called from interrupt context 15858 */ 15859 15860 static void 15861 sd_restore_throttle(void *arg) 15862 { 15863 struct sd_lun *un = arg; 15864 15865 ASSERT(un != NULL); 15866 ASSERT(!mutex_owned(SD_MUTEX(un))); 15867 15868 mutex_enter(SD_MUTEX(un)); 15869 15870 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 15871 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15872 15873 un->un_reset_throttle_timeid = NULL; 15874 15875 if (un->un_f_use_adaptive_throttle == TRUE) { 15876 /* 15877 * If un_busy_throttle is nonzero, then it contains the 15878 * value that un_throttle had when we got a TRAN_BUSY back 15879 * from scsi_transport(). We want to revert to this 15880 * value. 15881 * 15882 * In the QFULL case, the throttle limit will incrementally 15883 * increase until it reaches max throttle. 15884 */ 15885 if (un->un_busy_throttle > 0) { 15886 un->un_throttle = un->un_busy_throttle; 15887 un->un_busy_throttle = 0; 15888 } else { 15889 /* 15890 * Increase the throttle by 10% to open the gate slowly; 15891 * schedule another restore if the saved throttle has 15892 * not been reached. 15893 */ 15894 short throttle; 15895 if (sd_qfull_throttle_enable) { 15896 throttle = un->un_throttle + 15897 max((un->un_throttle / 10), 1); 15898 un->un_throttle = 15899 (throttle < un->un_saved_throttle) ?
15900 throttle : un->un_saved_throttle; 15901 if (un->un_throttle < un->un_saved_throttle) { 15902 un->un_reset_throttle_timeid = 15903 timeout(sd_restore_throttle, 15904 un, 15905 SD_QFULL_THROTTLE_RESET_INTERVAL); 15906 } 15907 } 15908 } 15909 15910 /* 15911 * If un_throttle has fallen below the low-water mark, we 15912 * restore the maximum value here (and allow it to ratchet 15913 * down again if necessary). 15914 */ 15915 if (un->un_throttle < un->un_min_throttle) { 15916 un->un_throttle = un->un_saved_throttle; 15917 } 15918 } else { 15919 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 15920 "restoring limit from 0x%x to 0x%x\n", 15921 un->un_throttle, un->un_saved_throttle); 15922 un->un_throttle = un->un_saved_throttle; 15923 } 15924 15925 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15926 "sd_restore_throttle: calling sd_start_cmds!\n"); 15927 15928 sd_start_cmds(un, NULL); 15929 15930 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15931 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 15932 un, un->un_throttle); 15933 15934 mutex_exit(SD_MUTEX(un)); 15935 15936 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 15937 } 15938 15939 /* 15940 * Function: sdrunout 15941 * 15942 * Description: Callback routine for scsi_init_pkt when a resource allocation 15943 * fails. 15944 * 15945 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 15946 * soft state instance. 15947 * 15948 * Return Code: The scsi_init_pkt routine allows for the callback function to 15949 * return a 0 indicating the callback should be rescheduled or a 1 15950 * indicating not to reschedule. This routine always returns 1 15951 * because the driver always provides a callback function to 15952 * scsi_init_pkt. This results in a callback always being scheduled 15953 * (via the scsi_init_pkt callback implementation) if a resource 15954 * failure occurs. 15955 * 15956 * Context: This callback function may not block or call routines that block 15957 * 15958 * Note: Using the scsi_init_pkt callback facility can result in an I/O 15959 * request persisting at the head of the list which cannot be 15960 * satisfied even after multiple retries. In the future the driver 15961 * may implement some type of maximum runout count before failing 15962 * an I/O. 15963 */ 15964 15965 static int 15966 sdrunout(caddr_t arg) 15967 { 15968 struct sd_lun *un = (struct sd_lun *)arg; 15969 15970 ASSERT(un != NULL); 15971 ASSERT(!mutex_owned(SD_MUTEX(un))); 15972 15973 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 15974 15975 mutex_enter(SD_MUTEX(un)); 15976 sd_start_cmds(un, NULL); 15977 mutex_exit(SD_MUTEX(un)); 15978 /* 15979 * This callback routine always returns 1 (i.e. do not reschedule) 15980 * because we always specify sdrunout as the callback handler for 15981 * scsi_init_pkt inside the call to sd_start_cmds. 15982 */ 15983 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 15984 return (1); 15985 } 15986 15987 15988 /* 15989 * Function: sdintr 15990 * 15991 * Description: Completion callback routine for scsi_pkt(9S) structs 15992 * sent to the HBA driver via scsi_transport(9F).
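* A roadmap of the dispatch order implemented below (an editorial
* summary of this function, not original text): CMD_DEV_GONE
* packets are failed immediately; packets carrying auto request
* sense data (STATE_ARQ_DONE) are handed to
* sd_handle_auto_request_sense(); the driver's own REQUEST SENSE
* packet (FLAG_SENSING) goes to sd_handle_request_sense();
* commands that completed successfully take the hot path through
* sd_return_command(); everything else falls into the
* pkt_reason/pkt_status switches below for retry or recovery.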
15993 * 15994 * Context: Interrupt context 15995 */ 15996 15997 static void 15998 sdintr(struct scsi_pkt *pktp) 15999 { 16000 struct buf *bp; 16001 struct sd_xbuf *xp; 16002 struct sd_lun *un; 16003 size_t actual_len; 16004 sd_ssc_t *sscp; 16005 16006 ASSERT(pktp != NULL); 16007 bp = (struct buf *)pktp->pkt_private; 16008 ASSERT(bp != NULL); 16009 xp = SD_GET_XBUF(bp); 16010 ASSERT(xp != NULL); 16011 ASSERT(xp->xb_pktp != NULL); 16012 un = SD_GET_UN(bp); 16013 ASSERT(un != NULL); 16014 ASSERT(!mutex_owned(SD_MUTEX(un))); 16015 16016 #ifdef SD_FAULT_INJECTION 16017 16018 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 16019 /* SD FaultInjection */ 16020 sd_faultinjection(pktp); 16021 16022 #endif /* SD_FAULT_INJECTION */ 16023 16024 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 16025 " xp:0x%p, un:0x%p\n", bp, xp, un); 16026 16027 mutex_enter(SD_MUTEX(un)); 16028 16029 ASSERT(un->un_fm_private != NULL); 16030 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16031 ASSERT(sscp != NULL); 16032 16033 /* Reduce the count of the #commands currently in transport */ 16034 un->un_ncmds_in_transport--; 16035 ASSERT(un->un_ncmds_in_transport >= 0); 16036 16037 /* Increment counter to indicate that the callback routine is active */ 16038 un->un_in_callback++; 16039 16040 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 16041 16042 #ifdef SDDEBUG 16043 if (bp == un->un_retry_bp) { 16044 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 16045 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 16046 un, un->un_retry_bp, un->un_ncmds_in_transport); 16047 } 16048 #endif 16049 16050 /* 16051 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 16052 * state if needed. 16053 */ 16054 if (pktp->pkt_reason == CMD_DEV_GONE) { 16055 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16056 "Command failed to complete...Device is gone\n"); 16057 if (un->un_mediastate != DKIO_DEV_GONE) { 16058 un->un_mediastate = DKIO_DEV_GONE; 16059 cv_broadcast(&un->un_state_cv); 16060 } 16061 sd_return_failed_command(un, bp, EIO); 16062 goto exit; 16063 } 16064 16065 if (pktp->pkt_state & STATE_XARQ_DONE) { 16066 SD_TRACE(SD_LOG_COMMON, un, 16067 "sdintr: extra sense data received. pkt=%p\n", pktp); 16068 } 16069 16070 /* 16071 * First see if the pkt has auto-request sense data with it.... 16072 * Look at the packet state first so we don't take a performance 16073 * hit looking at the arq enabled flag unless absolutely necessary. 16074 */ 16075 if ((pktp->pkt_state & STATE_ARQ_DONE) && 16076 (un->un_f_arq_enabled == TRUE)) { 16077 /* 16078 * The HBA did an auto request sense for this command so check 16079 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16080 * driver command that should not be retried. 16081 */ 16082 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16083 /* 16084 * Save the relevant sense info into the xp for the 16085 * original cmd. 
16086 */ 16087 struct scsi_arq_status *asp; 16088 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16089 xp->xb_sense_status = 16090 *((uchar_t *)(&(asp->sts_rqpkt_status))); 16091 xp->xb_sense_state = asp->sts_rqpkt_state; 16092 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16093 if (pktp->pkt_state & STATE_XARQ_DONE) { 16094 actual_len = MAX_SENSE_LENGTH - 16095 xp->xb_sense_resid; 16096 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16097 MAX_SENSE_LENGTH); 16098 } else { 16099 if (xp->xb_sense_resid > SENSE_LENGTH) { 16100 actual_len = MAX_SENSE_LENGTH - 16101 xp->xb_sense_resid; 16102 } else { 16103 actual_len = SENSE_LENGTH - 16104 xp->xb_sense_resid; 16105 } 16106 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16107 if ((((struct uscsi_cmd *) 16108 (xp->xb_pktinfo))->uscsi_rqlen) > 16109 actual_len) { 16110 xp->xb_sense_resid = 16111 (((struct uscsi_cmd *) 16112 (xp->xb_pktinfo))-> 16113 uscsi_rqlen) - actual_len; 16114 } else { 16115 xp->xb_sense_resid = 0; 16116 } 16117 } 16118 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16119 SENSE_LENGTH); 16120 } 16121 16122 /* fail the command */ 16123 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16124 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 16125 sd_return_failed_command(un, bp, EIO); 16126 goto exit; 16127 } 16128 16129 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16130 /* 16131 * We want to either retry or fail this command, so free 16132 * the DMA resources here. If we retry the command then 16133 * the DMA resources will be reallocated in sd_start_cmds(). 16134 * Note that when PKT_DMA_PARTIAL is used, this reallocation 16135 * causes the *entire* transfer to start over again from the 16136 * beginning of the request, even for PARTIAL chunks that 16137 * have already transferred successfully. 16138 */ 16139 if ((un->un_f_is_fibre == TRUE) && 16140 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16141 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16142 scsi_dmafree(pktp); 16143 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16144 } 16145 #endif 16146 16147 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16148 "sdintr: arq done, sd_handle_auto_request_sense\n"); 16149 16150 sd_handle_auto_request_sense(un, bp, xp, pktp); 16151 goto exit; 16152 } 16153 16154 /* Next see if this is the REQUEST SENSE pkt for the instance */ 16155 if (pktp->pkt_flags & FLAG_SENSING) { 16156 /* This pktp is from the unit's REQUEST_SENSE command */ 16157 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16158 "sdintr: sd_handle_request_sense\n"); 16159 sd_handle_request_sense(un, bp, xp, pktp); 16160 goto exit; 16161 } 16162 16163 /* 16164 * Check to see if the command successfully completed as requested; 16165 * this is the most common case (and also the hot performance path). 16166 * 16167 * Requirements for successful completion are: 16168 * pkt_reason is CMD_CMPLT and packet status is status good. 16169 * In addition: 16170 * - A residual of zero indicates successful completion no matter what 16171 * the command is. 16172 * - If the residual is not zero and the command is not a read or 16173 * write, then it's still defined as successful completion. In other 16174 * words, if the command is a read or write the residual must be 16175 * zero for successful completion. 16176 * - If the residual is not zero and the command is a read or 16177 * write, and it's a USCSICMD, then it's still defined as 16178 * successful completion. 
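*
* Restated as a decision table (an editorial condensation of the
* rules above):
*
*	pkt_resid == 0				-> successful completion
*	pkt_resid != 0, not READ/WRITE		-> successful (b_resid updated)
*	pkt_resid != 0, READ/WRITE, USCSI	-> successful (b_resid updated)
*	pkt_resid != 0, READ/WRITE, non-USCSI	-> not successful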
16179 */ 16180 if ((pktp->pkt_reason == CMD_CMPLT) && 16181 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 16182 16183 /* 16184 * Since this command is returned with a good status, we 16185 * can reset the count for Sonoma failover. 16186 */ 16187 un->un_sonoma_failure_count = 0; 16188 16189 /* 16190 * Return all USCSI commands on good status 16191 */ 16192 if (pktp->pkt_resid == 0) { 16193 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16194 "sdintr: returning command for resid == 0\n"); 16195 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 16196 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 16197 SD_UPDATE_B_RESID(bp, pktp); 16198 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16199 "sdintr: returning command for resid != 0\n"); 16200 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16201 SD_UPDATE_B_RESID(bp, pktp); 16202 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16203 "sdintr: returning uscsi command\n"); 16204 } else { 16205 goto not_successful; 16206 } 16207 sd_return_command(un, bp); 16208 16209 /* 16210 * Decrement counter to indicate that the callback routine 16211 * is done. 16212 */ 16213 un->un_in_callback--; 16214 ASSERT(un->un_in_callback >= 0); 16215 mutex_exit(SD_MUTEX(un)); 16216 16217 return; 16218 } 16219 16220 not_successful: 16221 16222 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16223 /* 16224 * The following is based upon knowledge of the underlying transport 16225 * and its use of DMA resources. This code should be removed when 16226 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 16227 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 16228 * and sd_start_cmds(). 16229 * 16230 * Free any DMA resources associated with this command if there 16231 * is a chance it could be retried or enqueued for later retry. 16232 * If we keep the DMA binding then mpxio cannot reissue the 16233 * command on another path whenever a path failure occurs. 16234 * 16235 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 16236 * causes the *entire* transfer to start over again from the 16237 * beginning of the request, even for PARTIAL chunks that 16238 * have already transferred successfully. 16239 * 16240 * This is only done for non-uscsi commands (and also skipped for the 16241 * driver's internal RQS command). Also just do this for Fibre Channel 16242 * devices as these are the only ones that support mpxio. 16243 */ 16244 if ((un->un_f_is_fibre == TRUE) && 16245 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16246 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16247 scsi_dmafree(pktp); 16248 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16249 } 16250 #endif 16251 16252 /* 16253 * The command did not successfully complete as requested so check 16254 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16255 * driver command that should not be retried so just return. If 16256 * FLAG_DIAGNOSE is not set the error will be processed below. 16257 */ 16258 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16259 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16260 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 16261 /* 16262 * Issue a request sense if a check condition caused the error 16263 * (we handle the auto request sense case above), otherwise 16264 * just fail the command. 
16265 */ 16266 if ((pktp->pkt_reason == CMD_CMPLT) && 16267 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 16268 sd_send_request_sense_command(un, bp, pktp); 16269 } else { 16270 sd_return_failed_command(un, bp, EIO); 16271 } 16272 goto exit; 16273 } 16274 16275 /* 16276 * The command did not successfully complete as requested so process 16277 * the error, retry, and/or attempt recovery. 16278 */ 16279 switch (pktp->pkt_reason) { 16280 case CMD_CMPLT: 16281 switch (SD_GET_PKT_STATUS(pktp)) { 16282 case STATUS_GOOD: 16283 /* 16284 * The command completed successfully with a non-zero 16285 * residual 16286 */ 16287 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16288 "sdintr: STATUS_GOOD \n"); 16289 sd_pkt_status_good(un, bp, xp, pktp); 16290 break; 16291 16292 case STATUS_CHECK: 16293 case STATUS_TERMINATED: 16294 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16295 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 16296 sd_pkt_status_check_condition(un, bp, xp, pktp); 16297 break; 16298 16299 case STATUS_BUSY: 16300 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16301 "sdintr: STATUS_BUSY\n"); 16302 sd_pkt_status_busy(un, bp, xp, pktp); 16303 break; 16304 16305 case STATUS_RESERVATION_CONFLICT: 16306 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16307 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 16308 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16309 break; 16310 16311 case STATUS_QFULL: 16312 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16313 "sdintr: STATUS_QFULL\n"); 16314 sd_pkt_status_qfull(un, bp, xp, pktp); 16315 break; 16316 16317 case STATUS_MET: 16318 case STATUS_INTERMEDIATE: 16319 case STATUS_SCSI2: 16320 case STATUS_INTERMEDIATE_MET: 16321 case STATUS_ACA_ACTIVE: 16322 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16323 "Unexpected SCSI status received: 0x%x\n", 16324 SD_GET_PKT_STATUS(pktp)); 16325 /* 16326 * Mark the ssc_flags for detecting invalid status 16327 * code. 
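 * Any of these statuses is unexpected for the commands this driver
 * issues, so the handling is the same as the default case below:
 * log the status, tag ssc_flags, and fail the command with EIO.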
16328 */ 16329 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 16330 "stat-code"); 16331 sd_return_failed_command(un, bp, EIO); 16332 break; 16333 16334 default: 16335 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16336 "Invalid SCSI status received: 0x%x\n", 16337 SD_GET_PKT_STATUS(pktp)); 16338 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 16339 "stat-code"); 16340 sd_return_failed_command(un, bp, EIO); 16341 break; 16342 16343 } 16344 break; 16345 16346 case CMD_INCOMPLETE: 16347 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16348 "sdintr: CMD_INCOMPLETE\n"); 16349 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 16350 break; 16351 case CMD_TRAN_ERR: 16352 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16353 "sdintr: CMD_TRAN_ERR\n"); 16354 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 16355 break; 16356 case CMD_RESET: 16357 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16358 "sdintr: CMD_RESET \n"); 16359 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 16360 break; 16361 case CMD_ABORTED: 16362 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16363 "sdintr: CMD_ABORTED \n"); 16364 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 16365 break; 16366 case CMD_TIMEOUT: 16367 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16368 "sdintr: CMD_TIMEOUT\n"); 16369 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 16370 break; 16371 case CMD_UNX_BUS_FREE: 16372 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16373 "sdintr: CMD_UNX_BUS_FREE \n"); 16374 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 16375 break; 16376 case CMD_TAG_REJECT: 16377 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16378 "sdintr: CMD_TAG_REJECT\n"); 16379 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 16380 break; 16381 default: 16382 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16383 "sdintr: default\n"); 16384 /* 16385 * Mark the ssc_flags for detecting invalid pkt_reason. 16386 */ 16387 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON, 16388 "pkt-reason"); 16389 sd_pkt_reason_default(un, bp, xp, pktp); 16390 break; 16391 } 16392 16393 exit: 16394 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 16395 16396 /* Decrement counter to indicate that the callback routine is done. */ 16397 un->un_in_callback--; 16398 ASSERT(un->un_in_callback >= 0); 16399 16400 /* 16401 * At this point, the pkt has been dispatched, i.e., it is either 16402 * being re-tried or has been returned to its caller and should 16403 * not be referenced. 16404 */ 16405 16406 mutex_exit(SD_MUTEX(un)); 16407 } 16408 16409 16410 /* 16411 * Function: sd_print_incomplete_msg 16412 * 16413 * Description: Prints the error message for a CMD_INCOMPLETE error. 16414 * 16415 * Arguments: un - ptr to associated softstate for the device. 16416 * bp - ptr to the buf(9S) for the command. 16417 * arg - message string ptr 16418 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 16419 * or SD_NO_RETRY_ISSUED.
16420 * 16421 * Context: May be called under interrupt context 16422 */ 16423 16424 static void 16425 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 16426 { 16427 struct scsi_pkt *pktp; 16428 char *msgp; 16429 char *cmdp = arg; 16430 16431 ASSERT(un != NULL); 16432 ASSERT(mutex_owned(SD_MUTEX(un))); 16433 ASSERT(bp != NULL); 16434 ASSERT(arg != NULL); 16435 pktp = SD_GET_PKTP(bp); 16436 ASSERT(pktp != NULL); 16437 16438 switch (code) { 16439 case SD_DELAYED_RETRY_ISSUED: 16440 case SD_IMMEDIATE_RETRY_ISSUED: 16441 msgp = "retrying"; 16442 break; 16443 case SD_NO_RETRY_ISSUED: 16444 default: 16445 msgp = "giving up"; 16446 break; 16447 } 16448 16449 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16450 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16451 "incomplete %s- %s\n", cmdp, msgp); 16452 } 16453 } 16454 16455 16456 16457 /* 16458 * Function: sd_pkt_status_good 16459 * 16460 * Description: Processing for a STATUS_GOOD code in pkt_status. 16461 * 16462 * Context: May be called under interrupt context 16463 */ 16464 16465 static void 16466 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 16467 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16468 { 16469 char *cmdp; 16470 16471 ASSERT(un != NULL); 16472 ASSERT(mutex_owned(SD_MUTEX(un))); 16473 ASSERT(bp != NULL); 16474 ASSERT(xp != NULL); 16475 ASSERT(pktp != NULL); 16476 ASSERT(pktp->pkt_reason == CMD_CMPLT); 16477 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 16478 ASSERT(pktp->pkt_resid != 0); 16479 16480 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 16481 16482 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16483 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 16484 case SCMD_READ: 16485 cmdp = "read"; 16486 break; 16487 case SCMD_WRITE: 16488 cmdp = "write"; 16489 break; 16490 default: 16491 SD_UPDATE_B_RESID(bp, pktp); 16492 sd_return_command(un, bp); 16493 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 16494 return; 16495 } 16496 16497 /* 16498 * See if we can retry the read/write, preferably immediately. 16499 * If retries are exhausted, then sd_retry_command() will update 16500 * the b_resid count. 16501 */ 16502 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 16503 cmdp, EIO, (clock_t)0, NULL); 16504 16505 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 16506 } 16507 16508 16509 16510 16511 16512 /* 16513 * Function: sd_handle_request_sense 16514 * 16515 * Description: Processing for non-auto Request Sense command. 16516 * 16517 * Arguments: un - ptr to associated softstate 16518 * sense_bp - ptr to buf(9S) for the RQS command 16519 * sense_xp - ptr to the sd_xbuf for the RQS command 16520 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 16521 * 16522 * Context: May be called under interrupt context 16523 */ 16524 16525 static void 16526 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 16527 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 16528 { 16529 struct buf *cmd_bp; /* buf for the original command */ 16530 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 16531 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 16532 size_t actual_len; /* actual sense data length */ 16533 16534 ASSERT(un != NULL); 16535 ASSERT(mutex_owned(SD_MUTEX(un))); 16536 ASSERT(sense_bp != NULL); 16537 ASSERT(sense_xp != NULL); 16538 ASSERT(sense_pktp != NULL); 16539 16540 /* 16541 * Note the sense_bp, sense_xp, and sense_pktp here are for the 16542 * RQS command and not the original command.
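 * The back-link works as follows: sense_xp->xb_sense_bp points at
 * the buf for the original command, and that command's packet
 * carries FLAG_SENSING; the ASSERTs below verify this linkage
 * before cmd_bp, cmd_xp, and cmd_pktp are recovered from it.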
16543 */ 16544 ASSERT(sense_pktp == un->un_rqs_pktp); 16545 ASSERT(sense_bp == un->un_rqs_bp); 16546 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 16547 (FLAG_SENSING | FLAG_HEAD)); 16548 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 16549 FLAG_SENSING) == FLAG_SENSING); 16550 16551 /* These are the bp, xp, and pktp for the original command */ 16552 cmd_bp = sense_xp->xb_sense_bp; 16553 cmd_xp = SD_GET_XBUF(cmd_bp); 16554 cmd_pktp = SD_GET_PKTP(cmd_bp); 16555 16556 if (sense_pktp->pkt_reason != CMD_CMPLT) { 16557 /* 16558 * The REQUEST SENSE command failed. Release the REQUEST 16559 * SENSE command for re-use, get back the bp for the original 16560 * command, and attempt to re-try the original command if 16561 * FLAG_DIAGNOSE is not set in the original packet. 16562 */ 16563 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16564 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16565 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 16566 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 16567 NULL, NULL, EIO, (clock_t)0, NULL); 16568 return; 16569 } 16570 } 16571 16572 /* 16573 * Save the relevant sense info into the xp for the original cmd. 16574 * 16575 * Note: if the request sense failed the state info will be zero 16576 * as set in sd_mark_rqs_busy() 16577 */ 16578 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 16579 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 16580 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 16581 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 16582 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 16583 SENSE_LENGTH)) { 16584 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 16585 MAX_SENSE_LENGTH); 16586 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 16587 } else { 16588 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 16589 SENSE_LENGTH); 16590 if (actual_len < SENSE_LENGTH) { 16591 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 16592 } else { 16593 cmd_xp->xb_sense_resid = 0; 16594 } 16595 } 16596 16597 /* 16598 * Free up the RQS command.... 16599 * NOTE: 16600 * Must do this BEFORE calling sd_validate_sense_data! 16601 * sd_validate_sense_data may return the original command in 16602 * which case the pkt will be freed and the flags can no 16603 * longer be touched. 16604 * SD_MUTEX is held through this process until the command 16605 * is dispatched based upon the sense data, so there are 16606 * no race conditions. 16607 */ 16608 (void) sd_mark_rqs_idle(un, sense_xp); 16609 16610 /* 16611 * For a retryable command see if we have valid sense data, if so then 16612 * turn it over to sd_decode_sense() to figure out the right course of 16613 * action. Just fail a non-retryable command. 16614 */ 16615 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16616 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 16617 SD_SENSE_DATA_IS_VALID) { 16618 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 16619 } 16620 } else { 16621 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 16622 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16623 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 16624 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 16625 sd_return_failed_command(un, cmd_bp, EIO); 16626 } 16627 } 16628 16629 16630 16631 16632 /* 16633 * Function: sd_handle_auto_request_sense 16634 * 16635 * Description: Processing for auto-request sense information. 
16636 * 16637 * Arguments: un - ptr to associated softstate 16638 * bp - ptr to buf(9S) for the command 16639 * xp - ptr to the sd_xbuf for the command 16640 * pktp - ptr to the scsi_pkt(9S) for the command 16641 * 16642 * Context: May be called under interrupt context 16643 */ 16644 16645 static void 16646 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 16647 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16648 { 16649 struct scsi_arq_status *asp; 16650 size_t actual_len; 16651 16652 ASSERT(un != NULL); 16653 ASSERT(mutex_owned(SD_MUTEX(un))); 16654 ASSERT(bp != NULL); 16655 ASSERT(xp != NULL); 16656 ASSERT(pktp != NULL); 16657 ASSERT(pktp != un->un_rqs_pktp); 16658 ASSERT(bp != un->un_rqs_bp); 16659 16660 /* 16661 * For auto-request sense, we get a scsi_arq_status back from 16662 * the HBA, with the sense data in the sts_sensedata member. 16663 * The pkt_scbp of the packet points to this scsi_arq_status. 16664 */ 16665 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16666 16667 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 16668 /* 16669 * The auto REQUEST SENSE failed; see if we can re-try 16670 * the original command. 16671 */ 16672 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16673 "auto request sense failed (reason=%s)\n", 16674 scsi_rname(asp->sts_rqpkt_reason)); 16675 16676 sd_reset_target(un, pktp); 16677 16678 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16679 NULL, NULL, EIO, (clock_t)0, NULL); 16680 return; 16681 } 16682 16683 /* Save the relevant sense info into the xp for the original cmd. */ 16684 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 16685 xp->xb_sense_state = asp->sts_rqpkt_state; 16686 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16687 if (xp->xb_sense_state & STATE_XARQ_DONE) { 16688 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 16689 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16690 MAX_SENSE_LENGTH); 16691 } else { 16692 if (xp->xb_sense_resid > SENSE_LENGTH) { 16693 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 16694 } else { 16695 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 16696 } 16697 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16698 if ((((struct uscsi_cmd *) 16699 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 16700 xp->xb_sense_resid = (((struct uscsi_cmd *) 16701 (xp->xb_pktinfo))->uscsi_rqlen) - 16702 actual_len; 16703 } else { 16704 xp->xb_sense_resid = 0; 16705 } 16706 } 16707 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 16708 } 16709 16710 /* 16711 * See if we have valid sense data, if so then turn it over to 16712 * sd_decode_sense() to figure out the right course of action. 16713 */ 16714 if (sd_validate_sense_data(un, bp, xp, actual_len) == 16715 SD_SENSE_DATA_IS_VALID) { 16716 sd_decode_sense(un, bp, xp, pktp); 16717 } 16718 } 16719 16720 16721 /* 16722 * Function: sd_print_sense_failed_msg 16723 * 16724 * Description: Print log message when RQS has failed. 
16725 * 16726 * Arguments: un - ptr to associated softstate 16727 * bp - ptr to buf(9S) for the command 16728 * arg - generic message string ptr 16729 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16730 * or SD_NO_RETRY_ISSUED 16731 * 16732 * Context: May be called from interrupt context 16733 */ 16734 16735 static void 16736 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 16737 int code) 16738 { 16739 char *msgp = arg; 16740 16741 ASSERT(un != NULL); 16742 ASSERT(mutex_owned(SD_MUTEX(un))); 16743 ASSERT(bp != NULL); 16744 16745 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 16746 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 16747 } 16748 } 16749 16750 16751 /* 16752 * Function: sd_validate_sense_data 16753 * 16754 * Description: Check the given sense data for validity. 16755 * If the sense data is not valid, the command will 16756 * be either failed or retried! 16757 * 16758 * Return Code: SD_SENSE_DATA_IS_INVALID 16759 * SD_SENSE_DATA_IS_VALID 16760 * 16761 * Context: May be called from interrupt context 16762 */ 16763 16764 static int 16765 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 16766 size_t actual_len) 16767 { 16768 struct scsi_extended_sense *esp; 16769 struct scsi_pkt *pktp; 16770 char *msgp = NULL; 16771 sd_ssc_t *sscp; 16772 16773 ASSERT(un != NULL); 16774 ASSERT(mutex_owned(SD_MUTEX(un))); 16775 ASSERT(bp != NULL); 16776 ASSERT(bp != un->un_rqs_bp); 16777 ASSERT(xp != NULL); 16778 ASSERT(un->un_fm_private != NULL); 16779 16780 pktp = SD_GET_PKTP(bp); 16781 ASSERT(pktp != NULL); 16782 16783 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16784 ASSERT(sscp != NULL); 16785 16786 /* 16787 * Check the status of the RQS command (auto or manual). 16788 */ 16789 switch (xp->xb_sense_status & STATUS_MASK) { 16790 case STATUS_GOOD: 16791 break; 16792 16793 case STATUS_RESERVATION_CONFLICT: 16794 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16795 return (SD_SENSE_DATA_IS_INVALID); 16796 16797 case STATUS_BUSY: 16798 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16799 "Busy Status on REQUEST SENSE\n"); 16800 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 16801 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 16802 return (SD_SENSE_DATA_IS_INVALID); 16803 16804 case STATUS_QFULL: 16805 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16806 "QFULL Status on REQUEST SENSE\n"); 16807 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 16808 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 16809 return (SD_SENSE_DATA_IS_INVALID); 16810 16811 case STATUS_CHECK: 16812 case STATUS_TERMINATED: 16813 msgp = "Check Condition on REQUEST SENSE\n"; 16814 goto sense_failed; 16815 16816 default: 16817 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 16818 goto sense_failed; 16819 } 16820 16821 /* 16822 * See if we got the minimum required amount of sense data. 16823 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 16824 * or less. 
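 * In other words: if the target transferred no sense bytes at all
 * (STATE_XFERRED_DATA clear or actual_len == 0), or fewer than
 * SUN_MIN_SENSE_LENGTH bytes, the checks below tag the sense data
 * invalid and branch to sense_failed, which retries the original
 * command rather than decoding garbage.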
16825 */ 16826 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 16827 (actual_len == 0)) { 16828 msgp = "Request Sense couldn't get sense data\n"; 16829 goto sense_failed; 16830 } 16831 16832 if (actual_len < SUN_MIN_SENSE_LENGTH) { 16833 msgp = "Not enough sense information\n"; 16834 /* Mark the ssc_flags for detecting invalid sense data */ 16835 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 16836 "sense-data"); 16837 goto sense_failed; 16838 } 16839 16840 /* 16841 * We require the extended sense data 16842 */ 16843 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 16844 if (esp->es_class != CLASS_EXTENDED_SENSE) { 16845 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16846 static char tmp[8]; 16847 static char buf[148]; 16848 char *p = (char *)(xp->xb_sense_data); 16849 int i; 16850 16851 mutex_enter(&sd_sense_mutex); 16852 (void) strcpy(buf, "undecodable sense information:"); 16853 for (i = 0; i < actual_len; i++) { 16854 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 16855 (void) strcpy(&buf[strlen(buf)], tmp); 16856 } 16857 i = strlen(buf); 16858 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 16859 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 16860 mutex_exit(&sd_sense_mutex); 16861 } 16862 16863 /* Mark the ssc_flags for detecting invalid sense data */ 16864 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 16865 "sense-data"); 16866 16867 /* Note: Legacy behavior, fail the command with no retry */ 16868 sd_return_failed_command(un, bp, EIO); 16869 return (SD_SENSE_DATA_IS_INVALID); 16870 } 16871 16872 /* 16873 * Check that es_code is valid (es_class concatenated with es_code 16874 * make up the "response code" field). es_class will always be 7, so 16875 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 16876 * format. 16877 */ 16878 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 16879 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 16880 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 16881 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 16882 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 16883 /* Mark the ssc_flags for detecting invalid sense data */ 16884 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 16885 "sense-data"); 16886 goto sense_failed; 16887 } 16888 16889 return (SD_SENSE_DATA_IS_VALID); 16890 16891 sense_failed: 16892 /* 16893 * If the request sense failed (for whatever reason), attempt 16894 * to retry the original command. 16895 */ 16896 #if defined(__i386) || defined(__amd64) 16897 /* 16898 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 16899 * sddef.h for the SPARC platform, while x86 uses one binary 16900 * for both SCSI and FC. 16901 * The SD_RETRY_DELAY value needs to be adjusted here 16902 * whenever SD_RETRY_DELAY changes in sddef.h. 16903 */ 16904 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16905 sd_print_sense_failed_msg, msgp, EIO, 16906 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 16907 #else 16908 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16909 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 16910 #endif 16911 16912 return (SD_SENSE_DATA_IS_INVALID); 16913 } 16914 16915 /* 16916 * Function: sd_decode_sense 16917 * 16918 * Description: Take recovery action(s) when SCSI Sense Data is received. 16919 * 16920 * Context: Interrupt context.
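 *
 * Note: this is a pure dispatch on the sense key. For example, a
 * KEY_UNIT_ATTENTION with ASC/ASCQ 0x29/0x00 (power on, reset, or
 * bus device reset occurred) is routed to
 * sd_sense_key_unit_attention(), which decides between retry and
 * failure.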
16921 */ 16922 16923 static void 16924 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 16925 struct scsi_pkt *pktp) 16926 { 16927 uint8_t sense_key; 16928 16929 ASSERT(un != NULL); 16930 ASSERT(mutex_owned(SD_MUTEX(un))); 16931 ASSERT(bp != NULL); 16932 ASSERT(bp != un->un_rqs_bp); 16933 ASSERT(xp != NULL); 16934 ASSERT(pktp != NULL); 16935 16936 sense_key = scsi_sense_key(xp->xb_sense_data); 16937 16938 switch (sense_key) { 16939 case KEY_NO_SENSE: 16940 sd_sense_key_no_sense(un, bp, xp, pktp); 16941 break; 16942 case KEY_RECOVERABLE_ERROR: 16943 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 16944 bp, xp, pktp); 16945 break; 16946 case KEY_NOT_READY: 16947 sd_sense_key_not_ready(un, xp->xb_sense_data, 16948 bp, xp, pktp); 16949 break; 16950 case KEY_MEDIUM_ERROR: 16951 case KEY_HARDWARE_ERROR: 16952 sd_sense_key_medium_or_hardware_error(un, 16953 xp->xb_sense_data, bp, xp, pktp); 16954 break; 16955 case KEY_ILLEGAL_REQUEST: 16956 sd_sense_key_illegal_request(un, bp, xp, pktp); 16957 break; 16958 case KEY_UNIT_ATTENTION: 16959 sd_sense_key_unit_attention(un, xp->xb_sense_data, 16960 bp, xp, pktp); 16961 break; 16962 case KEY_WRITE_PROTECT: 16963 case KEY_VOLUME_OVERFLOW: 16964 case KEY_MISCOMPARE: 16965 sd_sense_key_fail_command(un, bp, xp, pktp); 16966 break; 16967 case KEY_BLANK_CHECK: 16968 sd_sense_key_blank_check(un, bp, xp, pktp); 16969 break; 16970 case KEY_ABORTED_COMMAND: 16971 sd_sense_key_aborted_command(un, bp, xp, pktp); 16972 break; 16973 case KEY_VENDOR_UNIQUE: 16974 case KEY_COPY_ABORTED: 16975 case KEY_EQUAL: 16976 case KEY_RESERVED: 16977 default: 16978 sd_sense_key_default(un, xp->xb_sense_data, 16979 bp, xp, pktp); 16980 break; 16981 } 16982 } 16983 16984 16985 /* 16986 * Function: sd_dump_memory 16987 * 16988 * Description: Debug logging routine to print the contents of a user-provided 16989 * buffer. The output of the buffer is broken up into 256 byte 16990 * segments due to a size constraint of the scsi_log 16991 * implementation. 16992 * 16993 * Arguments: un - ptr to softstate 16994 * comp - component mask 16995 * title - "title" string to precede data when printed 16996 * data - ptr to data block to be printed 16997 * len - size of data block to be printed 16998 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 16999 * 17000 * Context: May be called from interrupt context 17001 */ 17002 17003 #define SD_DUMP_MEMORY_BUF_SIZE 256 17004 17005 static char *sd_dump_format_string[] = { 17006 " 0x%02x", 17007 " %c" 17008 }; 17009 17010 static void 17011 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 17012 int len, int fmt) 17013 { 17014 int i, j; 17015 int avail_count; 17016 int start_offset; 17017 int end_offset; 17018 size_t entry_len; 17019 char *bufp; 17020 char *local_buf; 17021 char *format_string; 17022 17023 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 17024 17025 /* 17026 * In the debug version of the driver, this function is called from a 17027 * number of places which are NOPs in the release driver. 17028 * The debug driver therefore has additional methods of filtering 17029 * debug output. 17030 */ 17031 #ifdef SDDEBUG 17032 /* 17033 * In the debug version of the driver we can reduce the number of debug 17034 * messages by setting sd_error_level to something other than 17035 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 17036 * sd_component_mask.
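 * For example (a hypothetical tuning, not a shipped default):
 * clearing both SD_LOGMASK_DUMP_MEM and SD_LOGMASK_DIAG in
 * sd_level_mask suppresses these dumps entirely, even with
 * sd_error_level left at SCSI_ERR_ALL.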
17037 */ 17038 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 17039 (sd_error_level != SCSI_ERR_ALL)) { 17040 return; 17041 } 17042 if (((sd_component_mask & comp) == 0) || 17043 (sd_error_level != SCSI_ERR_ALL)) { 17044 return; 17045 } 17046 #else 17047 if (sd_error_level != SCSI_ERR_ALL) { 17048 return; 17049 } 17050 #endif 17051 17052 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 17053 bufp = local_buf; 17054 /* 17055 * Available length is the length of local_buf[], minus the 17056 * length of the title string, minus one for the ":", minus 17057 * one for the newline, minus one for the NULL terminator. 17058 * This gives the #bytes available for holding the printed 17059 * values from the given data buffer. 17060 */ 17061 if (fmt == SD_LOG_HEX) { 17062 format_string = sd_dump_format_string[0]; 17063 } else /* SD_LOG_CHAR */ { 17064 format_string = sd_dump_format_string[1]; 17065 } 17066 /* 17067 * Available count is the number of elements from the given 17068 * data buffer that we can fit into the available length. 17069 * This is based upon the size of the format string used. 17070 * Make one entry and find its size. 17071 */ 17072 (void) sprintf(bufp, format_string, data[0]); 17073 entry_len = strlen(bufp); 17074 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 17075 17076 j = 0; 17077 while (j < len) { 17078 bufp = local_buf; 17079 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 17080 start_offset = j; 17081 17082 end_offset = start_offset + avail_count; 17083 17084 (void) sprintf(bufp, "%s:", title); 17085 bufp += strlen(bufp); 17086 for (i = start_offset; ((i < end_offset) && (j < len)); 17087 i++, j++) { 17088 (void) sprintf(bufp, format_string, data[i]); 17089 bufp += entry_len; 17090 } 17091 (void) sprintf(bufp, "\n"); 17092 17093 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 17094 } 17095 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 17096 } 17097 17098 /* 17099 * Function: sd_print_sense_msg 17100 * 17101 * Description: Log a message based upon the given sense data.
17102 * 17103 * Arguments: un - ptr to associated softstate 17104 * bp - ptr to buf(9S) for the command 17105 * arg - ptr to associated sd_sense_info struct 17106 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17107 * or SD_NO_RETRY_ISSUED 17108 * 17109 * Context: May be called from interrupt context 17110 */ 17111 17112 static void 17113 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 17114 { 17115 struct sd_xbuf *xp; 17116 struct scsi_pkt *pktp; 17117 uint8_t *sensep; 17118 daddr_t request_blkno; 17119 diskaddr_t err_blkno; 17120 int severity; 17121 int pfa_flag; 17122 extern struct scsi_key_strings scsi_cmds[]; 17123 17124 ASSERT(un != NULL); 17125 ASSERT(mutex_owned(SD_MUTEX(un))); 17126 ASSERT(bp != NULL); 17127 xp = SD_GET_XBUF(bp); 17128 ASSERT(xp != NULL); 17129 pktp = SD_GET_PKTP(bp); 17130 ASSERT(pktp != NULL); 17131 ASSERT(arg != NULL); 17132 17133 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 17134 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 17135 17136 if ((code == SD_DELAYED_RETRY_ISSUED) || 17137 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 17138 severity = SCSI_ERR_RETRYABLE; 17139 } 17140 17141 /* Use absolute block number for the request block number */ 17142 request_blkno = xp->xb_blkno; 17143 17144 /* 17145 * Now try to get the error block number from the sense data 17146 */ 17147 sensep = xp->xb_sense_data; 17148 17149 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 17150 (uint64_t *)&err_blkno)) { 17151 /* 17152 * We retrieved the error block number from the information 17153 * portion of the sense data. 17154 * 17155 * For USCSI commands we are better off using the error 17156 * block no. as the requested block no. (This is the best 17157 * we can estimate.) 17158 */ 17159 if ((SD_IS_BUFIO(xp) == FALSE) && 17160 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 17161 request_blkno = err_blkno; 17162 } 17163 } else { 17164 /* 17165 * Without the es_valid bit set (for fixed format) or an 17166 * information descriptor (for descriptor format) we cannot 17167 * be certain of the error blkno, so just use the 17168 * request_blkno. 17169 */ 17170 err_blkno = (diskaddr_t)request_blkno; 17171 } 17172 17173 /* 17174 * The following will log the buffer contents for the release driver 17175 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 17176 * level is set to verbose.
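 * For a failed 10-byte READ this produces two CE_NOTE lines of the
 * form "Failed CDB: 0x28 0x00 ..." and "Sense Data: 0x70 0x00 ..."
 * (hypothetical bytes shown for illustration).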
17177 */ 17178 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 17179 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17180 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 17181 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 17182 17183 if (pfa_flag == FALSE) { 17184 /* This is normally only set for USCSI */ 17185 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 17186 return; 17187 } 17188 17189 if ((SD_IS_BUFIO(xp) == TRUE) && 17190 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 17191 (severity < sd_error_level))) { 17192 return; 17193 } 17194 } 17195 /* 17196 * Check for Sonoma Failover and keep a count of how many failed I/O's 17197 */ 17198 if ((SD_IS_LSI(un)) && 17199 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 17200 (scsi_sense_asc(sensep) == 0x94) && 17201 (scsi_sense_ascq(sensep) == 0x01)) { 17202 un->un_sonoma_failure_count++; 17203 if (un->un_sonoma_failure_count > 1) { 17204 return; 17205 } 17206 } 17207 17208 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 17209 request_blkno, err_blkno, scsi_cmds, 17210 (struct scsi_extended_sense *)sensep, 17211 un->un_additional_codes, NULL); 17212 } 17213 17214 /* 17215 * Function: sd_sense_key_no_sense 17216 * 17217 * Description: Recovery action when sense data was not received. 17218 * 17219 * Context: May be called from interrupt context 17220 */ 17221 17222 static void 17223 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 17224 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17225 { 17226 struct sd_sense_info si; 17227 17228 ASSERT(un != NULL); 17229 ASSERT(mutex_owned(SD_MUTEX(un))); 17230 ASSERT(bp != NULL); 17231 ASSERT(xp != NULL); 17232 ASSERT(pktp != NULL); 17233 17234 si.ssi_severity = SCSI_ERR_FATAL; 17235 si.ssi_pfa_flag = FALSE; 17236 17237 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17238 17239 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17240 &si, EIO, (clock_t)0, NULL); 17241 } 17242 17243 17244 /* 17245 * Function: sd_sense_key_recoverable_error 17246 * 17247 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 17248 * 17249 * Context: May be called from interrupt context 17250 */ 17251 17252 static void 17253 sd_sense_key_recoverable_error(struct sd_lun *un, 17254 uint8_t *sense_datap, 17255 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17256 { 17257 struct sd_sense_info si; 17258 uint8_t asc = scsi_sense_asc(sense_datap); 17259 17260 ASSERT(un != NULL); 17261 ASSERT(mutex_owned(SD_MUTEX(un))); 17262 ASSERT(bp != NULL); 17263 ASSERT(xp != NULL); 17264 ASSERT(pktp != NULL); 17265 17266 /* 17267 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 17268 */ 17269 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 17270 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17271 si.ssi_severity = SCSI_ERR_INFO; 17272 si.ssi_pfa_flag = TRUE; 17273 } else { 17274 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17275 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 17276 si.ssi_severity = SCSI_ERR_RECOVERED; 17277 si.ssi_pfa_flag = FALSE; 17278 } 17279 17280 if (pktp->pkt_resid == 0) { 17281 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17282 sd_return_command(un, bp); 17283 return; 17284 } 17285 17286 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17287 &si, EIO, (clock_t)0, NULL); 17288 } 17289 17290 17291 17292 17293 /* 17294 * Function: sd_sense_key_not_ready 17295 * 17296 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
17297 * 17298 * Context: May be called from interrupt context 17299 */ 17300 17301 static void 17302 sd_sense_key_not_ready(struct sd_lun *un, 17303 uint8_t *sense_datap, 17304 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17305 { 17306 struct sd_sense_info si; 17307 uint8_t asc = scsi_sense_asc(sense_datap); 17308 uint8_t ascq = scsi_sense_ascq(sense_datap); 17309 17310 ASSERT(un != NULL); 17311 ASSERT(mutex_owned(SD_MUTEX(un))); 17312 ASSERT(bp != NULL); 17313 ASSERT(xp != NULL); 17314 ASSERT(pktp != NULL); 17315 17316 si.ssi_severity = SCSI_ERR_FATAL; 17317 si.ssi_pfa_flag = FALSE; 17318 17319 /* 17320 * Update error stats after first NOT READY error. Disks may have 17321 * been powered down and may need to be restarted. For CDROMs, 17322 * report NOT READY errors only if media is present. 17323 */ 17324 if ((ISCD(un) && (asc == 0x3A)) || 17325 (xp->xb_nr_retry_count > 0)) { 17326 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17327 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 17328 } 17329 17330 /* 17331 * Just fail if the "not ready" retry limit has been reached. 17332 */ 17333 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 17334 /* Special check for error message printing for removables. */ 17335 if (un->un_f_has_removable_media && (asc == 0x04) && 17336 (ascq >= 0x04)) { 17337 si.ssi_severity = SCSI_ERR_ALL; 17338 } 17339 goto fail_command; 17340 } 17341 17342 /* 17343 * Check the ASC and ASCQ in the sense data as needed, to determine 17344 * what to do. 17345 */ 17346 switch (asc) { 17347 case 0x04: /* LOGICAL UNIT NOT READY */ 17348 /* 17349 * Disk drives that don't spin up result in a very long delay 17350 * in format without warning messages. We will log a message 17351 * if the error level is set to verbose. 17352 */ 17353 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17354 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17355 "logical unit not ready, resetting disk\n"); 17356 } 17357 17358 /* 17359 * There are different requirements for CDROMs and disks for 17360 * the number of retries. If a CD-ROM is reporting this, it is 17361 * probably reading TOC and is in the process of getting 17362 * ready, so we should keep on trying for a long time to make 17363 * sure that all types of media are taken into account (for 17364 * some media the drive takes a long time to read TOC). For 17365 * disks we do not want to retry this too many times as this 17366 * can cause a long hang in format when the drive refuses to 17367 * spin up (a very common failure). 17368 */ 17369 switch (ascq) { 17370 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 17371 /* 17372 * Disk drives frequently refuse to spin up which 17373 * results in a very long hang in format without 17374 * warning messages. 17375 * 17376 * Note: This code preserves the legacy behavior of 17377 * comparing xb_nr_retry_count against zero for fibre 17378 * channel targets instead of comparing against the 17379 * un_reset_retry_count value. The reason for this 17380 * discrepancy has been so utterly lost beneath the 17381 * Sands of Time that even Indiana Jones could not 17382 * find it.
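 * The practical effect, preserved below: a fibre channel target
 * is reset on any retry after the first NOT READY, while a
 * parallel SCSI target is reset only once xb_nr_retry_count
 * exceeds un_reset_retry_count (ignoring the SD_LOGMASK_DIAG
 * override).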
17383 */ 17384 if (un->un_f_is_fibre == TRUE) { 17385 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17386 (xp->xb_nr_retry_count > 0)) && 17387 (un->un_startstop_timeid == NULL)) { 17388 scsi_log(SD_DEVINFO(un), sd_label, 17389 CE_WARN, "logical unit not ready, " 17390 "resetting disk\n"); 17391 sd_reset_target(un, pktp); 17392 } 17393 } else { 17394 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17395 (xp->xb_nr_retry_count > 17396 un->un_reset_retry_count)) && 17397 (un->un_startstop_timeid == NULL)) { 17398 scsi_log(SD_DEVINFO(un), sd_label, 17399 CE_WARN, "logical unit not ready, " 17400 "resetting disk\n"); 17401 sd_reset_target(un, pktp); 17402 } 17403 } 17404 break; 17405 17406 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 17407 /* 17408 * If the target is in the process of becoming 17409 * ready, just proceed with the retry. This can 17410 * happen with CD-ROMs that take a long time to 17411 * read TOC after a power cycle or reset. 17412 */ 17413 goto do_retry; 17414 17415 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 17416 break; 17417 17418 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 17419 /* 17420 * Retries cannot help here so just fail right away. 17421 */ 17422 goto fail_command; 17423 17424 case 0x88: 17425 /* 17426 * Vendor-unique code for T3/T4: it indicates a 17427 * path problem in a multipathed config, but as far as 17428 * the target driver is concerned it equates to a fatal 17429 * error, so we should just fail the command right away 17430 * (without printing anything to the console). If this 17431 * is not a T3/T4, fall thru to the default recovery 17432 * action. 17433 * T3/T4 is FC only, so there is no need to check is_fibre. 17434 */ 17435 if (SD_IS_T3(un) || SD_IS_T4(un)) { 17436 sd_return_failed_command(un, bp, EIO); 17437 return; 17438 } 17439 /* FALLTHRU */ 17440 17441 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 17442 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 17443 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 17444 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 17445 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 17446 default: /* Possible future codes in SCSI spec? */ 17447 /* 17448 * For removable-media devices, do not retry if 17449 * ASCQ > 2 as these result mostly from USCSI commands 17450 * on MMC devices issued to check status of an 17451 * operation initiated in immediate mode. Also for 17452 * ASCQ >= 4 do not print console messages as these 17453 * mainly represent a user-initiated operation 17454 * instead of a system failure. 17455 */ 17456 if (un->un_f_has_removable_media) { 17457 si.ssi_severity = SCSI_ERR_ALL; 17458 goto fail_command; 17459 } 17460 break; 17461 } 17462 17463 /* 17464 * As part of our recovery attempt for the NOT READY 17465 * condition, we issue a START STOP UNIT command. However 17466 * we want to wait for a short delay before attempting this 17467 * as there may still be more commands coming back from the 17468 * target with the check condition. To do this we use 17469 * timeout(9F) to call sd_start_stop_unit_callback() after 17470 * the delay interval expires. (sd_start_stop_unit_callback() 17471 * dispatches sd_start_stop_unit_task(), which will issue 17472 * the actual START STOP UNIT command. The delay interval 17473 * is one-half of the delay that we will use to retry the 17474 * command that generated the NOT READY condition.
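 * (Worked example, hypothetical value: with un_busy_timeout at
 * 600 ticks, the timeout below fires after 300 ticks, i.e.
 * un_busy_timeout / 2.)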
17475 * 17476 * Note that we could just dispatch sd_start_stop_unit_task() 17477 * from here and allow it to sleep for the delay interval, 17478 * but then we would be tying up the taskq thread 17479 * unnecessarily for the duration of the delay. 17480 * 17481 * Do not issue the START STOP UNIT if the current command 17482 * is already a START STOP UNIT. 17483 */ 17484 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 17485 break; 17486 } 17487 17488 /* 17489 * Do not schedule the timeout if one is already pending. 17490 */ 17491 if (un->un_startstop_timeid != NULL) { 17492 SD_INFO(SD_LOG_ERROR, un, 17493 "sd_sense_key_not_ready: restart already issued to" 17494 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 17495 ddi_get_instance(SD_DEVINFO(un))); 17496 break; 17497 } 17498 17499 /* 17500 * Schedule the START STOP UNIT command, then queue the command 17501 * for a retry. 17502 * 17503 * Note: A timeout is not scheduled for this retry because we 17504 * want the retry to be serial with the START_STOP_UNIT. The 17505 * retry will be started when the START_STOP_UNIT is completed 17506 * in sd_start_stop_unit_task. 17507 */ 17508 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 17509 un, un->un_busy_timeout / 2); 17510 xp->xb_nr_retry_count++; 17511 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 17512 return; 17513 17514 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 17515 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17516 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17517 "unit does not respond to selection\n"); 17518 } 17519 break; 17520 17521 case 0x3A: /* MEDIUM NOT PRESENT */ 17522 if (sd_error_level >= SCSI_ERR_FATAL) { 17523 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17524 "Caddy not inserted in drive\n"); 17525 } 17526 17527 sr_ejected(un); 17528 un->un_mediastate = DKIO_EJECTED; 17529 /* The state has changed, inform the media watch routines */ 17530 cv_broadcast(&un->un_state_cv); 17531 /* Just fail if no media is present in the drive. */ 17532 goto fail_command; 17533 17534 default: 17535 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17536 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 17537 "Unit not Ready. Additional sense code 0x%x\n", 17538 asc); 17539 } 17540 break; 17541 } 17542 17543 do_retry: 17544 17545 /* 17546 * Retry the command, as some targets may report NOT READY for 17547 * several seconds after being reset. 17548 */ 17549 xp->xb_nr_retry_count++; 17550 si.ssi_severity = SCSI_ERR_RETRYABLE; 17551 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17552 &si, EIO, un->un_busy_timeout, NULL); 17553 17554 return; 17555 17556 fail_command: 17557 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17558 sd_return_failed_command(un, bp, EIO); 17559 } 17560 17561 17562 17563 /* 17564 * Function: sd_sense_key_medium_or_hardware_error 17565 * 17566 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 17567 * sense key.
17568 * 17569 * Context: May be called from interrupt context 17570 */ 17571 17572 static void 17573 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 17574 uint8_t *sense_datap, 17575 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17576 { 17577 struct sd_sense_info si; 17578 uint8_t sense_key = scsi_sense_key(sense_datap); 17579 uint8_t asc = scsi_sense_asc(sense_datap); 17580 17581 ASSERT(un != NULL); 17582 ASSERT(mutex_owned(SD_MUTEX(un))); 17583 ASSERT(bp != NULL); 17584 ASSERT(xp != NULL); 17585 ASSERT(pktp != NULL); 17586 17587 si.ssi_severity = SCSI_ERR_FATAL; 17588 si.ssi_pfa_flag = FALSE; 17589 17590 if (sense_key == KEY_MEDIUM_ERROR) { 17591 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 17592 } 17593 17594 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17595 17596 if ((un->un_reset_retry_count != 0) && 17597 (xp->xb_retry_count == un->un_reset_retry_count)) { 17598 mutex_exit(SD_MUTEX(un)); 17599 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 17600 if (un->un_f_allow_bus_device_reset == TRUE) { 17601 17602 boolean_t try_resetting_target = B_TRUE; 17603 17604 /* 17605 * We need to be able to handle specific ASC when we are 17606 * handling a KEY_HARDWARE_ERROR. In particular 17607 * taking the default action of resetting the target may 17608 * not be the appropriate way to attempt recovery. 17609 * Resetting a target because of a single LUN failure 17610 * victimizes all LUNs on that target. 17611 * 17612 * This is true for the LSI arrays, if an LSI 17613 * array controller returns an ASC of 0x84 (LUN Dead) we 17614 * should trust it. 17615 */ 17616 17617 if (sense_key == KEY_HARDWARE_ERROR) { 17618 switch (asc) { 17619 case 0x84: 17620 if (SD_IS_LSI(un)) { 17621 try_resetting_target = B_FALSE; 17622 } 17623 break; 17624 default: 17625 break; 17626 } 17627 } 17628 17629 if (try_resetting_target == B_TRUE) { 17630 int reset_retval = 0; 17631 if (un->un_f_lun_reset_enabled == TRUE) { 17632 SD_TRACE(SD_LOG_IO_CORE, un, 17633 "sd_sense_key_medium_or_hardware_" 17634 "error: issuing RESET_LUN\n"); 17635 reset_retval = 17636 scsi_reset(SD_ADDRESS(un), 17637 RESET_LUN); 17638 } 17639 if (reset_retval == 0) { 17640 SD_TRACE(SD_LOG_IO_CORE, un, 17641 "sd_sense_key_medium_or_hardware_" 17642 "error: issuing RESET_TARGET\n"); 17643 (void) scsi_reset(SD_ADDRESS(un), 17644 RESET_TARGET); 17645 } 17646 } 17647 } 17648 mutex_enter(SD_MUTEX(un)); 17649 } 17650 17651 /* 17652 * This really ought to be a fatal error, but we will retry anyway 17653 * as some drives report this as a spurious error. 17654 */ 17655 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17656 &si, EIO, (clock_t)0, NULL); 17657 } 17658 17659 17660 17661 /* 17662 * Function: sd_sense_key_illegal_request 17663 * 17664 * Description: Recovery actions for a SCSI "Illegal Request" sense key. 
17665 * 17666 * Context: May be called from interrupt context 17667 */ 17668 17669 static void 17670 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 17671 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17672 { 17673 struct sd_sense_info si; 17674 17675 ASSERT(un != NULL); 17676 ASSERT(mutex_owned(SD_MUTEX(un))); 17677 ASSERT(bp != NULL); 17678 ASSERT(xp != NULL); 17679 ASSERT(pktp != NULL); 17680 17681 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 17682 17683 si.ssi_severity = SCSI_ERR_INFO; 17684 si.ssi_pfa_flag = FALSE; 17685 17686 /* Pointless to retry if the target thinks it's an illegal request */ 17687 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17688 sd_return_failed_command(un, bp, EIO); 17689 } 17690 17691 17692 17693 17694 /* 17695 * Function: sd_sense_key_unit_attention 17696 * 17697 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 17698 * 17699 * Context: May be called from interrupt context 17700 */ 17701 17702 static void 17703 sd_sense_key_unit_attention(struct sd_lun *un, 17704 uint8_t *sense_datap, 17705 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17706 { 17707 /* 17708 * For UNIT ATTENTION we allow retries for one minute. Devices 17709 * like Sonoma can return UNIT ATTENTION close to a minute 17710 * under certain conditions. 17711 */ 17712 int retry_check_flag = SD_RETRIES_UA; 17713 boolean_t kstat_updated = B_FALSE; 17714 struct sd_sense_info si; 17715 uint8_t asc = scsi_sense_asc(sense_datap); 17716 uint8_t ascq = scsi_sense_ascq(sense_datap); 17717 17718 ASSERT(un != NULL); 17719 ASSERT(mutex_owned(SD_MUTEX(un))); 17720 ASSERT(bp != NULL); 17721 ASSERT(xp != NULL); 17722 ASSERT(pktp != NULL); 17723 17724 si.ssi_severity = SCSI_ERR_INFO; 17725 si.ssi_pfa_flag = FALSE; 17726 17727 17728 switch (asc) { 17729 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 17730 if (sd_report_pfa != 0) { 17731 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17732 si.ssi_pfa_flag = TRUE; 17733 retry_check_flag = SD_RETRIES_STANDARD; 17734 goto do_retry; 17735 } 17736 17737 break; 17738 17739 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 17740 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 17741 un->un_resvd_status |= 17742 (SD_LOST_RESERVE | SD_WANT_RESERVE); 17743 } 17744 #ifdef _LP64 17745 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 17746 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 17747 un, KM_NOSLEEP) == 0) { 17748 /* 17749 * If we can't dispatch the task we'll just 17750 * live without descriptor sense. We can 17751 * try again on the next "unit attention" 17752 */ 17753 SD_ERROR(SD_LOG_ERROR, un, 17754 "sd_sense_key_unit_attention: " 17755 "Could not dispatch " 17756 "sd_reenable_dsense_task\n"); 17757 } 17758 } 17759 #endif /* _LP64 */ 17760 /* FALLTHRU */ 17761 17762 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 17763 if (!un->un_f_has_removable_media) { 17764 break; 17765 } 17766 17767 /* 17768 * When we get a unit attention from a removable-media device, 17769 * it may be in a state that will take a long time to recover 17770 * (e.g., from a reset). Since we are executing in interrupt 17771 * context here, we cannot wait around for the device to come 17772 * back. So hand this command off to sd_media_change_task() 17773 * for deferred processing under taskq thread context. (Note 17774 * that the command still may be failed if a problem is 17775 * encountered at a later time.) 
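 * The handoff itself: taskq_dispatch() with KM_NOSLEEP either
 * queues sd_media_change_task(pktp) on sd_tq and returns a nonzero
 * id, or returns 0 (NULL), in which case the command is failed
 * below instead of blocking in interrupt context.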
17776 */ 17777 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 17778 KM_NOSLEEP) == 0) { 17779 /* 17780 * Cannot dispatch the request so fail the command. 17781 */ 17782 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17783 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17784 si.ssi_severity = SCSI_ERR_FATAL; 17785 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17786 sd_return_failed_command(un, bp, EIO); 17787 } 17788 17789 /* 17790 * If we failed to dispatch sd_media_change_task(), the kstat was 17791 * already updated above. If the dispatch succeeded, the kstat will 17792 * be updated later if an error is encountered. Either way, set the 17793 * kstat_updated flag here. 17794 */ 17795 kstat_updated = B_TRUE; 17796 17797 /* 17798 * Either the command has been successfully dispatched to a 17799 * task Q for retrying, or the dispatch failed. In either case 17800 * do NOT retry again by calling sd_retry_command. This sets up 17801 * two retries of the same command and when one completes and 17802 * frees the resources the other will access freed memory, 17803 * a bad thing. 17804 */ 17805 return; 17806 17807 default: 17808 break; 17809 } 17810 17811 /* 17812 * ASC ASCQ 17813 * 2A 09 Capacity data has changed 17814 * 2A 01 Mode parameters changed 17815 * 3F 0E Reported luns data has changed 17816 * Arrays that support logical unit expansion should report 17817 * capacity changes (2Ah/09). "Mode parameters changed" and 17818 * "reported luns data has changed" are approximations. 17819 */ 17820 if (((asc == 0x2a) && (ascq == 0x09)) || 17821 ((asc == 0x2a) && (ascq == 0x01)) || 17822 ((asc == 0x3f) && (ascq == 0x0e))) { 17823 if (taskq_dispatch(sd_tq, sd_target_change_task, un, 17824 KM_NOSLEEP) == 0) { 17825 SD_ERROR(SD_LOG_ERROR, un, 17826 "sd_sense_key_unit_attention: " 17827 "Could not dispatch sd_target_change_task\n"); 17828 } 17829 } 17830 17831 /* 17832 * Update kstat if we haven't done that. 17833 */ 17834 if (!kstat_updated) { 17835 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17836 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17837 } 17838 17839 do_retry: 17840 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 17841 EIO, SD_UA_RETRY_DELAY, NULL); 17842 } 17843 17844 17845 17846 /* 17847 * Function: sd_sense_key_fail_command 17848 * 17849 * Description: Used to fail a command when we don't like the sense key that 17850 * was returned. 17851 * 17852 * Context: May be called from interrupt context 17853 */ 17854 17855 static void 17856 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 17857 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17858 { 17859 struct sd_sense_info si; 17860 17861 ASSERT(un != NULL); 17862 ASSERT(mutex_owned(SD_MUTEX(un))); 17863 ASSERT(bp != NULL); 17864 ASSERT(xp != NULL); 17865 ASSERT(pktp != NULL); 17866 17867 si.ssi_severity = SCSI_ERR_FATAL; 17868 si.ssi_pfa_flag = FALSE; 17869 17870 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17871 sd_return_failed_command(un, bp, EIO); 17872 } 17873 17874 17875 17876 /* 17877 * Function: sd_sense_key_blank_check 17878 * 17879 * Description: Recovery actions for a SCSI "Blank Check" sense key. 17880 * Has no monetary connotation.
17881 * 17882 * Context: May be called from interrupt context 17883 */ 17884 17885 static void 17886 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 17887 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17888 { 17889 struct sd_sense_info si; 17890 17891 ASSERT(un != NULL); 17892 ASSERT(mutex_owned(SD_MUTEX(un))); 17893 ASSERT(bp != NULL); 17894 ASSERT(xp != NULL); 17895 ASSERT(pktp != NULL); 17896 17897 /* 17898 * Blank check is not fatal for removable devices, therefore 17899 * it does not require a console message. 17900 */ 17901 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 17902 SCSI_ERR_FATAL; 17903 si.ssi_pfa_flag = FALSE; 17904 17905 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17906 sd_return_failed_command(un, bp, EIO); 17907 } 17908 17909 17910 17911 17912 /* 17913 * Function: sd_sense_key_aborted_command 17914 * 17915 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 17916 * 17917 * Context: May be called from interrupt context 17918 */ 17919 17920 static void 17921 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 17922 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17923 { 17924 struct sd_sense_info si; 17925 17926 ASSERT(un != NULL); 17927 ASSERT(mutex_owned(SD_MUTEX(un))); 17928 ASSERT(bp != NULL); 17929 ASSERT(xp != NULL); 17930 ASSERT(pktp != NULL); 17931 17932 si.ssi_severity = SCSI_ERR_FATAL; 17933 si.ssi_pfa_flag = FALSE; 17934 17935 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17936 17937 /* 17938 * This really ought to be a fatal error, but we will retry anyway 17939 * as some drives report this as a spurious error. 17940 */ 17941 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17942 &si, EIO, drv_usectohz(100000), NULL); 17943 } 17944 17945 17946 17947 /* 17948 * Function: sd_sense_key_default 17949 * 17950 * Description: Default recovery action for several SCSI sense keys (basically 17951 * attempts a retry). 17952 * 17953 * Context: May be called from interrupt context 17954 */ 17955 17956 static void 17957 sd_sense_key_default(struct sd_lun *un, 17958 uint8_t *sense_datap, 17959 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17960 { 17961 struct sd_sense_info si; 17962 uint8_t sense_key = scsi_sense_key(sense_datap); 17963 17964 ASSERT(un != NULL); 17965 ASSERT(mutex_owned(SD_MUTEX(un))); 17966 ASSERT(bp != NULL); 17967 ASSERT(xp != NULL); 17968 ASSERT(pktp != NULL); 17969 17970 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17971 17972 /* 17973 * Undecoded sense key. Attempt retries and hope that will fix 17974 * the problem. Otherwise, we're dead. 17975 */ 17976 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17977 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17978 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 17979 } 17980 17981 si.ssi_severity = SCSI_ERR_FATAL; 17982 si.ssi_pfa_flag = FALSE; 17983 17984 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17985 &si, EIO, (clock_t)0, NULL); 17986 } 17987 17988 17989 17990 /* 17991 * Function: sd_print_retry_msg 17992 * 17993 * Description: Print a message indicating the retry action being taken. 17994 * 17995 * Arguments: un - ptr to associated softstate 17996 * bp - ptr to buf(9S) for the command 17997 * arg - not used. 
17998 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17999 * or SD_NO_RETRY_ISSUED 18000 * 18001 * Context: May be called from interrupt context 18002 */ 18003 /* ARGSUSED */ 18004 static void 18005 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 18006 { 18007 struct sd_xbuf *xp; 18008 struct scsi_pkt *pktp; 18009 char *reasonp; 18010 char *msgp; 18011 18012 ASSERT(un != NULL); 18013 ASSERT(mutex_owned(SD_MUTEX(un))); 18014 ASSERT(bp != NULL); 18015 pktp = SD_GET_PKTP(bp); 18016 ASSERT(pktp != NULL); 18017 xp = SD_GET_XBUF(bp); 18018 ASSERT(xp != NULL); 18019 18020 ASSERT(!mutex_owned(&un->un_pm_mutex)); 18021 mutex_enter(&un->un_pm_mutex); 18022 if ((un->un_state == SD_STATE_SUSPENDED) || 18023 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 18024 (pktp->pkt_flags & FLAG_SILENT)) { 18025 mutex_exit(&un->un_pm_mutex); 18026 goto update_pkt_reason; 18027 } 18028 mutex_exit(&un->un_pm_mutex); 18029 18030 /* 18031 * Suppress messages if they are all the same pkt_reason; with 18032 * TQ, many (up to 256) are returned with the same pkt_reason. 18033 * If we are in panic, then suppress the retry messages. 18034 */ 18035 switch (flag) { 18036 case SD_NO_RETRY_ISSUED: 18037 msgp = "giving up"; 18038 break; 18039 case SD_IMMEDIATE_RETRY_ISSUED: 18040 case SD_DELAYED_RETRY_ISSUED: 18041 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 18042 ((pktp->pkt_reason == un->un_last_pkt_reason) && 18043 (sd_error_level != SCSI_ERR_ALL))) { 18044 return; 18045 } 18046 msgp = "retrying command"; 18047 break; 18048 default: 18049 goto update_pkt_reason; 18050 } 18051 18052 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 18053 scsi_rname(pktp->pkt_reason)); 18054 18055 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18056 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 18057 18058 update_pkt_reason: 18059 /* 18060 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 18061 * This is to prevent multiple console messages for the same failure 18062 * condition. Note that un->un_last_pkt_reason is NOT restored if & 18063 * when the command is retried successfully because there still may be 18064 * more commands coming back with the same value of pktp->pkt_reason. 18065 */ 18066 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 18067 un->un_last_pkt_reason = pktp->pkt_reason; 18068 } 18069 } 18070 18071 18072 /* 18073 * Function: sd_print_cmd_incomplete_msg 18074 * 18075 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 18076 * 18077 * Arguments: un - ptr to associated softstate 18078 * bp - ptr to buf(9S) for the command 18079 * arg - passed to sd_print_retry_msg() 18080 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18081 * or SD_NO_RETRY_ISSUED 18082 * 18083 * Context: May be called from interrupt context 18084 */ 18085 18086 static void 18087 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 18088 int code) 18089 { 18090 dev_info_t *dip; 18091 18092 ASSERT(un != NULL); 18093 ASSERT(mutex_owned(SD_MUTEX(un))); 18094 ASSERT(bp != NULL); 18095 18096 switch (code) { 18097 case SD_NO_RETRY_ISSUED: 18098 /* Command was failed. Someone turned off this target? 
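 * On the first such failure the unit moves to SD_STATE_OFFLINE
 * below, so repeat failures skip both the console message and the
 * New_state() call.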
*/ 18099 if (un->un_state != SD_STATE_OFFLINE) { 18100 /* 18101 * Suppress message if we are detaching and 18102 * device has been disconnected 18103 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 18104 * private interface and not part of the DDI 18105 */ 18106 dip = un->un_sd->sd_dev; 18107 if (!(DEVI_IS_DETACHING(dip) && 18108 DEVI_IS_DEVICE_REMOVED(dip))) { 18109 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18110 "disk not responding to selection\n"); 18111 } 18112 New_state(un, SD_STATE_OFFLINE); 18113 } 18114 break; 18115 18116 case SD_DELAYED_RETRY_ISSUED: 18117 case SD_IMMEDIATE_RETRY_ISSUED: 18118 default: 18119 /* Command was successfully queued for retry */ 18120 sd_print_retry_msg(un, bp, arg, code); 18121 break; 18122 } 18123 } 18124 18125 18126 /* 18127 * Function: sd_pkt_reason_cmd_incomplete 18128 * 18129 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 18130 * 18131 * Context: May be called from interrupt context 18132 */ 18133 18134 static void 18135 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 18136 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18137 { 18138 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 18139 18140 ASSERT(un != NULL); 18141 ASSERT(mutex_owned(SD_MUTEX(un))); 18142 ASSERT(bp != NULL); 18143 ASSERT(xp != NULL); 18144 ASSERT(pktp != NULL); 18145 18146 /* Do not do a reset if selection did not complete */ 18147 /* Note: Should this not just check the bit? */ 18148 if (pktp->pkt_state != STATE_GOT_BUS) { 18149 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18150 sd_reset_target(un, pktp); 18151 } 18152 18153 /* 18154 * If the target was not successfully selected, then set 18155 * SD_RETRIES_FAILFAST to indicate that we lost communication 18156 * with the target, and further retries and/or commands are 18157 * likely to take a long time. 18158 */ 18159 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 18160 flag |= SD_RETRIES_FAILFAST; 18161 } 18162 18163 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18164 18165 sd_retry_command(un, bp, flag, 18166 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18167 } 18168 18169 18170 18171 /* 18172 * Function: sd_pkt_reason_cmd_tran_err 18173 * 18174 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 18175 * 18176 * Context: May be called from interrupt context 18177 */ 18178 18179 static void 18180 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 18181 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18182 { 18183 ASSERT(un != NULL); 18184 ASSERT(mutex_owned(SD_MUTEX(un))); 18185 ASSERT(bp != NULL); 18186 ASSERT(xp != NULL); 18187 ASSERT(pktp != NULL); 18188 18189 /* 18190 * Do not reset if we got a parity error, or if 18191 * selection did not complete. 18192 */ 18193 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18194 /* Note: Should this not just check the bit for pkt_state? */ 18195 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 18196 (pktp->pkt_state != STATE_GOT_BUS)) { 18197 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18198 sd_reset_target(un, pktp); 18199 } 18200 18201 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18202 18203 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18204 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18205 } 18206 18207 18208 18209 /* 18210 * Function: sd_pkt_reason_cmd_reset 18211 * 18212 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
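 *		The target is reset and the command is then retried with
 *		SD_RETRIES_VICTIM, since the pkt most likely got reset on
 *		behalf of some other target on the bus.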
18213 * 18214 * Context: May be called from interrupt context 18215 */ 18216 18217 static void 18218 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 18219 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18220 { 18221 ASSERT(un != NULL); 18222 ASSERT(mutex_owned(SD_MUTEX(un))); 18223 ASSERT(bp != NULL); 18224 ASSERT(xp != NULL); 18225 ASSERT(pktp != NULL); 18226 18227 /* The target may still be running the command, so try to reset. */ 18228 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18229 sd_reset_target(un, pktp); 18230 18231 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18232 18233 /* 18234 * If pkt_reason is CMD_RESET chances are that this pkt got 18235 * reset because another target on this bus caused it. The target 18236 * that caused it should get CMD_TIMEOUT with pkt_statistics 18237 * of STAT_TIMEOUT/STAT_DEV_RESET. 18238 */ 18239 18240 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18241 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18242 } 18243 18244 18245 18246 18247 /* 18248 * Function: sd_pkt_reason_cmd_aborted 18249 * 18250 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 18251 * 18252 * Context: May be called from interrupt context 18253 */ 18254 18255 static void 18256 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 18257 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18258 { 18259 ASSERT(un != NULL); 18260 ASSERT(mutex_owned(SD_MUTEX(un))); 18261 ASSERT(bp != NULL); 18262 ASSERT(xp != NULL); 18263 ASSERT(pktp != NULL); 18264 18265 /* The target may still be running the command, so try to reset. */ 18266 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18267 sd_reset_target(un, pktp); 18268 18269 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18270 18271 /* 18272 * If pkt_reason is CMD_ABORTED chances are that this pkt got 18273 * aborted because another target on this bus caused it. The target 18274 * that caused it should get CMD_TIMEOUT with pkt_statistics 18275 * of STAT_TIMEOUT/STAT_DEV_RESET. 18276 */ 18277 18278 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18279 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18280 } 18281 18282 18283 18284 /* 18285 * Function: sd_pkt_reason_cmd_timeout 18286 * 18287 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 18288 * 18289 * Context: May be called from interrupt context 18290 */ 18291 18292 static void 18293 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 18294 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18295 { 18296 ASSERT(un != NULL); 18297 ASSERT(mutex_owned(SD_MUTEX(un))); 18298 ASSERT(bp != NULL); 18299 ASSERT(xp != NULL); 18300 ASSERT(pktp != NULL); 18301 18302 18303 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18304 sd_reset_target(un, pktp); 18305 18306 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18307 18308 /* 18309 * A command timeout indicates that we could not establish 18310 * communication with the target, so set SD_RETRIES_FAILFAST 18311 * as further retries/commands are likely to take a long time. 18312 */ 18313 sd_retry_command(un, bp, 18314 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 18315 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18316 } 18317 18318 18319 18320 /* 18321 * Function: sd_pkt_reason_cmd_unx_bus_free 18322 * 18323 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
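 *		The command is retried with SD_RETRIES_STANDARD; if the
 *		unexpected bus free was accompanied by a parity error
 *		(STAT_PERR), the retry message is suppressed.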
18324 * 18325 * Context: May be called from interrupt context 18326 */ 18327 18328 static void 18329 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 18330 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18331 { 18332 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 18333 18334 ASSERT(un != NULL); 18335 ASSERT(mutex_owned(SD_MUTEX(un))); 18336 ASSERT(bp != NULL); 18337 ASSERT(xp != NULL); 18338 ASSERT(pktp != NULL); 18339 18340 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18341 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18342 18343 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 18344 sd_print_retry_msg : NULL; 18345 18346 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18347 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18348 } 18349 18350 18351 /* 18352 * Function: sd_pkt_reason_cmd_tag_reject 18353 * 18354 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 18355 * 18356 * Context: May be called from interrupt context 18357 */ 18358 18359 static void 18360 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 18361 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18362 { 18363 ASSERT(un != NULL); 18364 ASSERT(mutex_owned(SD_MUTEX(un))); 18365 ASSERT(bp != NULL); 18366 ASSERT(xp != NULL); 18367 ASSERT(pktp != NULL); 18368 18369 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18370 pktp->pkt_flags = 0; 18371 un->un_tagflags = 0; 18372 if (un->un_f_opt_queueing == TRUE) { 18373 un->un_throttle = min(un->un_throttle, 3); 18374 } else { 18375 un->un_throttle = 1; 18376 } 18377 mutex_exit(SD_MUTEX(un)); 18378 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 18379 mutex_enter(SD_MUTEX(un)); 18380 18381 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18382 18383 /* Legacy behavior not to check retry counts here. */ 18384 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 18385 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18386 } 18387 18388 18389 /* 18390 * Function: sd_pkt_reason_default 18391 * 18392 * Description: Default recovery actions for SCSA pkt_reason values that 18393 * do not have more explicit recovery actions. 18394 * 18395 * Context: May be called from interrupt context 18396 */ 18397 18398 static void 18399 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 18400 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18401 { 18402 ASSERT(un != NULL); 18403 ASSERT(mutex_owned(SD_MUTEX(un))); 18404 ASSERT(bp != NULL); 18405 ASSERT(xp != NULL); 18406 ASSERT(pktp != NULL); 18407 18408 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18409 sd_reset_target(un, pktp); 18410 18411 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18412 18413 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18414 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18415 } 18416 18417 18418 18419 /* 18420 * Function: sd_pkt_status_check_condition 18421 * 18422 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
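 *		If ARQ is disabled, a REQUEST SENSE command is issued to
 *		fetch the sense data before the failed command is retried;
 *		with ARQ enabled the sense data has already arrived, so the
 *		command is simply retried.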
18423 * 18424 * Context: May be called from interrupt context 18425 */ 18426 18427 static void 18428 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 18429 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18430 { 18431 ASSERT(un != NULL); 18432 ASSERT(mutex_owned(SD_MUTEX(un))); 18433 ASSERT(bp != NULL); 18434 ASSERT(xp != NULL); 18435 ASSERT(pktp != NULL); 18436 18437 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 18438 "entry: buf:0x%p xp:0x%p\n", bp, xp); 18439 18440 /* 18441 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 18442 * command will be retried after the request sense). Otherwise, retry 18443 * the command. Note: we are issuing the request sense even though the 18444 * retry limit may have been reached for the failed command. 18445 */ 18446 if (un->un_f_arq_enabled == FALSE) { 18447 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 18448 "no ARQ, sending request sense command\n"); 18449 sd_send_request_sense_command(un, bp, pktp); 18450 } else { 18451 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 18452 "ARQ, retrying request sense command\n"); 18453 #if defined(__i386) || defined(__amd64) 18454 /* 18455 * The SD_RETRY_DELAY value needs to be adjusted here 18456 * whenever SD_RETRY_DELAY changes in sddef.h 18457 */ 18458 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 18459 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, 18460 NULL); 18461 #else 18462 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 18463 EIO, SD_RETRY_DELAY, NULL); 18464 #endif 18465 } 18466 18467 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 18468 } 18469 18470 18471 /* 18472 * Function: sd_pkt_status_busy 18473 * 18474 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 18475 * 18476 * Context: May be called from interrupt context 18477 */ 18478 18479 static void 18480 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 18481 struct scsi_pkt *pktp) 18482 { 18483 ASSERT(un != NULL); 18484 ASSERT(mutex_owned(SD_MUTEX(un))); 18485 ASSERT(bp != NULL); 18486 ASSERT(xp != NULL); 18487 ASSERT(pktp != NULL); 18488 18489 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18490 "sd_pkt_status_busy: entry\n"); 18491 18492 /* If retries are exhausted, just fail the command. */ 18493 if (xp->xb_retry_count >= un->un_busy_retry_count) { 18494 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18495 "device busy too long\n"); 18496 sd_return_failed_command(un, bp, EIO); 18497 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18498 "sd_pkt_status_busy: exit\n"); 18499 return; 18500 } 18501 xp->xb_retry_count++; 18502 18503 /* 18504 * Try to reset the target. However, we do not want to perform 18505 * more than one reset if the device continues to fail. The reset 18506 * will be performed when the retry count reaches the reset 18507 * threshold. This threshold should be set such that at least 18508 * one retry is issued before the reset is performed. 18509 */ 18510 if (xp->xb_retry_count == 18511 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 18512 int rval = 0; 18513 mutex_exit(SD_MUTEX(un)); 18514 if (un->un_f_allow_bus_device_reset == TRUE) { 18515 /* 18516 * First try to reset the LUN; if we cannot then 18517 * try to reset the target.
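 * If the target reset fails as well, fall back to RESET_ALL.
 * The escalation is thus LUN -> target -> all, with the first
 * two steps gated by un_f_allow_bus_device_reset and
 * un_f_lun_reset_enabled.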
18518 */ 18519 if (un->un_f_lun_reset_enabled == TRUE) { 18520 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18521 "sd_pkt_status_busy: RESET_LUN\n"); 18522 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18523 } 18524 if (rval == 0) { 18525 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18526 "sd_pkt_status_busy: RESET_TARGET\n"); 18527 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18528 } 18529 } 18530 if (rval == 0) { 18531 /* 18532 * If the RESET_LUN and/or RESET_TARGET failed, 18533 * try RESET_ALL 18534 */ 18535 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18536 "sd_pkt_status_busy: RESET_ALL\n"); 18537 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 18538 } 18539 mutex_enter(SD_MUTEX(un)); 18540 if (rval == 0) { 18541 /* 18542 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 18543 * At this point we give up & fail the command. 18544 */ 18545 sd_return_failed_command(un, bp, EIO); 18546 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18547 "sd_pkt_status_busy: exit (failed cmd)\n"); 18548 return; 18549 } 18550 } 18551 18552 /* 18553 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 18554 * we have already checked the retry counts above. 18555 */ 18556 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 18557 EIO, un->un_busy_timeout, NULL); 18558 18559 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18560 "sd_pkt_status_busy: exit\n"); 18561 } 18562 18563 18564 /* 18565 * Function: sd_pkt_status_reservation_conflict 18566 * 18567 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 18568 * command status. 18569 * 18570 * Context: May be called from interrupt context 18571 */ 18572 18573 static void 18574 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 18575 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18576 { 18577 ASSERT(un != NULL); 18578 ASSERT(mutex_owned(SD_MUTEX(un))); 18579 ASSERT(bp != NULL); 18580 ASSERT(xp != NULL); 18581 ASSERT(pktp != NULL); 18582 18583 /* 18584 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 18585 * conflict could be due to various reasons like incorrect keys, not 18586 * registered or not reserved etc. So, we return EACCES to the caller. 18587 */ 18588 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 18589 int cmd = SD_GET_PKT_OPCODE(pktp); 18590 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 18591 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 18592 sd_return_failed_command(un, bp, EACCES); 18593 return; 18594 } 18595 } 18596 18597 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 18598 18599 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 18600 if (sd_failfast_enable != 0) { 18601 /* By definition, we must panic here.... */ 18602 sd_panic_for_res_conflict(un); 18603 /*NOTREACHED*/ 18604 } 18605 SD_ERROR(SD_LOG_IO, un, 18606 "sd_handle_resv_conflict: Disk Reserved\n"); 18607 sd_return_failed_command(un, bp, EACCES); 18608 return; 18609 } 18610 18611 /* 18612 * 1147670: retry only if sd_retry_on_reservation_conflict 18613 * property is set (default is 1). Retries will not succeed 18614 * on a disk reserved by another initiator. HA systems 18615 * may reset this via sd.conf to avoid these retries. 18616 * 18617 * Note: The legacy return code for this failure is EIO, however EACCES 18618 * seems more appropriate for a reservation conflict. 
18619 */ 18620 if (sd_retry_on_reservation_conflict == 0) { 18621 SD_ERROR(SD_LOG_IO, un, 18622 "sd_handle_resv_conflict: Device Reserved\n"); 18623 sd_return_failed_command(un, bp, EIO); 18624 return; 18625 } 18626 18627 /* 18628 * Retry the command if we can. 18629 * 18630 * Note: The legacy return code for this failure is EIO, however EACCES 18631 * seems more appropriate for a reservation conflict. 18632 */ 18633 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 18634 (clock_t)2, NULL); 18635 } 18636 18637 18638 18639 /* 18640 * Function: sd_pkt_status_qfull 18641 * 18642 * Description: Handle a QUEUE FULL condition from the target. This can 18643 * occur if the HBA does not handle the queue full condition. 18644 * (Basically this means third-party HBAs as Sun HBAs will 18645 * handle the queue full condition.) Note that if there are 18646 * some commands already in the transport, then the queue full 18647 * has occurred because the queue for this nexus is actually 18648 * full. If there are no commands in the transport, then the 18649 * queue full is resulting from some other initiator or lun 18650 * consuming all the resources at the target. 18651 * 18652 * Context: May be called from interrupt context 18653 */ 18654 18655 static void 18656 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 18657 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18658 { 18659 ASSERT(un != NULL); 18660 ASSERT(mutex_owned(SD_MUTEX(un))); 18661 ASSERT(bp != NULL); 18662 ASSERT(xp != NULL); 18663 ASSERT(pktp != NULL); 18664 18665 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18666 "sd_pkt_status_qfull: entry\n"); 18667 18668 /* 18669 * Just lower the QFULL throttle and retry the command. Note that 18670 * we do not limit the number of retries here. 18671 */ 18672 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 18673 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 18674 SD_RESTART_TIMEOUT, NULL); 18675 18676 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18677 "sd_pkt_status_qfull: exit\n"); 18678 } 18679 18680 18681 /* 18682 * Function: sd_reset_target 18683 * 18684 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 18685 * RESET_TARGET, or RESET_ALL. 18686 * 18687 * Context: May be called under interrupt context. 18688 */ 18689 18690 static void 18691 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 18692 { 18693 int rval = 0; 18694 18695 ASSERT(un != NULL); 18696 ASSERT(mutex_owned(SD_MUTEX(un))); 18697 ASSERT(pktp != NULL); 18698 18699 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 18700 18701 /* 18702 * No need to reset if the transport layer has already done so. 
18703 */ 18704 if ((pktp->pkt_statistics & 18705 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 18706 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18707 "sd_reset_target: no reset\n"); 18708 return; 18709 } 18710 18711 mutex_exit(SD_MUTEX(un)); 18712 18713 if (un->un_f_allow_bus_device_reset == TRUE) { 18714 if (un->un_f_lun_reset_enabled == TRUE) { 18715 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18716 "sd_reset_target: RESET_LUN\n"); 18717 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18718 } 18719 if (rval == 0) { 18720 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18721 "sd_reset_target: RESET_TARGET\n"); 18722 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18723 } 18724 } 18725 18726 if (rval == 0) { 18727 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18728 "sd_reset_target: RESET_ALL\n"); 18729 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 18730 } 18731 18732 mutex_enter(SD_MUTEX(un)); 18733 18734 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 18735 } 18736 18737 /* 18738 * Function: sd_target_change_task 18739 * 18740 * Description: Handle dynamic target change 18741 * 18742 * Context: Executes in a taskq() thread context 18743 */ 18744 static void 18745 sd_target_change_task(void *arg) 18746 { 18747 struct sd_lun *un = arg; 18748 uint64_t capacity; 18749 diskaddr_t label_cap; 18750 uint_t lbasize; 18751 sd_ssc_t *ssc; 18752 18753 ASSERT(un != NULL); 18754 ASSERT(!mutex_owned(SD_MUTEX(un))); 18755 18756 if ((un->un_f_blockcount_is_valid == FALSE) || 18757 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 18758 return; 18759 } 18760 18761 ssc = sd_ssc_init(un); 18762 18763 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity, 18764 &lbasize, SD_PATH_DIRECT) != 0) { 18765 SD_ERROR(SD_LOG_ERROR, un, 18766 "sd_target_change_task: fail to read capacity\n"); 18767 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 18768 goto task_exit; 18769 } 18770 18771 mutex_enter(SD_MUTEX(un)); 18772 if (capacity <= un->un_blockcount) { 18773 mutex_exit(SD_MUTEX(un)); 18774 goto task_exit; 18775 } 18776 18777 sd_update_block_info(un, lbasize, capacity); 18778 mutex_exit(SD_MUTEX(un)); 18779 18780 /* 18781 * If lun is EFI labeled and lun capacity is greater than the 18782 * capacity contained in the label, log a sys event. 
18783 */ 18784 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 18785 (void*)SD_PATH_DIRECT) == 0) { 18786 mutex_enter(SD_MUTEX(un)); 18787 if (un->un_f_blockcount_is_valid && 18788 un->un_blockcount > label_cap) { 18789 mutex_exit(SD_MUTEX(un)); 18790 sd_log_lun_expansion_event(un, KM_SLEEP); 18791 } else { 18792 mutex_exit(SD_MUTEX(un)); 18793 } 18794 } 18795 18796 task_exit: 18797 sd_ssc_fini(ssc); 18798 } 18799 18800 /* 18801 * Function: sd_log_lun_expansion_event 18802 * 18803 * Description: Log lun expansion sys event 18804 * 18805 * Context: Never called from interrupt context 18806 */ 18807 static void 18808 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 18809 { 18810 int err; 18811 char *path; 18812 nvlist_t *dle_attr_list; 18813 18814 /* Allocate and build sysevent attribute list */ 18815 err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 18816 if (err != 0) { 18817 SD_ERROR(SD_LOG_ERROR, un, 18818 "sd_log_lun_expansion_event: fail to allocate space\n"); 18819 return; 18820 } 18821 18822 path = kmem_alloc(MAXPATHLEN, km_flag); 18823 if (path == NULL) { 18824 nvlist_free(dle_attr_list); 18825 SD_ERROR(SD_LOG_ERROR, un, 18826 "sd_log_lun_expansion_event: fail to allocate space\n"); 18827 return; 18828 } 18829 /* 18830 * Add path attribute to identify the lun. 18831 * We are using minor node 'a' as the sysevent attribute. 18832 */ 18833 (void) snprintf(path, MAXPATHLEN, "/devices"); 18834 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 18835 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 18836 ":a"); 18837 18838 err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path); 18839 if (err != 0) { 18840 nvlist_free(dle_attr_list); 18841 kmem_free(path, MAXPATHLEN); 18842 SD_ERROR(SD_LOG_ERROR, un, 18843 "sd_log_lun_expansion_event: fail to add attribute\n"); 18844 return; 18845 } 18846 18847 /* Log dynamic lun expansion sysevent */ 18848 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 18849 ESC_DEV_DLE, dle_attr_list, NULL, km_flag); 18850 if (err != DDI_SUCCESS) { 18851 SD_ERROR(SD_LOG_ERROR, un, 18852 "sd_log_lun_expansion_event: fail to log sysevent\n"); 18853 } 18854 18855 nvlist_free(dle_attr_list); 18856 kmem_free(path, MAXPATHLEN); 18857 } 18858 18859 /* 18860 * Function: sd_media_change_task 18861 * 18862 * Description: Recovery action for CDROM to become available. 18863 * 18864 * Context: Executes in a taskq() thread context 18865 */ 18866 18867 static void 18868 sd_media_change_task(void *arg) 18869 { 18870 struct scsi_pkt *pktp = arg; 18871 struct sd_lun *un; 18872 struct buf *bp; 18873 struct sd_xbuf *xp; 18874 int err = 0; 18875 int retry_count = 0; 18876 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 18877 struct sd_sense_info si; 18878 18879 ASSERT(pktp != NULL); 18880 bp = (struct buf *)pktp->pkt_private; 18881 ASSERT(bp != NULL); 18882 xp = SD_GET_XBUF(bp); 18883 ASSERT(xp != NULL); 18884 un = SD_GET_UN(bp); 18885 ASSERT(un != NULL); 18886 ASSERT(!mutex_owned(SD_MUTEX(un))); 18887 ASSERT(un->un_f_monitor_media_state); 18888 18889 si.ssi_severity = SCSI_ERR_INFO; 18890 si.ssi_pfa_flag = FALSE; 18891 18892 /* 18893 * When a reset is issued on a CDROM, it takes a long time to 18894 * recover. First few attempts to read capacity and other things 18895 * related to handling unit attention fail (with a ASC 0x4 and 18896 * ASCQ 0x1). In that case we want to do enough retries and we want 18897 * to limit the retries in other cases of genuine failures like 18898 * no media in drive. 
18899 */ 18900 while (retry_count++ < retry_limit) { 18901 if ((err = sd_handle_mchange(un)) == 0) { 18902 break; 18903 } 18904 if (err == EAGAIN) { 18905 retry_limit = SD_UNIT_ATTENTION_RETRY; 18906 } 18907 /* Sleep for 0.5 sec. & try again */ 18908 delay(drv_usectohz(500000)); 18909 } 18910 18911 /* 18912 * Dispatch (retry or fail) the original command here, 18913 * along with appropriate console messages.... 18914 * 18915 * Must grab the mutex before calling sd_retry_command, 18916 * sd_print_sense_msg and sd_return_failed_command. 18917 */ 18918 mutex_enter(SD_MUTEX(un)); 18919 if (err != SD_CMD_SUCCESS) { 18920 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18921 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18922 si.ssi_severity = SCSI_ERR_FATAL; 18923 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18924 sd_return_failed_command(un, bp, EIO); 18925 } else { 18926 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 18927 &si, EIO, (clock_t)0, NULL); 18928 } 18929 mutex_exit(SD_MUTEX(un)); 18930 } 18931 18932 18933 18934 /* 18935 * Function: sd_handle_mchange 18936 * 18937 * Description: Perform geometry validation & other recovery when CDROM 18938 * has been removed from drive. 18939 * 18940 * Return Code: 0 for success 18941 * errno-type return code of either sd_send_scsi_DOORLOCK() or 18942 * sd_send_scsi_READ_CAPACITY() 18943 * 18944 * Context: Executes in a taskq() thread context 18945 */ 18946 18947 static int 18948 sd_handle_mchange(struct sd_lun *un) 18949 { 18950 uint64_t capacity; 18951 uint32_t lbasize; 18952 int rval; 18953 sd_ssc_t *ssc; 18954 18955 ASSERT(!mutex_owned(SD_MUTEX(un))); 18956 ASSERT(un->un_f_monitor_media_state); 18957 18958 ssc = sd_ssc_init(un); 18959 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 18960 SD_PATH_DIRECT_PRIORITY); 18961 18962 if (rval != 0) 18963 goto failed; 18964 18965 mutex_enter(SD_MUTEX(un)); 18966 sd_update_block_info(un, lbasize, capacity); 18967 18968 if (un->un_errstats != NULL) { 18969 struct sd_errstats *stp = 18970 (struct sd_errstats *)un->un_errstats->ks_data; 18971 stp->sd_capacity.value.ui64 = (uint64_t) 18972 ((uint64_t)un->un_blockcount * 18973 (uint64_t)un->un_tgt_blocksize); 18974 } 18975 18976 /* 18977 * Check if the media in the device is writable or not 18978 */ 18979 if (ISCD(un)) { 18980 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY); 18981 } 18982 18983 /* 18984 * Note: Maybe let the strategy/partitioning chain worry about getting 18985 * valid geometry. 18986 */ 18987 mutex_exit(SD_MUTEX(un)); 18988 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 18989 18990 18991 if (cmlb_validate(un->un_cmlbhandle, 0, 18992 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 18993 sd_ssc_fini(ssc); 18994 return (EIO); 18995 } else { 18996 if (un->un_f_pkstats_enabled) { 18997 sd_set_pstats(un); 18998 SD_TRACE(SD_LOG_IO_PARTITION, un, 18999 "sd_handle_mchange: un:0x%p pstats created and " 19000 "set\n", un); 19001 } 19002 } 19003 19004 /* 19005 * Try to lock the door 19006 */ 19007 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 19008 SD_PATH_DIRECT_PRIORITY); 19009 failed: 19010 if (rval != 0) 19011 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19012 sd_ssc_fini(ssc); 19013 return (rval); 19014 } 19015 19016 19017 /* 19018 * Function: sd_send_scsi_DOORLOCK 19019 * 19020 * Description: Issue the scsi DOOR LOCK command 19021 * 19022 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19023 * structure for this target. 
19024 * flag - SD_REMOVAL_ALLOW 19025 * SD_REMOVAL_PREVENT 19026 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19027 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19028 * to use the USCSI "direct" chain and bypass the normal 19029 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19030 * command is issued as part of an error recovery action. 19031 * 19032 * Return Code: 0 - Success 19033 * errno return code from sd_ssc_send() 19034 * 19035 * Context: Can sleep. 19036 */ 19037 19038 static int 19039 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag) 19040 { 19041 struct scsi_extended_sense sense_buf; 19042 union scsi_cdb cdb; 19043 struct uscsi_cmd ucmd_buf; 19044 int status; 19045 struct sd_lun *un; 19046 19047 ASSERT(ssc != NULL); 19048 un = ssc->ssc_un; 19049 ASSERT(un != NULL); 19050 ASSERT(!mutex_owned(SD_MUTEX(un))); 19051 19052 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 19053 19054 /* already determined doorlock is not supported, fake success */ 19055 if (un->un_f_doorlock_supported == FALSE) { 19056 return (0); 19057 } 19058 19059 /* 19060 * If we are ejecting and see an SD_REMOVAL_PREVENT 19061 * ignore the command so we can complete the eject 19062 * operation. 19063 */ 19064 if (flag == SD_REMOVAL_PREVENT) { 19065 mutex_enter(SD_MUTEX(un)); 19066 if (un->un_f_ejecting == TRUE) { 19067 mutex_exit(SD_MUTEX(un)); 19068 return (EAGAIN); 19069 } 19070 mutex_exit(SD_MUTEX(un)); 19071 } 19072 19073 bzero(&cdb, sizeof (cdb)); 19074 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19075 19076 cdb.scc_cmd = SCMD_DOORLOCK; 19077 cdb.cdb_opaque[4] = (uchar_t)flag; 19078 19079 ucmd_buf.uscsi_cdb = (char *)&cdb; 19080 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19081 ucmd_buf.uscsi_bufaddr = NULL; 19082 ucmd_buf.uscsi_buflen = 0; 19083 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19084 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19085 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19086 ucmd_buf.uscsi_timeout = 15; 19087 19088 SD_TRACE(SD_LOG_IO, un, 19089 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n"); 19090 19091 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19092 UIO_SYSSPACE, path_flag); 19093 19094 if (status == 0) 19095 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19096 19097 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 19098 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19099 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 19100 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19101 19102 /* fake success and skip subsequent doorlock commands */ 19103 un->un_f_doorlock_supported = FALSE; 19104 return (0); 19105 } 19106 19107 return (status); 19108 } 19109 19110 /* 19111 * Function: sd_send_scsi_READ_CAPACITY 19112 * 19113 * Description: This routine uses the scsi READ CAPACITY command to determine 19114 * the device capacity in number of blocks and the device native 19115 * block size. If this function returns a failure, then the 19116 * values in *capp and *lbap are undefined. If the capacity 19117 * returned is 0xffffffff then the lun is too large for a 19118 * normal READ CAPACITY command and the results of a 19119 * READ CAPACITY 16 will be used instead. 19120 * 19121 * Arguments: ssc - ssc contains ptr to soft state struct for the target 19122 * capp - ptr to unsigned 64-bit variable to receive the 19123 * capacity value from the command. 
19124 * lbap - ptr to unsigned 32-bit variable to receive the 19125 * block size value from the command 19126 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19127 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19128 * to use the USCSI "direct" chain and bypass the normal 19129 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19130 * command is issued as part of an error recovery action. 19131 * 19132 * Return Code: 0 - Success 19133 * EIO - IO error 19134 * EACCES - Reservation conflict detected 19135 * EAGAIN - Device is becoming ready 19136 * errno return code from sd_ssc_send() 19137 * 19138 * Context: Can sleep. Blocks until command completes. 19139 */ 19140 19141 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 19142 19143 static int 19144 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap, 19145 int path_flag) 19146 { 19147 struct scsi_extended_sense sense_buf; 19148 struct uscsi_cmd ucmd_buf; 19149 union scsi_cdb cdb; 19150 uint32_t *capacity_buf; 19151 uint64_t capacity; 19152 uint32_t lbasize; 19153 int status; 19154 struct sd_lun *un; 19155 19156 ASSERT(ssc != NULL); 19157 19158 un = ssc->ssc_un; 19159 ASSERT(un != NULL); 19160 ASSERT(!mutex_owned(SD_MUTEX(un))); 19161 ASSERT(capp != NULL); 19162 ASSERT(lbap != NULL); 19163 19164 SD_TRACE(SD_LOG_IO, un, 19165 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 19166 19167 /* 19168 * First send a READ_CAPACITY command to the target. 19169 * (This command is mandatory under SCSI-2.) 19170 * 19171 * Set up the CDB for the READ_CAPACITY command. The Partial 19172 * Medium Indicator bit is cleared. The address field must be 19173 * zero if the PMI bit is zero. 19174 */ 19175 bzero(&cdb, sizeof (cdb)); 19176 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19177 19178 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 19179 19180 cdb.scc_cmd = SCMD_READ_CAPACITY; 19181 19182 ucmd_buf.uscsi_cdb = (char *)&cdb; 19183 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19184 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 19185 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 19186 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19187 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19188 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19189 ucmd_buf.uscsi_timeout = 60; 19190 19191 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19192 UIO_SYSSPACE, path_flag); 19193 19194 switch (status) { 19195 case 0: 19196 /* Return failure if we did not get valid capacity data. */ 19197 if (ucmd_buf.uscsi_resid != 0) { 19198 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 19199 "sd_send_scsi_READ_CAPACITY received " 19200 "invalid capacity data"); 19201 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19202 return (EIO); 19203 } 19204 19205 /* 19206 * Read capacity and block size from the READ CAPACITY 10 data. 19207 * This data may be adjusted later due to device specific 19208 * issues. 19209 * 19210 * According to the SCSI spec, the READ CAPACITY 10 19211 * command returns the following: 19212 * 19213 * bytes 0-3: Maximum logical block address available. 19214 * (MSB in byte:0 & LSB in byte:3) 19215 * 19216 * bytes 4-7: Block length in bytes 19217 * (MSB in byte:4 & LSB in byte:7) 19218 * 19219 */ 19220 capacity = BE_32(capacity_buf[0]); 19221 lbasize = BE_32(capacity_buf[1]); 19222 19223 /* 19224 * Done with capacity_buf 19225 */ 19226 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19227 19228 /* 19229 * if the reported capacity is set to all 0xf's, then 19230 * this disk is too large and requires SBC-2 commands.
19231 * Reissue the request using READ CAPACITY 16. 19232 */ 19233 if (capacity == 0xffffffff) { 19234 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19235 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, 19236 &lbasize, path_flag); 19237 if (status != 0) { 19238 return (status); 19239 } 19240 } 19241 break; /* Success! */ 19242 case EIO: 19243 switch (ucmd_buf.uscsi_status) { 19244 case STATUS_RESERVATION_CONFLICT: 19245 status = EACCES; 19246 break; 19247 case STATUS_CHECK: 19248 /* 19249 * Check condition; look for ASC/ASCQ of 0x04/0x01 19250 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 19251 */ 19252 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19253 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 19254 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 19255 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19256 return (EAGAIN); 19257 } 19258 break; 19259 default: 19260 break; 19261 } 19262 /* FALLTHRU */ 19263 default: 19264 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19265 return (status); 19266 } 19267 19268 /* 19269 * Some ATAPI CD-ROM drives report inaccurate LBA size values 19270 * (2352 and 0 are common) so for these devices always force the value 19271 * to 2048 as required by the ATAPI specs. 19272 */ 19273 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 19274 lbasize = 2048; 19275 } 19276 19277 /* 19278 * Get the maximum LBA value from the READ CAPACITY data. 19279 * Here we assume that the Partial Medium Indicator (PMI) bit 19280 * was cleared when issuing the command. This means that the LBA 19281 * returned from the device is the LBA of the last logical block 19282 * on the logical unit. The actual logical block count will be 19283 * this value plus one. 19284 * 19285 * Currently the capacity is saved in terms of un->un_sys_blocksize, 19286 * so scale the capacity value to reflect this. 19287 */ 19288 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize); 19289 19290 /* 19291 * Copy the values from the READ CAPACITY command into the space 19292 * provided by the caller. 19293 */ 19294 *capp = capacity; 19295 *lbap = lbasize; 19296 19297 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 19298 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 19299 19300 /* 19301 * Both the lbasize and capacity from the device must be nonzero, 19302 * otherwise we assume that the values are not valid and return 19303 * failure to the caller. (4203735) 19304 */ 19305 if ((capacity == 0) || (lbasize == 0)) { 19306 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 19307 "sd_send_scsi_READ_CAPACITY received invalid value " 19308 "capacity %llu lbasize %d", capacity, lbasize); 19309 return (EIO); 19310 } 19311 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19312 return (0); 19313 } 19314 19315 /* 19316 * Function: sd_send_scsi_READ_CAPACITY_16 19317 * 19318 * Description: This routine uses the scsi READ CAPACITY 16 command to 19319 * determine the device capacity in number of blocks and the 19320 * device native block size. If this function returns a failure, 19321 * then the values in *capp and *lbap are undefined. 19322 * This routine should always be called by 19323 * sd_send_scsi_READ_CAPACITY which will apply any device 19324 * specific adjustments to capacity and lbasize. 19325 * 19326 * Arguments: ssc - ssc contains ptr to soft state struct for the target 19327 * capp - ptr to unsigned 64-bit variable to receive the 19328 * capacity value from the command.
19329 * lbap - ptr to unsigned 32-bit variable to receive the 19330 * block size value from the command 19331 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19332 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19333 * to use the USCSI "direct" chain and bypass the normal 19334 * command waitq. SD_PATH_DIRECT_PRIORITY is used when 19335 * this command is issued as part of an error recovery 19336 * action. 19337 * 19338 * Return Code: 0 - Success 19339 * EIO - IO error 19340 * EACCES - Reservation conflict detected 19341 * EAGAIN - Device is becoming ready 19342 * errno return code from sd_ssc_send() 19343 * 19344 * Context: Can sleep. Blocks until command completes. 19345 */ 19346 19347 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 19348 19349 static int 19350 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, 19351 uint32_t *lbap, int path_flag) 19352 { 19353 struct scsi_extended_sense sense_buf; 19354 struct uscsi_cmd ucmd_buf; 19355 union scsi_cdb cdb; 19356 uint64_t *capacity16_buf; 19357 uint64_t capacity; 19358 uint32_t lbasize; 19359 int status; 19360 struct sd_lun *un; 19361 19362 ASSERT(ssc != NULL); 19363 19364 un = ssc->ssc_un; 19365 ASSERT(un != NULL); 19366 ASSERT(!mutex_owned(SD_MUTEX(un))); 19367 ASSERT(capp != NULL); 19368 ASSERT(lbap != NULL); 19369 19370 SD_TRACE(SD_LOG_IO, un, 19371 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un); 19372 19373 /* 19374 * First send a READ_CAPACITY_16 command to the target. 19375 * 19376 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 19377 * Medium Indicator bit is cleared. The address field must be 19378 * zero if the PMI bit is zero. 19379 */ 19380 bzero(&cdb, sizeof (cdb)); 19381 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19382 19383 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 19384 19385 ucmd_buf.uscsi_cdb = (char *)&cdb; 19386 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 19387 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 19388 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 19389 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19390 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19391 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19392 ucmd_buf.uscsi_timeout = 60; 19393 19394 /* 19395 * Read Capacity (16) is a Service Action In command. One 19396 * command byte (0x9E) is overloaded for multiple operations, 19397 * with the second CDB byte specifying the desired operation. 19398 */ 19399 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 19400 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 19401 19402 /* 19403 * Fill in allocation length field 19404 */ 19405 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 19406 19407 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19408 UIO_SYSSPACE, path_flag); 19409 19410 switch (status) { 19411 case 0: 19412 /* Return failure if we did not get valid capacity data. */ 19413 if (ucmd_buf.uscsi_resid > 20) { 19414 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 19415 "sd_send_scsi_READ_CAPACITY_16 received " 19416 "invalid capacity data"); 19417 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19418 return (EIO); 19419 } 19420 19421 /* 19422 * Read capacity and block size from the READ CAPACITY 16 data. 19423 * This data may be adjusted later due to device specific 19424 * issues. 19425 * 19426 * According to the SCSI spec, the READ CAPACITY 16 19427 * command returns the following: 19428 * 19429 * bytes 0-7: Maximum logical block address available.
19430 * (MSB in byte:0 & LSB in byte:7) 19431 * 19432 * bytes 8-11: Block length in bytes 19433 * (MSB in byte:8 & LSB in byte:11) 19434 * 19435 */ 19436 capacity = BE_64(capacity16_buf[0]); 19437 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 19438 19439 /* 19440 * Done with capacity16_buf 19441 */ 19442 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19443 19444 /* 19445 * if the reported capacity is set to all 0xf's, then 19446 * this disk is too large. This could only happen with 19447 * a device that supports LBAs larger than 64 bits which 19448 * are not defined by any current T10 standards. 19449 */ 19450 if (capacity == 0xffffffffffffffff) { 19451 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 19452 "disk is too large"); 19453 return (EIO); 19454 } 19455 break; /* Success! */ 19456 case EIO: 19457 switch (ucmd_buf.uscsi_status) { 19458 case STATUS_RESERVATION_CONFLICT: 19459 status = EACCES; 19460 break; 19461 case STATUS_CHECK: 19462 /* 19463 * Check condition; look for ASC/ASCQ of 0x04/0x01 19464 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 19465 */ 19466 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19467 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 19468 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 19469 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19470 return (EAGAIN); 19471 } 19472 break; 19473 default: 19474 break; 19475 } 19476 /* FALLTHRU */ 19477 default: 19478 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19479 return (status); 19480 } 19481 19482 *capp = capacity; 19483 *lbap = lbasize; 19484 19485 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 19486 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 19487 19488 return (0); 19489 } 19490 19491 19492 /* 19493 * Function: sd_send_scsi_START_STOP_UNIT 19494 * 19495 * Description: Issue a scsi START STOP UNIT command to the target. 19496 * 19497 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19498 * structure for this target. 19499 * flag - SD_TARGET_START 19500 * SD_TARGET_STOP 19501 * SD_TARGET_EJECT 19502 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19503 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19504 * to use the USCSI "direct" chain and bypass the normal 19505 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19506 * command is issued as part of an error recovery action. 19507 * 19508 * Return Code: 0 - Success 19509 * EIO - IO error 19510 * EACCES - Reservation conflict detected 19511 * ENXIO - Not Ready, medium not present 19512 * errno return code from sd_ssc_send() 19513 * 19514 * Context: Can sleep. 19515 */ 19516 19517 static int 19518 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, int path_flag) 19519 { 19520 struct scsi_extended_sense sense_buf; 19521 union scsi_cdb cdb; 19522 struct uscsi_cmd ucmd_buf; 19523 int status; 19524 struct sd_lun *un; 19525 19526 ASSERT(ssc != NULL); 19527 un = ssc->ssc_un; 19528 ASSERT(un != NULL); 19529 ASSERT(!mutex_owned(SD_MUTEX(un))); 19530 19531 SD_TRACE(SD_LOG_IO, un, 19532 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 19533 19534 if (un->un_f_check_start_stop && 19535 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 19536 (un->un_f_start_stop_supported != TRUE)) { 19537 return (0); 19538 } 19539 19540 /* 19541 * If we are performing an eject operation and 19542 * we receive any command other than SD_TARGET_EJECT, 19543 * we should immediately return.
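 * Returning EAGAIN here lets the caller retry once the eject
 * has completed and un_f_ejecting has been cleared.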
19544 */ 19545 if (flag != SD_TARGET_EJECT) { 19546 mutex_enter(SD_MUTEX(un)); 19547 if (un->un_f_ejecting == TRUE) { 19548 mutex_exit(SD_MUTEX(un)); 19549 return (EAGAIN); 19550 } 19551 mutex_exit(SD_MUTEX(un)); 19552 } 19553 19554 bzero(&cdb, sizeof (cdb)); 19555 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19556 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19557 19558 cdb.scc_cmd = SCMD_START_STOP; 19559 cdb.cdb_opaque[4] = (uchar_t)flag; 19560 19561 ucmd_buf.uscsi_cdb = (char *)&cdb; 19562 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19563 ucmd_buf.uscsi_bufaddr = NULL; 19564 ucmd_buf.uscsi_buflen = 0; 19565 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19566 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19567 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19568 ucmd_buf.uscsi_timeout = 200; 19569 19570 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19571 UIO_SYSSPACE, path_flag); 19572 19573 switch (status) { 19574 case 0: 19575 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19576 break; /* Success! */ 19577 case EIO: 19578 switch (ucmd_buf.uscsi_status) { 19579 case STATUS_RESERVATION_CONFLICT: 19580 status = EACCES; 19581 break; 19582 case STATUS_CHECK: 19583 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 19584 switch (scsi_sense_key( 19585 (uint8_t *)&sense_buf)) { 19586 case KEY_ILLEGAL_REQUEST: 19587 status = ENOTSUP; 19588 break; 19589 case KEY_NOT_READY: 19590 if (scsi_sense_asc( 19591 (uint8_t *)&sense_buf) 19592 == 0x3A) { 19593 status = ENXIO; 19594 } 19595 break; 19596 default: 19597 break; 19598 } 19599 } 19600 break; 19601 default: 19602 break; 19603 } 19604 break; 19605 default: 19606 break; 19607 } 19608 19609 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 19610 19611 return (status); 19612 } 19613 19614 19615 /* 19616 * Function: sd_start_stop_unit_callback 19617 * 19618 * Description: timeout(9F) callback to begin recovery process for a 19619 * device that has spun down. 19620 * 19621 * Arguments: arg - pointer to associated softstate struct. 19622 * 19623 * Context: Executes in a timeout(9F) thread context 19624 */ 19625 19626 static void 19627 sd_start_stop_unit_callback(void *arg) 19628 { 19629 struct sd_lun *un = arg; 19630 ASSERT(un != NULL); 19631 ASSERT(!mutex_owned(SD_MUTEX(un))); 19632 19633 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 19634 19635 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 19636 } 19637 19638 19639 /* 19640 * Function: sd_start_stop_unit_task 19641 * 19642 * Description: Recovery procedure when a drive is spun down. 19643 * 19644 * Arguments: arg - pointer to associated softstate struct. 19645 * 19646 * Context: Executes in a taskq() thread context 19647 */ 19648 19649 static void 19650 sd_start_stop_unit_task(void *arg) 19651 { 19652 struct sd_lun *un = arg; 19653 sd_ssc_t *ssc; 19654 int rval; 19655 19656 ASSERT(un != NULL); 19657 ASSERT(!mutex_owned(SD_MUTEX(un))); 19658 19659 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 19660 19661 /* 19662 * Some unformatted drives report not ready error, no need to 19663 * restart if format has been initiated. 19664 */ 19665 mutex_enter(SD_MUTEX(un)); 19666 if (un->un_f_format_in_progress == TRUE) { 19667 mutex_exit(SD_MUTEX(un)); 19668 return; 19669 } 19670 mutex_exit(SD_MUTEX(un)); 19671 19672 /* 19673 * When a START STOP command is issued from here, it is part of a 19674 * failure recovery operation and must be issued before any other 19675 * commands, including any pending retries. 
Thus it must be sent 19676 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 19677 * succeeds or not; we will start I/O after the attempt. 19678 */ 19679 ssc = sd_ssc_init(un); 19680 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 19681 SD_PATH_DIRECT_PRIORITY); 19682 if (rval != 0) 19683 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19684 sd_ssc_fini(ssc); 19685 /* 19686 * The above call blocks until the START_STOP_UNIT command completes. 19687 * Now that it has completed, we must re-try the original IO that 19688 * received the NOT READY condition in the first place. There are 19689 * three possible conditions here: 19690 * 19691 * (1) The original IO is on un_retry_bp. 19692 * (2) The original IO is on the regular wait queue, and un_retry_bp 19693 * is NULL. 19694 * (3) The original IO is on the regular wait queue, and un_retry_bp 19695 * points to some other, unrelated bp. 19696 * 19697 * For each case, we must call sd_start_cmds() with un_retry_bp 19698 * as the argument. If un_retry_bp is NULL, this will initiate 19699 * processing of the regular wait queue. If un_retry_bp is not NULL, 19700 * then this will process the bp on un_retry_bp. That may or may not 19701 * be the original IO, but that does not matter: the important thing 19702 * is to keep the IO processing going at this point. 19703 * 19704 * Note: This is a very specific error recovery sequence associated 19705 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 19706 * serialize the I/O with completion of the spin-up. 19707 */ 19708 mutex_enter(SD_MUTEX(un)); 19709 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19710 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 19711 un, un->un_retry_bp); 19712 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 19713 sd_start_cmds(un, un->un_retry_bp); 19714 mutex_exit(SD_MUTEX(un)); 19715 19716 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 19717 } 19718 19719 19720 /* 19721 * Function: sd_send_scsi_INQUIRY 19722 * 19723 * Description: Issue the scsi INQUIRY command. 19724 * 19725 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19726 * structure for this target. 19727 * bufaddr - buffer to receive the inquiry data 19728 * buflen - size of bufaddr in bytes 19729 * evpd - value for the EVPD bit (CDB byte 1) 19730 * page_code - VPD page code to request (CDB byte 2) 19731 * residp - if non-NULL, receives the uscsi_resid of the command 19732 * 19733 * Return Code: 0 - Success 19734 * errno return code from sd_ssc_send() 19735 * 19736 * Context: Can sleep. Does not return until command is completed.
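 *
 *		An illustrative call (not taken from the driver) that
 *		fetches the unit serial number VPD page (EVPD=1,
 *		page 0x80) could be:
 *
 *			size_t resid;
 *			uchar_t inqbuf[0xff];
 *
 *			status = sd_send_scsi_INQUIRY(ssc, inqbuf,
 *			    sizeof (inqbuf), 0x01, 0x80, &resid);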
19737 */ 19738 19739 static int 19740 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen, 19741 uchar_t evpd, uchar_t page_code, size_t *residp) 19742 { 19743 union scsi_cdb cdb; 19744 struct uscsi_cmd ucmd_buf; 19745 int status; 19746 struct sd_lun *un; 19747 19748 ASSERT(ssc != NULL); 19749 un = ssc->ssc_un; 19750 ASSERT(un != NULL); 19751 ASSERT(!mutex_owned(SD_MUTEX(un))); 19752 ASSERT(bufaddr != NULL); 19753 19754 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 19755 19756 bzero(&cdb, sizeof (cdb)); 19757 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19758 bzero(bufaddr, buflen); 19759 19760 cdb.scc_cmd = SCMD_INQUIRY; 19761 cdb.cdb_opaque[1] = evpd; 19762 cdb.cdb_opaque[2] = page_code; 19763 FORMG0COUNT(&cdb, buflen); 19764 19765 ucmd_buf.uscsi_cdb = (char *)&cdb; 19766 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19767 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19768 ucmd_buf.uscsi_buflen = buflen; 19769 ucmd_buf.uscsi_rqbuf = NULL; 19770 ucmd_buf.uscsi_rqlen = 0; 19771 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 19772 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 19773 19774 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19775 UIO_SYSSPACE, SD_PATH_DIRECT); 19776 19777 /* 19778 * Only handle status == 0; the upper-level caller 19779 * will apply a different assessment based on the context. 19780 */ 19781 if (status == 0) 19782 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19783 19784 if ((status == 0) && (residp != NULL)) { 19785 *residp = ucmd_buf.uscsi_resid; 19786 } 19787 19788 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 19789 19790 return (status); 19791 } 19792 19793 19794 /* 19795 * Function: sd_send_scsi_TEST_UNIT_READY 19796 * 19797 * Description: Issue the scsi TEST UNIT READY command. 19798 * This routine can be told to set the flag USCSI_DIAGNOSE to 19799 * prevent retrying failed commands. Use this when the intent 19800 * is either to check for device readiness, to clear a Unit 19801 * Attention, or to clear any outstanding sense data. 19802 * However under specific conditions the expected behavior 19803 * is for retries to bring a device ready, so use the flag 19804 * with caution. 19805 * 19806 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19807 * structure for this target. 19808 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 19809 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 19810 * 0: don't check for media present, do retries on cmd. 19811 * 19812 * Return Code: 0 - Success 19813 * EIO - IO error 19814 * EACCES - Reservation conflict detected 19815 * ENXIO - Not Ready, medium not present 19816 * errno return code from sd_ssc_send() 19817 * 19818 * Context: Can sleep. Does not return until command is completed. 19819 */ 19820 19821 static int 19822 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag) 19823 { 19824 struct scsi_extended_sense sense_buf; 19825 union scsi_cdb cdb; 19826 struct uscsi_cmd ucmd_buf; 19827 int status; 19828 struct sd_lun *un; 19829 19830 ASSERT(ssc != NULL); 19831 un = ssc->ssc_un; 19832 ASSERT(un != NULL); 19833 ASSERT(!mutex_owned(SD_MUTEX(un))); 19834 19835 SD_TRACE(SD_LOG_IO, un, 19836 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 19837 19838 /* 19839 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 19840 * timeouts when they receive a TUR and the queue is not empty.
Check 19841 * the configuration flag set during attach (indicating the drive has 19842 * this firmware bug) and un_ncmds_in_transport before issuing the 19843 * TUR. If there are 19844 * pending commands, return success; this is a bit arbitrary but is ok 19845 * for non-removables (i.e. the eliteI disks) and non-clustering 19846 * configurations. 19847 */ 19848 if (un->un_f_cfg_tur_check == TRUE) { 19849 mutex_enter(SD_MUTEX(un)); 19850 if (un->un_ncmds_in_transport != 0) { 19851 mutex_exit(SD_MUTEX(un)); 19852 return (0); 19853 } 19854 mutex_exit(SD_MUTEX(un)); 19855 } 19856 19857 bzero(&cdb, sizeof (cdb)); 19858 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19859 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19860 19861 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 19862 19863 ucmd_buf.uscsi_cdb = (char *)&cdb; 19864 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19865 ucmd_buf.uscsi_bufaddr = NULL; 19866 ucmd_buf.uscsi_buflen = 0; 19867 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19868 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19869 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19870 19871 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 19872 if ((flag & SD_DONT_RETRY_TUR) != 0) { 19873 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 19874 } 19875 ucmd_buf.uscsi_timeout = 60; 19876 19877 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19878 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 19879 SD_PATH_STANDARD)); 19880 19881 switch (status) { 19882 case 0: 19883 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19884 break; /* Success! */ 19885 case EIO: 19886 switch (ucmd_buf.uscsi_status) { 19887 case STATUS_RESERVATION_CONFLICT: 19888 status = EACCES; 19889 break; 19890 case STATUS_CHECK: 19891 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 19892 break; 19893 } 19894 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19895 (scsi_sense_key((uint8_t *)&sense_buf) == 19896 KEY_NOT_READY) && 19897 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 19898 status = ENXIO; 19899 } 19900 break; 19901 default: 19902 break; 19903 } 19904 break; 19905 default: 19906 break; 19907 } 19908 19909 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 19910 19911 return (status); 19912 } 19913 19914 /* 19915 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 19916 * 19917 * Description: Issue the scsi PERSISTENT RESERVE IN command. 19918 * 19919 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19920 * structure for this target. 19921 * usr_cmd - SD_READ_KEYS (read registered keys) or 19922 * SD_READ_RESV (read current reservation) 19923 * data_len - length of data_bufp in bytes 19924 * data_bufp - buffer for the returned data; if NULL, data_len 19925 * must be 0 and a default buffer is allocated 19926 * 19927 * Return Code: 0 - Success * EACCES * ENOTSUP * errno return code from sd_ssc_send() * * Context: Can sleep. Does not return until command is completed.
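 *
 *		Illustrative call (not taken from the driver), letting the
 *		routine allocate its own default buffer:
 *
 *			status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc,
 *			    SD_READ_KEYS, 0, NULL);
 *
 *		A NULL data_bufp with a data_len of 0 causes a default
 *		MHIOC_RESV_KEY_SIZE buffer to be allocated and freed
 *		internally.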
19928 */ 19929 19930 static int 19931 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd, 19932 uint16_t data_len, uchar_t *data_bufp) 19933 { 19934 struct scsi_extended_sense sense_buf; 19935 union scsi_cdb cdb; 19936 struct uscsi_cmd ucmd_buf; 19937 int status; 19938 int no_caller_buf = FALSE; 19939 struct sd_lun *un; 19940 19941 ASSERT(ssc != NULL); 19942 un = ssc->ssc_un; 19943 ASSERT(un != NULL); 19944 ASSERT(!mutex_owned(SD_MUTEX(un))); 19945 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 19946 19947 SD_TRACE(SD_LOG_IO, un, 19948 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 19949 19950 bzero(&cdb, sizeof (cdb)); 19951 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19952 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19953 if (data_bufp == NULL) { 19954 /* Allocate a default buf if the caller did not give one */ 19955 ASSERT(data_len == 0); 19956 data_len = MHIOC_RESV_KEY_SIZE; 19957 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 19958 no_caller_buf = TRUE; 19959 } 19960 19961 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 19962 cdb.cdb_opaque[1] = usr_cmd; 19963 FORMG1COUNT(&cdb, data_len); 19964 19965 ucmd_buf.uscsi_cdb = (char *)&cdb; 19966 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19967 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 19968 ucmd_buf.uscsi_buflen = data_len; 19969 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19970 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19971 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19972 ucmd_buf.uscsi_timeout = 60; 19973 19974 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19975 UIO_SYSSPACE, SD_PATH_STANDARD); 19976 19977 switch (status) { 19978 case 0: 19979 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19980 19981 break; /* Success! */ 19982 case EIO: 19983 switch (ucmd_buf.uscsi_status) { 19984 case STATUS_RESERVATION_CONFLICT: 19985 status = EACCES; 19986 break; 19987 case STATUS_CHECK: 19988 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19989 (scsi_sense_key((uint8_t *)&sense_buf) == 19990 KEY_ILLEGAL_REQUEST)) { 19991 status = ENOTSUP; 19992 } 19993 break; 19994 default: 19995 break; 19996 } 19997 break; 19998 default: 19999 break; 20000 } 20001 20002 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 20003 20004 if (no_caller_buf == TRUE) { 20005 kmem_free(data_bufp, data_len); 20006 } 20007 20008 return (status); 20009 } 20010 20011 20012 /* 20013 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 20014 * 20015 * Description: This routine is the driver entry point for handling the 20016 * multi-host persistent reservation requests (MHIOCGRP_REGISTER, 20017 * MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT, and 20018 * MHIOCGRP_REGISTERANDIGNOREKEY) by sending the corresponding SCSI-3 PROUT commands to the device. 20019 * 20020 * Arguments: ssc - ssc contains un - pointer to soft state struct 20021 * for the target. 20022 * usr_cmd SCSI-3 reservation facility command (one of 20023 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 20024 * SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_REGISTERANDIGNOREKEY) 20025 * usr_bufp - user provided pointer to a register, reserve descriptor, or 20026 * preempt and abort structure (mhioc_register_t, 20027 * mhioc_resv_desc_t, mhioc_preemptandabort_t, mhioc_registerandignorekey_t) 20028 * 20029 * Return Code: 0 - Success 20030 * EACCES 20031 * ENOTSUP 20032 * errno return code from sd_ssc_send() 20033 * 20034 * Context: Can sleep. Does not return until command is completed.
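 *
 * [Editorial example -- not in the original source.] A sketch of how a
 * multihost application reaches this routine: registering a key is the
 * first step of any SCSI-3 PGR sequence. Assuming fd is an open
 * raw-device descriptor and mykey is an 8-byte key supplied by the
 * caller (both hypothetical):
 *
 *	mhioc_register_t r;
 *
 *	bzero(&r, sizeof (r));		-- old key: all zeros
 *	bcopy(mykey, r.newkey.key, MHIOC_RESV_KEY_SIZE);
 *	r.aptpl = B_FALSE;		-- don't persist across power loss
 *	(void) ioctl(fd, MHIOCGRP_REGISTER, &r);
 *
 * sdioctl() copies the structure in and calls this routine with
 * usr_cmd set to SD_SCSI3_REGISTER, which is how the oldkey/newkey
 * fields referenced below arrive here; an EACCES return corresponds to
 * the STATUS_RESERVATION_CONFLICT mapping below.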
20035 */ 20036 20037 static int 20038 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd, 20039 uchar_t *usr_bufp) 20040 { 20041 struct scsi_extended_sense sense_buf; 20042 union scsi_cdb cdb; 20043 struct uscsi_cmd ucmd_buf; 20044 int status; 20045 uchar_t data_len = sizeof (sd_prout_t); 20046 sd_prout_t *prp; 20047 struct sd_lun *un; 20048 20049 ASSERT(ssc != NULL); 20050 un = ssc->ssc_un; 20051 ASSERT(un != NULL); 20052 ASSERT(!mutex_owned(SD_MUTEX(un))); 20053 ASSERT(data_len == 24); /* required by scsi spec */ 20054 20055 SD_TRACE(SD_LOG_IO, un, 20056 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 20057 20058 if (usr_bufp == NULL) { 20059 return (EINVAL); 20060 } 20061 20062 bzero(&cdb, sizeof (cdb)); 20063 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20064 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20065 prp = kmem_zalloc(data_len, KM_SLEEP); 20066 20067 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 20068 cdb.cdb_opaque[1] = usr_cmd; 20069 FORMG1COUNT(&cdb, data_len); 20070 20071 ucmd_buf.uscsi_cdb = (char *)&cdb; 20072 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20073 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 20074 ucmd_buf.uscsi_buflen = data_len; 20075 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20076 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20077 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 20078 ucmd_buf.uscsi_timeout = 60; 20079 20080 switch (usr_cmd) { 20081 case SD_SCSI3_REGISTER: { 20082 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 20083 20084 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20085 bcopy(ptr->newkey.key, prp->service_key, 20086 MHIOC_RESV_KEY_SIZE); 20087 prp->aptpl = ptr->aptpl; 20088 break; 20089 } 20090 case SD_SCSI3_RESERVE: 20091 case SD_SCSI3_RELEASE: { 20092 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 20093 20094 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20095 prp->scope_address = BE_32(ptr->scope_specific_addr); 20096 cdb.cdb_opaque[2] = ptr->type; 20097 break; 20098 } 20099 case SD_SCSI3_PREEMPTANDABORT: { 20100 mhioc_preemptandabort_t *ptr = 20101 (mhioc_preemptandabort_t *)usr_bufp; 20102 20103 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20104 bcopy(ptr->victim_key.key, prp->service_key, 20105 MHIOC_RESV_KEY_SIZE); 20106 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 20107 cdb.cdb_opaque[2] = ptr->resvdesc.type; 20108 ucmd_buf.uscsi_flags |= USCSI_HEAD; 20109 break; 20110 } 20111 case SD_SCSI3_REGISTERANDIGNOREKEY: 20112 { 20113 mhioc_registerandignorekey_t *ptr; 20114 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 20115 bcopy(ptr->newkey.key, 20116 prp->service_key, MHIOC_RESV_KEY_SIZE); 20117 prp->aptpl = ptr->aptpl; 20118 break; 20119 } 20120 default: 20121 ASSERT(FALSE); 20122 break; 20123 } 20124 20125 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20126 UIO_SYSSPACE, SD_PATH_STANDARD); 20127 20128 switch (status) { 20129 case 0: 20130 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20131 break; /* Success! 
*/ 20132 case EIO: 20133 switch (ucmd_buf.uscsi_status) { 20134 case STATUS_RESERVATION_CONFLICT: 20135 status = EACCES; 20136 break; 20137 case STATUS_CHECK: 20138 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20139 (scsi_sense_key((uint8_t *)&sense_buf) == 20140 KEY_ILLEGAL_REQUEST)) { 20141 status = ENOTSUP; 20142 } 20143 break; 20144 default: 20145 break; 20146 } 20147 break; 20148 default: 20149 break; 20150 } 20151 20152 kmem_free(prp, data_len); 20153 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 20154 return (status); 20155 } 20156 20157 20158 /* 20159 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 20160 * 20161 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 20162 * 20163 * Arguments: un - pointer to the target's soft state struct 20164 * dkc - pointer to the callback structure 20165 * 20166 * Return Code: 0 - success 20167 * errno-type error code 20168 * 20169 * Context: kernel thread context only. 20170 * 20171 * _______________________________________________________________ 20172 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 20173 * |FLUSH_VOLATILE| | operation | 20174 * |______________|______________|_________________________________| 20175 * | 0 | NULL | Synchronous flush on both | 20176 * | | | volatile and non-volatile cache | 20177 * |______________|______________|_________________________________| 20178 * | 1 | NULL | Synchronous flush on volatile | 20179 * | | | cache; disk drivers may suppress| 20180 * | | | flush if disk table indicates | 20181 * | | | non-volatile cache | 20182 * |______________|______________|_________________________________| 20183 * | 0 | !NULL | Asynchronous flush on both | 20184 * | | | volatile and non-volatile cache;| 20185 * |______________|______________|_________________________________| 20186 * | 1 | !NULL | Asynchronous flush on volatile | 20187 * | | | cache; disk drivers may suppress| 20188 * | | | flush if disk table indicates | 20189 * | | | non-volatile cache | 20190 * |______________|______________|_________________________________| 20191 * 20192 */ 20193 20194 static int 20195 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 20196 { 20197 struct sd_uscsi_info *uip; 20198 struct uscsi_cmd *uscmd; 20199 union scsi_cdb *cdb; 20200 struct buf *bp; 20201 int rval = 0; 20202 int is_async; 20203 20204 SD_TRACE(SD_LOG_IO, un, 20205 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 20206 20207 ASSERT(un != NULL); 20208 ASSERT(!mutex_owned(SD_MUTEX(un))); 20209 20210 if (dkc == NULL || dkc->dkc_callback == NULL) { 20211 is_async = FALSE; 20212 } else { 20213 is_async = TRUE; 20214 } 20215 20216 mutex_enter(SD_MUTEX(un)); 20217 /* check whether cache flush should be suppressed */ 20218 if (un->un_f_suppress_cache_flush == TRUE) { 20219 mutex_exit(SD_MUTEX(un)); 20220 /* 20221 * suppress the cache flush if the device is told to do 20222 * so by sd.conf or disk table 20223 */ 20224 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 20225 skip the cache flush since suppress_cache_flush is %d!\n", 20226 un->un_f_suppress_cache_flush); 20227 20228 if (is_async == TRUE) { 20229 /* invoke callback for asynchronous flush */ 20230 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 20231 } 20232 return (rval); 20233 } 20234 mutex_exit(SD_MUTEX(un)); 20235 20236 /* 20237 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 20238 * set properly 20239 */ 20240 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 20241 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 20242 20243 
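	/*
	 * [Editorial note -- not in the original source.] For reference,
	 * the SYNCHRONIZE CACHE(10) CDB built here looks like this on
	 * the wire (per SBC-2; SD_SYNC_NV_BIT is assumed to be 0x04,
	 * i.e. byte 1 bit 2, applied through cdb_un.tag below):
	 *
	 *	byte 0		0x35 (SCMD_SYNCHRONIZE_CACHE)
	 *	byte 1		bit 1 IMMED, bit 2 SYNC_NV
	 *	bytes 2-5	starting LBA (zero here)
	 *	bytes 7-8	block count (zero here == whole medium)
	 *
	 * so the SYNC_NV variant is simply:
	 *
	 *	uchar_t cdb10[10] = { 0x35, 0x04, 0 };
	 *
	 * With SYNC_NV set, a target that distinguishes volatile from
	 * non-volatile cache need only commit the volatile portion,
	 * which is what the FLUSH_VOLATILE callers want.
	 */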
mutex_enter(SD_MUTEX(un)); 20244 if (dkc != NULL && un->un_f_sync_nv_supported && 20245 (dkc->dkc_flag & FLUSH_VOLATILE)) { 20246 /* 20247 * if the device supports the SYNC_NV bit, turn on 20248 * the SYNC_NV bit to flush only the volatile cache 20249 */ 20250 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 20251 } 20252 mutex_exit(SD_MUTEX(un)); 20253 20254 /* 20255 * First get some memory for the uscsi_cmd struct and cdb 20256 * and initialize for SYNCHRONIZE_CACHE cmd. 20257 */ 20258 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 20259 uscmd->uscsi_cdblen = CDB_GROUP1; 20260 uscmd->uscsi_cdb = (caddr_t)cdb; 20261 uscmd->uscsi_bufaddr = NULL; 20262 uscmd->uscsi_buflen = 0; 20263 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20264 uscmd->uscsi_rqlen = SENSE_LENGTH; 20265 uscmd->uscsi_rqresid = SENSE_LENGTH; 20266 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20267 uscmd->uscsi_timeout = sd_io_time; 20268 20269 /* 20270 * Allocate an sd_uscsi_info struct and fill it with the info 20271 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 20272 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 20273 * since we allocate the buf here in this function, we do not 20274 * need to preserve the prior contents of b_private. 20275 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 20276 */ 20277 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 20278 uip->ui_flags = SD_PATH_DIRECT; 20279 uip->ui_cmdp = uscmd; 20280 20281 bp = getrbuf(KM_SLEEP); 20282 bp->b_private = uip; 20283 20284 /* 20285 * Setup buffer to carry uscsi request. 20286 */ 20287 bp->b_flags = B_BUSY; 20288 bp->b_bcount = 0; 20289 bp->b_blkno = 0; 20290 20291 if (is_async == TRUE) { 20292 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 20293 uip->ui_dkc = *dkc; 20294 } 20295 20296 bp->b_edev = SD_GET_DEV(un); 20297 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 20298 20299 /* 20300 * Unset un_f_sync_cache_required flag 20301 */ 20302 mutex_enter(SD_MUTEX(un)); 20303 un->un_f_sync_cache_required = FALSE; 20304 mutex_exit(SD_MUTEX(un)); 20305 20306 (void) sd_uscsi_strategy(bp); 20307 20308 /* 20309 * If this is a synchronous request, wait for completion. 20310 * If async, just return and let the b_iodone callback 20311 * clean up. 20312 * NOTE: On return, un_ncmds_in_driver will be decremented, 20313 * but it was also incremented in sd_uscsi_strategy(), so 20314 * we should be ok. 20315 */ 20316 if (is_async == FALSE) { 20317 (void) biowait(bp); 20318 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 20319 } 20320 20321 return (rval); 20322 } 20323 20324 20325 static int 20326 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 20327 { 20328 struct sd_uscsi_info *uip; 20329 struct uscsi_cmd *uscmd; 20330 uint8_t *sense_buf; 20331 struct sd_lun *un; 20332 int status; 20333 union scsi_cdb *cdb; 20334 20335 uip = (struct sd_uscsi_info *)(bp->b_private); 20336 ASSERT(uip != NULL); 20337 20338 uscmd = uip->ui_cmdp; 20339 ASSERT(uscmd != NULL); 20340 20341 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 20342 ASSERT(sense_buf != NULL); 20343 20344 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 20345 ASSERT(un != NULL); 20346 20347 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 20348 20349 status = geterror(bp); 20350 switch (status) { 20351 case 0: 20352 break; /* Success!
*/ 20353 case EIO: 20354 switch (uscmd->uscsi_status) { 20355 case STATUS_RESERVATION_CONFLICT: 20356 /* Ignore reservation conflict */ 20357 status = 0; 20358 goto done; 20359 20360 case STATUS_CHECK: 20361 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 20362 (scsi_sense_key(sense_buf) == 20363 KEY_ILLEGAL_REQUEST)) { 20364 /* Ignore Illegal Request error */ 20365 if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) { 20366 mutex_enter(SD_MUTEX(un)); 20367 un->un_f_sync_nv_supported = FALSE; 20368 mutex_exit(SD_MUTEX(un)); 20369 status = 0; 20370 SD_TRACE(SD_LOG_IO, un, 20371 "un_f_sync_nv_supported \ 20372 is set to false.\n"); 20373 goto done; 20374 } 20375 20376 mutex_enter(SD_MUTEX(un)); 20377 un->un_f_sync_cache_supported = FALSE; 20378 mutex_exit(SD_MUTEX(un)); 20379 SD_TRACE(SD_LOG_IO, un, 20380 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 20381 un_f_sync_cache_supported set to false \ 20382 with asc = %x, ascq = %x\n", 20383 scsi_sense_asc(sense_buf), 20384 scsi_sense_ascq(sense_buf)); 20385 status = ENOTSUP; 20386 goto done; 20387 } 20388 break; 20389 default: 20390 break; 20391 } 20392 /* FALLTHRU */ 20393 default: 20394 /* 20395 * Turn on the un_f_sync_cache_required flag 20396 * since the SYNC CACHE command failed 20397 */ 20398 mutex_enter(SD_MUTEX(un)); 20399 un->un_f_sync_cache_required = TRUE; 20400 mutex_exit(SD_MUTEX(un)); 20401 20402 /* 20403 * Don't log an error message if this device 20404 * has removable media. 20405 */ 20406 if (!un->un_f_has_removable_media) { 20407 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 20408 "SYNCHRONIZE CACHE command failed (%d)\n", status); 20409 } 20410 break; 20411 } 20412 20413 done: 20414 if (uip->ui_dkc.dkc_callback != NULL) { 20415 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 20416 } 20417 20418 ASSERT((bp->b_flags & B_REMAPPED) == 0); 20419 freerbuf(bp); 20420 kmem_free(uip, sizeof (struct sd_uscsi_info)); 20421 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 20422 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 20423 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 20424 20425 return (status); 20426 } 20427 20428 20429 /* 20430 * Function: sd_send_scsi_GET_CONFIGURATION 20431 * 20432 * Description: Issues the get configuration command to the device. 20433 * Called from sd_check_for_writable_cd & sd_get_media_info; 20434 * the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN. 20435 * Arguments: ssc 20436 * ucmdbuf 20437 * rqbuf 20438 * rqbuflen 20439 * bufaddr 20440 * buflen 20441 * path_flag 20442 * 20443 * Return Code: 0 - Success 20444 * errno return code from sd_ssc_send() 20445 * 20446 * Context: Can sleep. Does not return until command is completed. 20447 * 20448 */ 20449 20450 static int 20451 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf, 20452 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 20453 int path_flag) 20454 { 20455 char cdb[CDB_GROUP1]; 20456 int status; 20457 struct sd_lun *un; 20458 20459 ASSERT(ssc != NULL); 20460 un = ssc->ssc_un; 20461 ASSERT(un != NULL); 20462 ASSERT(!mutex_owned(SD_MUTEX(un))); 20463 ASSERT(bufaddr != NULL); 20464 ASSERT(ucmdbuf != NULL); 20465 ASSERT(rqbuf != NULL); 20466 20467 SD_TRACE(SD_LOG_IO, un, 20468 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 20469 20470 bzero(cdb, sizeof (cdb)); 20471 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 20472 bzero(rqbuf, rqbuflen); 20473 bzero(bufaddr, buflen); 20474 20475 /* 20476 * Set up cdb field for the get configuration command.
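 *
 * [Editorial note -- not in the original source.] SD_PROFILE_HEADER_LEN
 * is assumed to be the 8-byte MMC feature header that GET CONFIGURATION
 * returns ahead of any feature descriptors:
 *
 *	bytes 0-3	data length (big-endian)
 *	bytes 4-5	reserved
 *	bytes 6-7	current profile (e.g. 0x0008 CD-ROM, 0x0010 DVD-ROM)
 *
 * so a caller can recover the current profile from the returned buffer
 * with, for example:
 *
 *	uint16_t profile = (bufaddr[6] << 8) | bufaddr[7];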
20477 */ 20478 cdb[0] = SCMD_GET_CONFIGURATION; 20479 cdb[1] = 0x02; /* Requested Type */ 20480 cdb[8] = SD_PROFILE_HEADER_LEN; 20481 ucmdbuf->uscsi_cdb = cdb; 20482 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 20483 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 20484 ucmdbuf->uscsi_buflen = buflen; 20485 ucmdbuf->uscsi_timeout = sd_io_time; 20486 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 20487 ucmdbuf->uscsi_rqlen = rqbuflen; 20488 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 20489 20490 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 20491 UIO_SYSSPACE, path_flag); 20492 20493 switch (status) { 20494 case 0: 20495 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20496 break; /* Success! */ 20497 case EIO: 20498 switch (ucmdbuf->uscsi_status) { 20499 case STATUS_RESERVATION_CONFLICT: 20500 status = EACCES; 20501 break; 20502 default: 20503 break; 20504 } 20505 break; 20506 default: 20507 break; 20508 } 20509 20510 if (status == 0) { 20511 SD_DUMP_MEMORY(un, SD_LOG_IO, 20512 "sd_send_scsi_GET_CONFIGURATION: data", 20513 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 20514 } 20515 20516 SD_TRACE(SD_LOG_IO, un, 20517 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 20518 20519 return (status); 20520 } 20521 20522 /* 20523 * Function: sd_send_scsi_feature_GET_CONFIGURATION 20524 * 20525 * Description: Issues the get configuration command to the device to 20526 * retrieve a specific feature. Called from 20527 * sd_check_for_writable_cd & sd_set_mmc_caps. 20528 * Arguments: ssc 20529 * ucmdbuf 20530 * rqbuf 20531 * rqbuflen 20532 * bufaddr 20533 * buflen 20534 * feature 20535 * 20536 * Return Code: 0 - Success 20537 * errno return code from sd_ssc_send() 20538 * 20539 * Context: Can sleep. Does not return until command is completed. 20540 * 20541 */ 20542 static int 20543 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 20544 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 20545 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 20546 { 20547 char cdb[CDB_GROUP1]; 20548 int status; 20549 struct sd_lun *un; 20550 20551 ASSERT(ssc != NULL); 20552 un = ssc->ssc_un; 20553 ASSERT(un != NULL); 20554 ASSERT(!mutex_owned(SD_MUTEX(un))); 20555 ASSERT(bufaddr != NULL); 20556 ASSERT(ucmdbuf != NULL); 20557 ASSERT(rqbuf != NULL); 20558 20559 SD_TRACE(SD_LOG_IO, un, 20560 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 20561 20562 bzero(cdb, sizeof (cdb)); 20563 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 20564 bzero(rqbuf, rqbuflen); 20565 bzero(bufaddr, buflen); 20566 20567 /* 20568 * Set up cdb field for the get configuration command. 20569 */ 20570 cdb[0] = SCMD_GET_CONFIGURATION; 20571 cdb[1] = 0x02; /* Requested Type */ 20572 cdb[3] = feature; 20573 cdb[8] = buflen; 20574 ucmdbuf->uscsi_cdb = cdb; 20575 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 20576 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 20577 ucmdbuf->uscsi_buflen = buflen; 20578 ucmdbuf->uscsi_timeout = sd_io_time; 20579 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 20580 ucmdbuf->uscsi_rqlen = rqbuflen; 20581 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 20582 20583 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 20584 UIO_SYSSPACE, path_flag); 20585 20586 switch (status) { 20587 case 0: 20588 20589 break; /* Success! 
*/ 20590 case EIO: 20591 switch (ucmdbuf->uscsi_status) { 20592 case STATUS_RESERVATION_CONFLICT: 20593 status = EACCES; 20594 break; 20595 default: 20596 break; 20597 } 20598 break; 20599 default: 20600 break; 20601 } 20602 20603 if (status == 0) { 20604 SD_DUMP_MEMORY(un, SD_LOG_IO, 20605 "sd_send_scsi_feature_GET_CONFIGURATION: data", 20606 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 20607 } 20608 20609 SD_TRACE(SD_LOG_IO, un, 20610 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 20611 20612 return (status); 20613 } 20614 20615 20616 /* 20617 * Function: sd_send_scsi_MODE_SENSE 20618 * 20619 * Description: Utility function for issuing a scsi MODE SENSE command. 20620 * Note: This routine uses a consistent implementation for Group0, 20621 * Group1, and Group2 commands across all platforms. ATAPI devices 20622 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 20623 * 20624 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20625 * structure for this target. 20626 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 20627 * CDB_GROUP[1|2] (10 byte). 20628 * bufaddr - buffer for page data retrieved from the target. 20629 * buflen - size of page to be retrieved. 20630 * page_code - page code of data to be retrieved from the target. 20631 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20632 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20633 * to use the USCSI "direct" chain and bypass the normal 20634 * command waitq. 20635 * 20636 * Return Code: 0 - Success 20637 * errno return code from sd_ssc_send() 20638 * 20639 * Context: Can sleep. Does not return until command is completed. 20640 */ 20641 20642 static int 20643 sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 20644 size_t buflen, uchar_t page_code, int path_flag) 20645 { 20646 struct scsi_extended_sense sense_buf; 20647 union scsi_cdb cdb; 20648 struct uscsi_cmd ucmd_buf; 20649 int status; 20650 int headlen; 20651 struct sd_lun *un; 20652 20653 ASSERT(ssc != NULL); 20654 un = ssc->ssc_un; 20655 ASSERT(un != NULL); 20656 ASSERT(!mutex_owned(SD_MUTEX(un))); 20657 ASSERT(bufaddr != NULL); 20658 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 20659 (cdbsize == CDB_GROUP2)); 20660 20661 SD_TRACE(SD_LOG_IO, un, 20662 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 20663 20664 bzero(&cdb, sizeof (cdb)); 20665 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20666 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20667 bzero(bufaddr, buflen); 20668 20669 if (cdbsize == CDB_GROUP0) { 20670 cdb.scc_cmd = SCMD_MODE_SENSE; 20671 cdb.cdb_opaque[2] = page_code; 20672 FORMG0COUNT(&cdb, buflen); 20673 headlen = MODE_HEADER_LENGTH; 20674 } else { 20675 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 20676 cdb.cdb_opaque[2] = page_code; 20677 FORMG1COUNT(&cdb, buflen); 20678 headlen = MODE_HEADER_LENGTH_GRP2; 20679 } 20680 20681 ASSERT(headlen <= buflen); 20682 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 20683 20684 ucmd_buf.uscsi_cdb = (char *)&cdb; 20685 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20686 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20687 ucmd_buf.uscsi_buflen = buflen; 20688 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20689 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20690 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20691 ucmd_buf.uscsi_timeout = 60; 20692 20693 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20694 UIO_SYSSPACE, path_flag); 20695 20696 switch (status) { 20697 case 0: 20698 /* 20699 * sr_check_wp() uses 
0x3f page code and checks the header of the 20700 * mode page to determine if the target device is write-protected. 20701 * But some USB devices return 0 bytes for the 0x3f page code. For 20702 * this case, make sure that at least the mode page header is 20703 * returned. 20704 */ 20705 if (buflen - ucmd_buf.uscsi_resid < headlen) { 20706 status = EIO; 20707 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 20708 "mode page header is not returned"); 20709 } 20710 break; /* Success! */ 20711 case EIO: 20712 switch (ucmd_buf.uscsi_status) { 20713 case STATUS_RESERVATION_CONFLICT: 20714 status = EACCES; 20715 break; 20716 default: 20717 break; 20718 } 20719 break; 20720 default: 20721 break; 20722 } 20723 20724 if (status == 0) { 20725 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data", 20726 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20727 } 20728 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 20729 20730 return (status); 20731 } 20732 20733 20734 /* 20735 * Function: sd_send_scsi_MODE_SELECT 20736 * 20737 * Description: Utility function for issuing a scsi MODE SELECT command. 20738 * Note: This routine uses a consistent implementation for Group0, 20739 * Group1, and Group2 commands across all platforms. ATAPI devices 20740 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 20741 * 20742 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20743 * structure for this target. 20744 * cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or 20745 * CDB_GROUP[1|2] (10 byte)). 20746 * bufaddr - buffer for page data retrieved from the target. 20747 * buflen - size of page to be retrieved. 20748 * save_page - boolean to determine if SP bit should be set. 20749 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20750 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20751 * to use the USCSI "direct" chain and bypass the normal 20752 * command waitq. 20753 * 20754 * Return Code: 0 - Success 20755 * errno return code from sd_ssc_send() 20756 * 20757 * Context: Can sleep. Does not return until command is completed.
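 *
 * [Editorial example -- not in the original source.] MODE SELECT is
 * normally the write half of a read-modify-write on a page fetched
 * with MODE SENSE (sd_cache_control() uses this pairing on the caching
 * page). A minimal userland sketch via uscsi(7I) that toggles the WCE
 * bit of mode page 0x08 (offsets per SPC/SBC: 4-byte mode parameter
 * header(6), optional block descriptors, then the page; fd is a
 * hypothetical raw-device descriptor, error handling elided):
 *
 *	struct uscsi_cmd uc;
 *	uchar_t buf[255], cdb[6], *pg;
 *	int hdrlen;
 *
 *	bzero(&uc, sizeof (uc));
 *	bzero(cdb, sizeof (cdb));
 *	cdb[0] = 0x1a;			-- MODE SENSE(6)
 *	cdb[2] = 0x08;			-- caching mode page
 *	cdb[4] = sizeof (buf);
 *	uc.uscsi_cdb = (char *)cdb;
 *	uc.uscsi_cdblen = 6;
 *	uc.uscsi_bufaddr = (char *)buf;
 *	uc.uscsi_buflen = sizeof (buf);
 *	uc.uscsi_flags = USCSI_READ | USCSI_SILENT;
 *	uc.uscsi_timeout = 60;
 *	(void) ioctl(fd, USCSICMD, &uc);
 *
 *	hdrlen = 4 + buf[3];		-- header + block descriptors
 *	pg = buf + hdrlen;
 *	pg[0] &= 0x7f;			-- clear PS for MODE SELECT
 *	pg[2] ^= 0x04;			-- toggle WCE
 *	buf[0] = 0;			-- mode data length: reserved
 *
 *	cdb[0] = 0x15;			-- MODE SELECT(6)
 *	cdb[1] = 0x10;			-- PF bit, as this routine sets
 *	cdb[2] = 0;
 *	cdb[4] = hdrlen + 2 + pg[1];	-- header + page
 *	uc.uscsi_buflen = cdb[4];
 *	uc.uscsi_flags = USCSI_WRITE | USCSI_SILENT;
 *	(void) ioctl(fd, USCSICMD, &uc);
 *
 * The unconditional PF (page format) bit here matches the 0x10 this
 * routine stores in cdb_opaque[1] below; SD_SAVE_PAGE likewise
 * corresponds to the SP bit (0x01).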
20758 */ 20759 20760 static int 20761 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 20762 size_t buflen, uchar_t save_page, int path_flag) 20763 { 20764 struct scsi_extended_sense sense_buf; 20765 union scsi_cdb cdb; 20766 struct uscsi_cmd ucmd_buf; 20767 int status; 20768 struct sd_lun *un; 20769 20770 ASSERT(ssc != NULL); 20771 un = ssc->ssc_un; 20772 ASSERT(un != NULL); 20773 ASSERT(!mutex_owned(SD_MUTEX(un))); 20774 ASSERT(bufaddr != NULL); 20775 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 20776 (cdbsize == CDB_GROUP2)); 20777 20778 SD_TRACE(SD_LOG_IO, un, 20779 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 20780 20781 bzero(&cdb, sizeof (cdb)); 20782 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20783 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20784 20785 /* Set the PF bit for many third party drives */ 20786 cdb.cdb_opaque[1] = 0x10; 20787 20788 /* Set the savepage (SP) bit if given */ 20789 if (save_page == SD_SAVE_PAGE) { 20790 cdb.cdb_opaque[1] |= 0x01; 20791 } 20792 20793 if (cdbsize == CDB_GROUP0) { 20794 cdb.scc_cmd = SCMD_MODE_SELECT; 20795 FORMG0COUNT(&cdb, buflen); 20796 } else { 20797 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 20798 FORMG1COUNT(&cdb, buflen); 20799 } 20800 20801 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 20802 20803 ucmd_buf.uscsi_cdb = (char *)&cdb; 20804 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20805 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20806 ucmd_buf.uscsi_buflen = buflen; 20807 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20808 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20809 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 20810 ucmd_buf.uscsi_timeout = 60; 20811 20812 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20813 UIO_SYSSPACE, path_flag); 20814 20815 switch (status) { 20816 case 0: 20817 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20818 break; /* Success! */ 20819 case EIO: 20820 switch (ucmd_buf.uscsi_status) { 20821 case STATUS_RESERVATION_CONFLICT: 20822 status = EACCES; 20823 break; 20824 default: 20825 break; 20826 } 20827 break; 20828 default: 20829 break; 20830 } 20831 20832 if (status == 0) { 20833 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 20834 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20835 } 20836 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 20837 20838 return (status); 20839 } 20840 20841 20842 /* 20843 * Function: sd_send_scsi_RDWR 20844 * 20845 * Description: Issue a scsi READ or WRITE command with the given parameters. 20846 * 20847 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20848 * structure for this target. 20849 * cmd: SCMD_READ or SCMD_WRITE 20850 * bufaddr: Address of caller's buffer to receive the RDWR data 20851 * buflen: Length of caller's buffer to receive the RDWR data. 20852 * start_block: Block number for the start of the RDWR operation. 20853 * (Assumes target-native block size.) 20854 * residp: Pointer to variable to receive the residual of the 20855 * RDWR operation (may be NULL if no residual is requested). 20856 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20857 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20858 * to use the USCSI "direct" chain and bypass the normal 20859 * command waitq. 20860 * 20861 * Return Code: 0 - Success 20862 * errno return code from sd_ssc_send() 20863 * 20864 * Context: Can sleep. Does not return until command is completed.
20865 */ 20866 20867 static int 20868 sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 20869 size_t buflen, daddr_t start_block, int path_flag) 20870 { 20871 struct scsi_extended_sense sense_buf; 20872 union scsi_cdb cdb; 20873 struct uscsi_cmd ucmd_buf; 20874 uint32_t block_count; 20875 int status; 20876 int cdbsize; 20877 uchar_t flag; 20878 struct sd_lun *un; 20879 20880 ASSERT(ssc != NULL); 20881 un = ssc->ssc_un; 20882 ASSERT(un != NULL); 20883 ASSERT(!mutex_owned(SD_MUTEX(un))); 20884 ASSERT(bufaddr != NULL); 20885 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 20886 20887 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 20888 20889 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 20890 return (EINVAL); 20891 } 20892 20893 mutex_enter(SD_MUTEX(un)); 20894 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 20895 mutex_exit(SD_MUTEX(un)); 20896 20897 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 20898 20899 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 20900 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 20901 bufaddr, buflen, start_block, block_count); 20902 20903 bzero(&cdb, sizeof (cdb)); 20904 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20905 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20906 20907 /* Compute CDB size to use */ 20908 if (start_block > 0xffffffff) 20909 cdbsize = CDB_GROUP4; 20910 else if ((start_block & 0xFFE00000) || 20911 (un->un_f_cfg_is_atapi == TRUE)) 20912 cdbsize = CDB_GROUP1; 20913 else 20914 cdbsize = CDB_GROUP0; 20915 20916 switch (cdbsize) { 20917 case CDB_GROUP0: /* 6-byte CDBs */ 20918 cdb.scc_cmd = cmd; 20919 FORMG0ADDR(&cdb, start_block); 20920 FORMG0COUNT(&cdb, block_count); 20921 break; 20922 case CDB_GROUP1: /* 10-byte CDBs */ 20923 cdb.scc_cmd = cmd | SCMD_GROUP1; 20924 FORMG1ADDR(&cdb, start_block); 20925 FORMG1COUNT(&cdb, block_count); 20926 break; 20927 case CDB_GROUP4: /* 16-byte CDBs */ 20928 cdb.scc_cmd = cmd | SCMD_GROUP4; 20929 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 20930 FORMG4COUNT(&cdb, block_count); 20931 break; 20932 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 20933 default: 20934 /* All others reserved */ 20935 return (EINVAL); 20936 } 20937 20938 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 20939 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 20940 20941 ucmd_buf.uscsi_cdb = (char *)&cdb; 20942 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20943 ucmd_buf.uscsi_bufaddr = bufaddr; 20944 ucmd_buf.uscsi_buflen = buflen; 20945 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20946 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20947 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 20948 ucmd_buf.uscsi_timeout = 60; 20949 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20950 UIO_SYSSPACE, path_flag); 20951 20952 switch (status) { 20953 case 0: 20954 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20955 break; /* Success! */ 20956 case EIO: 20957 switch (ucmd_buf.uscsi_status) { 20958 case STATUS_RESERVATION_CONFLICT: 20959 status = EACCES; 20960 break; 20961 default: 20962 break; 20963 } 20964 break; 20965 default: 20966 break; 20967 } 20968 20969 if (status == 0) { 20970 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 20971 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20972 } 20973 20974 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 20975 20976 return (status); 20977 } 20978 20979 20980 /* 20981 * Function: sd_send_scsi_LOG_SENSE 20982 * 20983 * Description: Issue a scsi LOG_SENSE command with the given parameters. 
20984 * 20985 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20986 * structure for this target. bufaddr, buflen - buffer for the 20987 * returned log page; page_code, page_control, param_ptr - LOG SENSE CDB fields; path_flag - SD_PATH_* command chain selector. 20988 * Return Code: 0 - Success 20989 * errno return code from sd_ssc_send() 20990 * 20991 * Context: Can sleep. Does not return until command is completed. 20992 */ 20993 20994 static int 20995 sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen, 20996 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 20997 int path_flag) 20998 20999 { 21000 struct scsi_extended_sense sense_buf; 21001 union scsi_cdb cdb; 21002 struct uscsi_cmd ucmd_buf; 21003 int status; 21004 struct sd_lun *un; 21005 21006 ASSERT(ssc != NULL); 21007 un = ssc->ssc_un; 21008 ASSERT(un != NULL); 21009 ASSERT(!mutex_owned(SD_MUTEX(un))); 21010 21011 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 21012 21013 bzero(&cdb, sizeof (cdb)); 21014 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21015 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21016 21017 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 21018 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 21019 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 21020 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 21021 FORMG1COUNT(&cdb, buflen); 21022 21023 ucmd_buf.uscsi_cdb = (char *)&cdb; 21024 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 21025 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21026 ucmd_buf.uscsi_buflen = buflen; 21027 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21028 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21029 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 21030 ucmd_buf.uscsi_timeout = 60; 21031 21032 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21033 UIO_SYSSPACE, path_flag); 21034 21035 switch (status) { 21036 case 0: 21037 break; 21038 case EIO: 21039 switch (ucmd_buf.uscsi_status) { 21040 case STATUS_RESERVATION_CONFLICT: 21041 status = EACCES; 21042 break; 21043 case STATUS_CHECK: 21044 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 21045 (scsi_sense_key((uint8_t *)&sense_buf) == 21046 KEY_ILLEGAL_REQUEST) && 21047 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 21048 /* 21049 * ASC 0x24: INVALID FIELD IN CDB 21050 */ 21051 switch (page_code) { 21052 case START_STOP_CYCLE_PAGE: 21053 /* 21054 * The start stop cycle counter is 21055 * implemented as page 0x31 in earlier 21056 * generation disks. In new generation 21057 * disks the start stop cycle counter is 21058 * implemented as page 0xE. To properly 21059 * handle this case, if an attempt for 21060 * log page 0xE is made and fails, we 21061 * will try again using page 0x31. 21062 * 21063 * Network storage BU committed to 21064 * maintain the page 0x31 for this 21065 * purpose and will not have any other 21066 * page implemented with page code 0x31 21067 * until all disks transition to the 21068 * standard page.
21069 */ 21070 mutex_enter(SD_MUTEX(un)); 21071 un->un_start_stop_cycle_page = 21072 START_STOP_CYCLE_VU_PAGE; 21073 cdb.cdb_opaque[2] = 21074 (char)(page_control << 6) | 21075 un->un_start_stop_cycle_page; 21076 mutex_exit(SD_MUTEX(un)); 21077 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 21078 status = sd_ssc_send( 21079 ssc, &ucmd_buf, FKIOCTL, 21080 UIO_SYSSPACE, path_flag); 21081 21082 break; 21083 case TEMPERATURE_PAGE: 21084 status = ENOTTY; 21085 break; 21086 default: 21087 break; 21088 } 21089 } 21090 break; 21091 default: 21092 break; 21093 } 21094 break; 21095 default: 21096 break; 21097 } 21098 21099 if (status == 0) { 21100 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21101 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 21102 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21103 } 21104 21105 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 21106 21107 return (status); 21108 } 21109 21110 21111 /* 21112 * Function: sdioctl 21113 * 21114 * Description: Driver's ioctl(9e) entry point function. 21115 * 21116 * Arguments: dev - device number 21117 * cmd - ioctl operation to be performed 21118 * arg - user argument, contains data to be set or reference 21119 * parameter for get 21120 * flag - bit flag, indicating open settings, 32/64 bit type 21121 * cred_p - user credential pointer 21122 * rval_p - calling process return value (OPT) 21123 * 21124 * Return Code: EINVAL 21125 * ENOTTY 21126 * ENXIO 21127 * EIO 21128 * EFAULT 21129 * ENOTSUP 21130 * EPERM 21131 * 21132 * Context: Called from the device switch at normal priority. 21133 */ 21134 21135 static int 21136 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 21137 { 21138 struct sd_lun *un = NULL; 21139 int err = 0; 21140 int i = 0; 21141 cred_t *cr; 21142 int tmprval = EINVAL; 21143 int is_valid; 21144 sd_ssc_t *ssc; 21145 21146 /* 21147 * All device accesses go through sdstrategy, where we check the 21148 * suspend status. 21149 */ 21150 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21151 return (ENXIO); 21152 } 21153 21154 ASSERT(!mutex_owned(SD_MUTEX(un))); 21155 21156 /* Initialize sd_ssc_t for internal uscsi commands */ 21157 ssc = sd_ssc_init(un); 21158 21159 is_valid = SD_IS_VALID_LABEL(un); 21160 21161 /* 21162 * Moved this wait from sd_uscsi_strategy to here for 21163 * reasons of deadlock prevention. Internal driver commands, 21164 * specifically those to change a device's power level, result 21165 * in a call to sd_uscsi_strategy. 21166 */ 21167 mutex_enter(SD_MUTEX(un)); 21168 while ((un->un_state == SD_STATE_SUSPENDED) || 21169 (un->un_state == SD_STATE_PM_CHANGING)) { 21170 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 21171 } 21172 /* 21173 * Twiddling the counter here protects commands from now 21174 * through to the top of sd_uscsi_strategy. Without the 21175 * counter increment, a power down, for example, could get in 21176 * after the above state check is made and before 21177 * execution gets to the top of sd_uscsi_strategy. 21178 * That would cause problems.
21179 */ 21180 un->un_ncmds_in_driver++; 21181 21182 if (!is_valid && 21183 (flag & (FNDELAY | FNONBLOCK))) { 21184 switch (cmd) { 21185 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 21186 case DKIOCGVTOC: 21187 case DKIOCGEXTVTOC: 21188 case DKIOCGAPART: 21189 case DKIOCPARTINFO: 21190 case DKIOCEXTPARTINFO: 21191 case DKIOCSGEOM: 21192 case DKIOCSAPART: 21193 case DKIOCGETEFI: 21194 case DKIOCPARTITION: 21195 case DKIOCSVTOC: 21196 case DKIOCSEXTVTOC: 21197 case DKIOCSETEFI: 21198 case DKIOCGMBOOT: 21199 case DKIOCSMBOOT: 21200 case DKIOCG_PHYGEOM: 21201 case DKIOCG_VIRTGEOM: 21202 /* let cmlb handle it */ 21203 goto skip_ready_valid; 21204 21205 case CDROMPAUSE: 21206 case CDROMRESUME: 21207 case CDROMPLAYMSF: 21208 case CDROMPLAYTRKIND: 21209 case CDROMREADTOCHDR: 21210 case CDROMREADTOCENTRY: 21211 case CDROMSTOP: 21212 case CDROMSTART: 21213 case CDROMVOLCTRL: 21214 case CDROMSUBCHNL: 21215 case CDROMREADMODE2: 21216 case CDROMREADMODE1: 21217 case CDROMREADOFFSET: 21218 case CDROMSBLKMODE: 21219 case CDROMGBLKMODE: 21220 case CDROMGDRVSPEED: 21221 case CDROMSDRVSPEED: 21222 case CDROMCDDA: 21223 case CDROMCDXA: 21224 case CDROMSUBCODE: 21225 if (!ISCD(un)) { 21226 un->un_ncmds_in_driver--; 21227 ASSERT(un->un_ncmds_in_driver >= 0); 21228 mutex_exit(SD_MUTEX(un)); 21229 err = ENOTTY; 21230 goto done_without_assess; 21231 } 21232 break; 21233 case FDEJECT: 21234 case DKIOCEJECT: 21235 case CDROMEJECT: 21236 if (!un->un_f_eject_media_supported) { 21237 un->un_ncmds_in_driver--; 21238 ASSERT(un->un_ncmds_in_driver >= 0); 21239 mutex_exit(SD_MUTEX(un)); 21240 err = ENOTTY; 21241 goto done_without_assess; 21242 } 21243 break; 21244 case DKIOCFLUSHWRITECACHE: 21245 mutex_exit(SD_MUTEX(un)); 21246 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21247 if (err != 0) { 21248 mutex_enter(SD_MUTEX(un)); 21249 un->un_ncmds_in_driver--; 21250 ASSERT(un->un_ncmds_in_driver >= 0); 21251 mutex_exit(SD_MUTEX(un)); 21252 err = EIO; 21253 goto done_quick_assess; 21254 } 21255 mutex_enter(SD_MUTEX(un)); 21256 /* FALLTHROUGH */ 21257 case DKIOCREMOVABLE: 21258 case DKIOCHOTPLUGGABLE: 21259 case DKIOCINFO: 21260 case DKIOCGMEDIAINFO: 21261 case MHIOCENFAILFAST: 21262 case MHIOCSTATUS: 21263 case MHIOCTKOWN: 21264 case MHIOCRELEASE: 21265 case MHIOCGRP_INKEYS: 21266 case MHIOCGRP_INRESV: 21267 case MHIOCGRP_REGISTER: 21268 case MHIOCGRP_RESERVE: 21269 case MHIOCGRP_PREEMPTANDABORT: 21270 case MHIOCGRP_REGISTERANDIGNOREKEY: 21271 case CDROMCLOSETRAY: 21272 case USCSICMD: 21273 goto skip_ready_valid; 21274 default: 21275 break; 21276 } 21277 21278 mutex_exit(SD_MUTEX(un)); 21279 err = sd_ready_and_valid(ssc, SDPART(dev)); 21280 mutex_enter(SD_MUTEX(un)); 21281 21282 if (err != SD_READY_VALID) { 21283 switch (cmd) { 21284 case DKIOCSTATE: 21285 case CDROMGDRVSPEED: 21286 case CDROMSDRVSPEED: 21287 case FDEJECT: /* for eject command */ 21288 case DKIOCEJECT: 21289 case CDROMEJECT: 21290 case DKIOCREMOVABLE: 21291 case DKIOCHOTPLUGGABLE: 21292 break; 21293 default: 21294 if (un->un_f_has_removable_media) { 21295 err = ENXIO; 21296 } else { 21297 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 21298 if (err == SD_RESERVED_BY_OTHERS) { 21299 err = EACCES; 21300 } else { 21301 err = EIO; 21302 } 21303 } 21304 un->un_ncmds_in_driver--; 21305 ASSERT(un->un_ncmds_in_driver >= 0); 21306 mutex_exit(SD_MUTEX(un)); 21307 21308 goto done_without_assess; 21309 } 21310 } 21311 } 21312 21313 skip_ready_valid: 21314 mutex_exit(SD_MUTEX(un)); 21315 21316 switch (cmd) { 21317 case DKIOCINFO: 21318 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 
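		/*
		 * [Editorial example -- not in the original source.] From
		 * userland this case is reached through dkio(7I), e.g.
		 * (device path hypothetical):
		 *
		 *	struct dk_cinfo ci;
		 *	int fd = open("/dev/rdsk/c0t0d0s2",
		 *	    O_RDONLY | O_NDELAY);
		 *
		 *	if (ioctl(fd, DKIOCINFO, &ci) == 0)
		 *		(void) printf("ctype %d unit %u\n",
		 *		    ci.dki_ctype, ci.dki_unit);
		 *
		 * dk_cinfo and DKIOCINFO come from <sys/dkio.h>;
		 * sd_dkio_ctrl_info() fills the dk_cinfo from the HBA and
		 * unit soft state and copies it out to the caller.
		 */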
21319 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 21320 break; 21321 21322 case DKIOCGMEDIAINFO: 21323 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 21324 err = sd_get_media_info(dev, (caddr_t)arg, flag); 21325 break; 21326 21327 case DKIOCGGEOM: 21328 case DKIOCGVTOC: 21329 case DKIOCGEXTVTOC: 21330 case DKIOCGAPART: 21331 case DKIOCPARTINFO: 21332 case DKIOCEXTPARTINFO: 21333 case DKIOCSGEOM: 21334 case DKIOCSAPART: 21335 case DKIOCGETEFI: 21336 case DKIOCPARTITION: 21337 case DKIOCSVTOC: 21338 case DKIOCSEXTVTOC: 21339 case DKIOCSETEFI: 21340 case DKIOCGMBOOT: 21341 case DKIOCSMBOOT: 21342 case DKIOCG_PHYGEOM: 21343 case DKIOCG_VIRTGEOM: 21344 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 21345 21346 /* TUR should spin up */ 21347 21348 if (un->un_f_has_removable_media) 21349 err = sd_send_scsi_TEST_UNIT_READY(ssc, 21350 SD_CHECK_FOR_MEDIA); 21351 21352 else 21353 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21354 21355 if (err != 0) 21356 goto done_with_assess; 21357 21358 err = cmlb_ioctl(un->un_cmlbhandle, dev, 21359 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 21360 21361 if ((err == 0) && 21362 ((cmd == DKIOCSETEFI) || 21363 (un->un_f_pkstats_enabled) && 21364 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC))) { 21365 21366 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 21367 (void *)SD_PATH_DIRECT); 21368 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 21369 sd_set_pstats(un); 21370 SD_TRACE(SD_LOG_IO_PARTITION, un, 21371 "sd_ioctl: un:0x%p pstats created and " 21372 "set\n", un); 21373 } 21374 } 21375 21376 if ((cmd == DKIOCSVTOC) || 21377 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 21378 21379 mutex_enter(SD_MUTEX(un)); 21380 if (un->un_f_devid_supported && 21381 (un->un_f_opt_fab_devid == TRUE)) { 21382 if (un->un_devid == NULL) { 21383 sd_register_devid(ssc, SD_DEVINFO(un), 21384 SD_TARGET_IS_UNRESERVED); 21385 } else { 21386 /* 21387 * The device id for this disk 21388 * has been fabricated. The 21389 * device id must be preserved 21390 * by writing it back out to 21391 * disk. 21392 */ 21393 if (sd_write_deviceid(ssc) != 0) { 21394 ddi_devid_free(un->un_devid); 21395 un->un_devid = NULL; 21396 } 21397 } 21398 } 21399 mutex_exit(SD_MUTEX(un)); 21400 } 21401 21402 break; 21403 21404 case DKIOCLOCK: 21405 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 21406 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 21407 SD_PATH_STANDARD); 21408 goto done_with_assess; 21409 21410 case DKIOCUNLOCK: 21411 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 21412 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 21413 SD_PATH_STANDARD); 21414 goto done_with_assess; 21415 21416 case DKIOCSTATE: { 21417 enum dkio_state state; 21418 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 21419 21420 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 21421 err = EFAULT; 21422 } else { 21423 err = sd_check_media(dev, state); 21424 if (err == 0) { 21425 if (ddi_copyout(&un->un_mediastate, (void *)arg, 21426 sizeof (int), flag) != 0) 21427 err = EFAULT; 21428 } 21429 } 21430 break; 21431 } 21432 21433 case DKIOCREMOVABLE: 21434 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 21435 i = un->un_f_has_removable_media ? 1 : 0; 21436 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 21437 err = EFAULT; 21438 } else { 21439 err = 0; 21440 } 21441 break; 21442 21443 case DKIOCHOTPLUGGABLE: 21444 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 21445 i = un->un_f_is_hotpluggable ? 
1 : 0; 21446 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 21447 err = EFAULT; 21448 } else { 21449 err = 0; 21450 } 21451 break; 21452 21453 case DKIOCGTEMPERATURE: 21454 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 21455 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 21456 break; 21457 21458 case MHIOCENFAILFAST: 21459 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 21460 if ((err = drv_priv(cred_p)) == 0) { 21461 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 21462 } 21463 break; 21464 21465 case MHIOCTKOWN: 21466 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 21467 if ((err = drv_priv(cred_p)) == 0) { 21468 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 21469 } 21470 break; 21471 21472 case MHIOCRELEASE: 21473 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 21474 if ((err = drv_priv(cred_p)) == 0) { 21475 err = sd_mhdioc_release(dev); 21476 } 21477 break; 21478 21479 case MHIOCSTATUS: 21480 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 21481 if ((err = drv_priv(cred_p)) == 0) { 21482 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) { 21483 case 0: 21484 err = 0; 21485 break; 21486 case EACCES: 21487 *rval_p = 1; 21488 err = 0; 21489 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 21490 break; 21491 default: 21492 err = EIO; 21493 goto done_with_assess; 21494 } 21495 } 21496 break; 21497 21498 case MHIOCQRESERVE: 21499 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 21500 if ((err = drv_priv(cred_p)) == 0) { 21501 err = sd_reserve_release(dev, SD_RESERVE); 21502 } 21503 break; 21504 21505 case MHIOCREREGISTERDEVID: 21506 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 21507 if (drv_priv(cred_p) == EPERM) { 21508 err = EPERM; 21509 } else if (!un->un_f_devid_supported) { 21510 err = ENOTTY; 21511 } else { 21512 err = sd_mhdioc_register_devid(dev); 21513 } 21514 break; 21515 21516 case MHIOCGRP_INKEYS: 21517 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 21518 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 21519 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21520 err = ENOTSUP; 21521 } else { 21522 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 21523 flag); 21524 } 21525 } 21526 break; 21527 21528 case MHIOCGRP_INRESV: 21529 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 21530 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 21531 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21532 err = ENOTSUP; 21533 } else { 21534 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 21535 } 21536 } 21537 break; 21538 21539 case MHIOCGRP_REGISTER: 21540 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 21541 if ((err = drv_priv(cred_p)) != EPERM) { 21542 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21543 err = ENOTSUP; 21544 } else if (arg != NULL) { 21545 mhioc_register_t reg; 21546 if (ddi_copyin((void *)arg, ®, 21547 sizeof (mhioc_register_t), flag) != 0) { 21548 err = EFAULT; 21549 } else { 21550 err = 21551 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21552 ssc, SD_SCSI3_REGISTER, 21553 (uchar_t *)®); 21554 if (err != 0) 21555 goto done_with_assess; 21556 } 21557 } 21558 } 21559 break; 21560 21561 case MHIOCGRP_RESERVE: 21562 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 21563 if ((err = drv_priv(cred_p)) != EPERM) { 21564 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21565 err = ENOTSUP; 21566 } else if (arg != NULL) { 21567 mhioc_resv_desc_t resv_desc; 21568 if (ddi_copyin((void *)arg, &resv_desc, 21569 sizeof (mhioc_resv_desc_t), flag) != 0) { 21570 err = EFAULT; 21571 } else { 21572 err = 21573 
sd_send_scsi_PERSISTENT_RESERVE_OUT( 21574 ssc, SD_SCSI3_RESERVE, 21575 (uchar_t *)&resv_desc); 21576 if (err != 0) 21577 goto done_with_assess; 21578 } 21579 } 21580 } 21581 break; 21582 21583 case MHIOCGRP_PREEMPTANDABORT: 21584 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 21585 if ((err = drv_priv(cred_p)) != EPERM) { 21586 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21587 err = ENOTSUP; 21588 } else if (arg != NULL) { 21589 mhioc_preemptandabort_t preempt_abort; 21590 if (ddi_copyin((void *)arg, &preempt_abort, 21591 sizeof (mhioc_preemptandabort_t), 21592 flag) != 0) { 21593 err = EFAULT; 21594 } else { 21595 err = 21596 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21597 ssc, SD_SCSI3_PREEMPTANDABORT, 21598 (uchar_t *)&preempt_abort); 21599 if (err != 0) 21600 goto done_with_assess; 21601 } 21602 } 21603 } 21604 break; 21605 21606 case MHIOCGRP_REGISTERANDIGNOREKEY: 21607 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 21608 if ((err = drv_priv(cred_p)) != EPERM) { 21609 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21610 err = ENOTSUP; 21611 } else if (arg != NULL) { 21612 mhioc_registerandignorekey_t r_and_i; 21613 if (ddi_copyin((void *)arg, (void *)&r_and_i, 21614 sizeof (mhioc_registerandignorekey_t), 21615 flag) != 0) { 21616 err = EFAULT; 21617 } else { 21618 err = 21619 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21620 ssc, SD_SCSI3_REGISTERANDIGNOREKEY, 21621 (uchar_t *)&r_and_i); 21622 if (err != 0) 21623 goto done_with_assess; 21624 } 21625 } 21626 } 21627 break; 21628 21629 case USCSICMD: 21630 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 21631 cr = ddi_get_cred(); 21632 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 21633 err = EPERM; 21634 } else { 21635 enum uio_seg uioseg; 21636 21637 uioseg = (flag & FKIOCTL) ? 
UIO_SYSSPACE : 21638 UIO_USERSPACE; 21639 if (un->un_f_format_in_progress == TRUE) { 21640 err = EAGAIN; 21641 break; 21642 } 21643 21644 err = sd_ssc_send(ssc, 21645 (struct uscsi_cmd *)arg, 21646 flag, uioseg, SD_PATH_STANDARD); 21647 if (err != 0) 21648 goto done_with_assess; 21649 else 21650 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21651 } 21652 break; 21653 21654 case CDROMPAUSE: 21655 case CDROMRESUME: 21656 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 21657 if (!ISCD(un)) { 21658 err = ENOTTY; 21659 } else { 21660 err = sr_pause_resume(dev, cmd); 21661 } 21662 break; 21663 21664 case CDROMPLAYMSF: 21665 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 21666 if (!ISCD(un)) { 21667 err = ENOTTY; 21668 } else { 21669 err = sr_play_msf(dev, (caddr_t)arg, flag); 21670 } 21671 break; 21672 21673 case CDROMPLAYTRKIND: 21674 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 21675 #if defined(__i386) || defined(__amd64) 21676 /* 21677 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 21678 */ 21679 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 21680 #else 21681 if (!ISCD(un)) { 21682 #endif 21683 err = ENOTTY; 21684 } else { 21685 err = sr_play_trkind(dev, (caddr_t)arg, flag); 21686 } 21687 break; 21688 21689 case CDROMREADTOCHDR: 21690 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 21691 if (!ISCD(un)) { 21692 err = ENOTTY; 21693 } else { 21694 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 21695 } 21696 break; 21697 21698 case CDROMREADTOCENTRY: 21699 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 21700 if (!ISCD(un)) { 21701 err = ENOTTY; 21702 } else { 21703 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 21704 } 21705 break; 21706 21707 case CDROMSTOP: 21708 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 21709 if (!ISCD(un)) { 21710 err = ENOTTY; 21711 } else { 21712 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_STOP, 21713 SD_PATH_STANDARD); 21714 goto done_with_assess; 21715 } 21716 break; 21717 21718 case CDROMSTART: 21719 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 21720 if (!ISCD(un)) { 21721 err = ENOTTY; 21722 } else { 21723 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 21724 SD_PATH_STANDARD); 21725 goto done_with_assess; 21726 } 21727 break; 21728 21729 case CDROMCLOSETRAY: 21730 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 21731 if (!ISCD(un)) { 21732 err = ENOTTY; 21733 } else { 21734 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_CLOSE, 21735 SD_PATH_STANDARD); 21736 goto done_with_assess; 21737 } 21738 break; 21739 21740 case FDEJECT: /* for eject command */ 21741 case DKIOCEJECT: 21742 case CDROMEJECT: 21743 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 21744 if (!un->un_f_eject_media_supported) { 21745 err = ENOTTY; 21746 } else { 21747 err = sr_eject(dev); 21748 } 21749 break; 21750 21751 case CDROMVOLCTRL: 21752 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 21753 if (!ISCD(un)) { 21754 err = ENOTTY; 21755 } else { 21756 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 21757 } 21758 break; 21759 21760 case CDROMSUBCHNL: 21761 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 21762 if (!ISCD(un)) { 21763 err = ENOTTY; 21764 } else { 21765 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 21766 } 21767 break; 21768 21769 case CDROMREADMODE2: 21770 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 21771 if (!ISCD(un)) { 21772 err = ENOTTY; 21773 } else if (un->un_f_cfg_is_atapi == TRUE) { 21774 /* 21775 * If the drive supports READ CD, use that instead of 21776 * switching the LBA size via a MODE SELECT 21777 * Block Descriptor 21778 */ 21779 err = 
sr_read_cd_mode2(dev, (caddr_t)arg, flag); 21780 } else { 21781 err = sr_read_mode2(dev, (caddr_t)arg, flag); 21782 } 21783 break; 21784 21785 case CDROMREADMODE1: 21786 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 21787 if (!ISCD(un)) { 21788 err = ENOTTY; 21789 } else { 21790 err = sr_read_mode1(dev, (caddr_t)arg, flag); 21791 } 21792 break; 21793 21794 case CDROMREADOFFSET: 21795 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 21796 if (!ISCD(un)) { 21797 err = ENOTTY; 21798 } else { 21799 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 21800 flag); 21801 } 21802 break; 21803 21804 case CDROMSBLKMODE: 21805 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 21806 /* 21807 * There is no means of changing block size in case of atapi 21808 * drives; thus return ENOTTY if the drive type is atapi. 21809 */ 21810 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 21811 err = ENOTTY; 21812 } else if (un->un_f_mmc_cap == TRUE) { 21813 21814 /* 21815 * MMC Devices do not support changing the 21816 * logical block size 21817 * 21818 * Note: EINVAL is being returned instead of ENOTTY to 21819 * maintain consistency with the original mmc 21820 * driver update. 21821 */ 21822 err = EINVAL; 21823 } else { 21824 mutex_enter(SD_MUTEX(un)); 21825 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 21826 (un->un_ncmds_in_transport > 0)) { 21827 mutex_exit(SD_MUTEX(un)); 21828 err = EINVAL; 21829 } else { 21830 mutex_exit(SD_MUTEX(un)); 21831 err = sr_change_blkmode(dev, cmd, arg, flag); 21832 } 21833 } 21834 break; 21835 21836 case CDROMGBLKMODE: 21837 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 21838 if (!ISCD(un)) { 21839 err = ENOTTY; 21840 } else if ((un->un_f_cfg_is_atapi != FALSE) && 21841 (un->un_f_blockcount_is_valid != FALSE)) { 21842 /* 21843 * Drive is an ATAPI drive so return target block 21844 * size for ATAPI drives since we cannot change the 21845 * blocksize on ATAPI drives. Used primarily to detect 21846 * if an ATAPI cdrom is present. 21847 */ 21848 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 21849 sizeof (int), flag) != 0) { 21850 err = EFAULT; 21851 } else { 21852 err = 0; 21853 } 21854 21855 } else { 21856 /* 21857 * Drive supports changing block sizes via a Mode 21858 * Select. 21859 */ 21860 err = sr_change_blkmode(dev, cmd, arg, flag); 21861 } 21862 break; 21863 21864 case CDROMGDRVSPEED: 21865 case CDROMSDRVSPEED: 21866 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 21867 if (!ISCD(un)) { 21868 err = ENOTTY; 21869 } else if (un->un_f_mmc_cap == TRUE) { 21870 /* 21871 * Note: In the future the driver implementation 21872 * for getting and 21873 * setting cd speed should entail: 21874 * 1) If non-mmc try the Toshiba mode page 21875 * (sr_change_speed) 21876 * 2) If mmc but no support for Real Time Streaming try 21877 * the SET CD SPEED (0xBB) command 21878 * (sr_atapi_change_speed) 21879 * 3) If mmc and support for Real Time Streaming 21880 * try the GET PERFORMANCE and SET STREAMING 21881 * commands (not yet implemented, 4380808) 21882 */ 21883 /* 21884 * As per recent MMC spec, CD-ROM speed is variable 21885 * and changes with LBA. Since there is no such 21886 * thing as a single drive speed now, fail this ioctl. 21887 * 21888 * Note: EINVAL is returned for consistency with the 21889 * original implementation, which included support for 21890 * getting the drive speed of mmc devices but not 21891 * setting the drive speed. Thus EINVAL would be returned 21892 * if a set request was made for an mmc device.
21893 * We no longer support get or set speed for 21894 * mmc but need to remain consistent with regard 21895 * to the error code returned. 21896 */ 21897 err = EINVAL; 21898 } else if (un->un_f_cfg_is_atapi == TRUE) { 21899 err = sr_atapi_change_speed(dev, cmd, arg, flag); 21900 } else { 21901 err = sr_change_speed(dev, cmd, arg, flag); 21902 } 21903 break; 21904 21905 case CDROMCDDA: 21906 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 21907 if (!ISCD(un)) { 21908 err = ENOTTY; 21909 } else { 21910 err = sr_read_cdda(dev, (void *)arg, flag); 21911 } 21912 break; 21913 21914 case CDROMCDXA: 21915 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 21916 if (!ISCD(un)) { 21917 err = ENOTTY; 21918 } else { 21919 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 21920 } 21921 break; 21922 21923 case CDROMSUBCODE: 21924 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 21925 if (!ISCD(un)) { 21926 err = ENOTTY; 21927 } else { 21928 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 21929 } 21930 break; 21931 21932 21933 #ifdef SDDEBUG 21934 /* RESET/ABORTS testing ioctls */ 21935 case DKIOCRESET: { 21936 int reset_level; 21937 21938 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 21939 err = EFAULT; 21940 } else { 21941 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 21942 "reset_level = 0x%x\n", reset_level); 21943 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 21944 err = 0; 21945 } else { 21946 err = EIO; 21947 } 21948 } 21949 break; 21950 } 21951 21952 case DKIOCABORT: 21953 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 21954 if (scsi_abort(SD_ADDRESS(un), NULL)) { 21955 err = 0; 21956 } else { 21957 err = EIO; 21958 } 21959 break; 21960 #endif 21961 21962 #ifdef SD_FAULT_INJECTION 21963 /* SDIOC FaultInjection testing ioctls */ 21964 case SDIOCSTART: 21965 case SDIOCSTOP: 21966 case SDIOCINSERTPKT: 21967 case SDIOCINSERTXB: 21968 case SDIOCINSERTUN: 21969 case SDIOCINSERTARQ: 21970 case SDIOCPUSH: 21971 case SDIOCRETRIEVE: 21972 case SDIOCRUN: 21973 SD_INFO(SD_LOG_SDTEST, un, "sdioctl: " 21974 "SDIOC detected cmd:0x%X:\n", cmd); 21975 /* call error generator */ 21976 sd_faultinjection_ioctl(cmd, arg, un); 21977 err = 0; 21978 break; 21979 21980 #endif /* SD_FAULT_INJECTION */ 21981 21982 case DKIOCFLUSHWRITECACHE: 21983 { 21984 struct dk_callback *dkc = (struct dk_callback *)arg; 21985 21986 mutex_enter(SD_MUTEX(un)); 21987 if (!un->un_f_sync_cache_supported || 21988 !un->un_f_write_cache_enabled) { 21989 err = un->un_f_sync_cache_supported ? 21990 0 : ENOTSUP; 21991 mutex_exit(SD_MUTEX(un)); 21992 if ((flag & FKIOCTL) && dkc != NULL && 21993 dkc->dkc_callback != NULL) { 21994 (*dkc->dkc_callback)(dkc->dkc_cookie, 21995 err); 21996 /* 21997 * Did callback and reported error. 21998 * Since we did a callback, ioctl 21999 * should return 0.
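 *
 * As a hedged sketch only (a hypothetical in-kernel LDI
 * caller; lh, rval, my_flush_done and my_state are
 * assumptions, not names from this driver), an
 * asynchronous flush could be requested as follows:
 *
 *	struct dk_callback dkc;
 *	int rval;
 *
 *	dkc.dkc_callback = my_flush_done;
 *	dkc.dkc_cookie = my_state;
 *	(void) ldi_ioctl(lh, DKIOCFLUSHWRITECACHE,
 *	    (intptr_t)&dkc, FKIOCTL, kcred, &rval);
 *
 * The callback then receives the flush result exactly once,
 * while the ioctl itself returns 0 as described above.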
22000 */ 22001 err = 0; 22002 } 22003 break; 22004 } 22005 mutex_exit(SD_MUTEX(un)); 22006 22007 if ((flag & FKIOCTL) && dkc != NULL && 22008 dkc->dkc_callback != NULL) { 22009 /* async SYNC CACHE request */ 22010 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 22011 } else { 22012 /* synchronous SYNC CACHE request */ 22013 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 22014 } 22015 } 22016 break; 22017 22018 case DKIOCGETWCE: { 22019 22020 int wce; 22021 22022 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 22023 break; 22024 } 22025 22026 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 22027 err = EFAULT; 22028 } 22029 break; 22030 } 22031 22032 case DKIOCSETWCE: { 22033 22034 int wce, sync_supported; 22035 22036 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 22037 err = EFAULT; 22038 break; 22039 } 22040 22041 /* 22042 * Synchronize multiple threads trying to enable 22043 * or disable the cache via the un_f_wcc_cv 22044 * condition variable. 22045 */ 22046 mutex_enter(SD_MUTEX(un)); 22047 22048 /* 22049 * Don't allow the cache to be enabled if the 22050 * config file has it disabled. 22051 */ 22052 if (un->un_f_opt_disable_cache && wce) { 22053 mutex_exit(SD_MUTEX(un)); 22054 err = EINVAL; 22055 break; 22056 } 22057 22058 /* 22059 * Wait for write cache change in progress 22060 * bit to be clear before proceeding. 22061 */ 22062 while (un->un_f_wcc_inprog) 22063 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 22064 22065 un->un_f_wcc_inprog = 1; 22066 22067 if (un->un_f_write_cache_enabled && wce == 0) { 22068 /* 22069 * Disable the write cache. Don't clear 22070 * un_f_write_cache_enabled until after 22071 * the mode select and flush are complete. 22072 */ 22073 sync_supported = un->un_f_sync_cache_supported; 22074 22075 /* 22076 * If cache flush is suppressed, we assume that the 22077 * controller firmware will take care of managing the 22078 * write cache for us: no need to explicitly 22079 * disable it. 22080 */ 22081 if (!un->un_f_suppress_cache_flush) { 22082 mutex_exit(SD_MUTEX(un)); 22083 if ((err = sd_cache_control(ssc, 22084 SD_CACHE_NOCHANGE, 22085 SD_CACHE_DISABLE)) == 0 && 22086 sync_supported) { 22087 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 22088 NULL); 22089 } 22090 } else { 22091 mutex_exit(SD_MUTEX(un)); 22092 } 22093 22094 mutex_enter(SD_MUTEX(un)); 22095 if (err == 0) { 22096 un->un_f_write_cache_enabled = 0; 22097 } 22098 22099 } else if (!un->un_f_write_cache_enabled && wce != 0) { 22100 /* 22101 * Set un_f_write_cache_enabled first, so there is 22102 * no window where the cache is enabled, but the 22103 * bit says it isn't. 22104 */ 22105 un->un_f_write_cache_enabled = 1; 22106 22107 /* 22108 * If cache flush is suppressed, we assume that the 22109 * controller firmware will take care of managing the 22110 * write cache for us: no need to explicitly 22111 * enable it. 
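 *
 * (As a minimal userland sketch, assuming a descriptor fd
 * open on the raw device -- fd is an assumption for
 * illustration only -- the cache could be toggled with:
 *
 *	int wce = 1;	use 0 to disable
 *
 *	if (ioctl(fd, DKIOCSETWCE, &wce) != 0)
 *		perror("DKIOCSETWCE");
 * )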
22112 */ 22113 if (!un->un_f_suppress_cache_flush) { 22114 mutex_exit(SD_MUTEX(un)); 22115 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 22116 SD_CACHE_ENABLE); 22117 } else { 22118 mutex_exit(SD_MUTEX(un)); 22119 } 22120 22121 mutex_enter(SD_MUTEX(un)); 22122 22123 if (err) { 22124 un->un_f_write_cache_enabled = 0; 22125 } 22126 } 22127 22128 un->un_f_wcc_inprog = 0; 22129 cv_broadcast(&un->un_wcc_cv); 22130 mutex_exit(SD_MUTEX(un)); 22131 break; 22132 } 22133 22134 default: 22135 err = ENOTTY; 22136 break; 22137 } 22138 mutex_enter(SD_MUTEX(un)); 22139 un->un_ncmds_in_driver--; 22140 ASSERT(un->un_ncmds_in_driver >= 0); 22141 mutex_exit(SD_MUTEX(un)); 22142 22143 22144 done_without_assess: 22145 sd_ssc_fini(ssc); 22146 22147 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22148 return (err); 22149 22150 done_with_assess: 22151 mutex_enter(SD_MUTEX(un)); 22152 un->un_ncmds_in_driver--; 22153 ASSERT(un->un_ncmds_in_driver >= 0); 22154 mutex_exit(SD_MUTEX(un)); 22155 22156 done_quick_assess: 22157 if (err != 0) 22158 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22159 /* Uninitialize sd_ssc_t pointer */ 22160 sd_ssc_fini(ssc); 22161 22162 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22163 return (err); 22164 } 22165 22166 22167 /* 22168 * Function: sd_dkio_ctrl_info 22169 * 22170 * Description: This routine is the driver entry point for handling controller 22171 * information ioctl requests (DKIOCINFO). 22172 * 22173 * Arguments: dev - the device number 22174 * arg - pointer to user provided dk_cinfo structure 22175 * specifying the controller type and attributes. 22176 * flag - this argument is a pass through to ddi_copyxxx() 22177 * directly from the mode argument of ioctl(). 22178 * 22179 * Return Code: 0 22180 * EFAULT 22181 * ENXIO 22182 */ 22183 22184 static int 22185 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 22186 { 22187 struct sd_lun *un = NULL; 22188 struct dk_cinfo *info; 22189 dev_info_t *pdip; 22190 int lun, tgt; 22191 22192 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22193 return (ENXIO); 22194 } 22195 22196 info = (struct dk_cinfo *) 22197 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 22198 22199 switch (un->un_ctype) { 22200 case CTYPE_CDROM: 22201 info->dki_ctype = DKC_CDROM; 22202 break; 22203 default: 22204 info->dki_ctype = DKC_SCSI_CCS; 22205 break; 22206 } 22207 pdip = ddi_get_parent(SD_DEVINFO(un)); 22208 info->dki_cnum = ddi_get_instance(pdip); 22209 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 22210 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 22211 } else { 22212 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 22213 DK_DEVLEN - 1); 22214 } 22215 22216 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22217 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 22218 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22219 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 22220 22221 /* Unit Information */ 22222 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 22223 info->dki_slave = ((tgt << 3) | lun); 22224 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 22225 DK_DEVLEN - 1); 22226 info->dki_flags = DKI_FMTVOL; 22227 info->dki_partition = SDPART(dev); 22228 22229 /* Max Transfer size of this device in blocks */ 22230 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 22231 info->dki_addr = 0; 22232 info->dki_space = 0; 22233 info->dki_prio = 0; 22234 info->dki_vec = 0; 22235 22236 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 22237 kmem_free(info, sizeof 
(struct dk_cinfo)); 22238 return (EFAULT); 22239 } else { 22240 kmem_free(info, sizeof (struct dk_cinfo)); 22241 return (0); 22242 } 22243 } 22244 22245 22246 /* 22247 * Function: sd_get_media_info 22248 * 22249 * Description: This routine is the driver entry point for handling ioctl 22250 * requests for the media type or command set profile used by the 22251 * drive to operate on the media (DKIOCGMEDIAINFO). 22252 * 22253 * Arguments: dev - the device number 22254 * arg - pointer to user provided dk_minfo structure 22255 * specifying the media type, logical block size and 22256 * drive capacity. 22257 * flag - this argument is a pass through to ddi_copyxxx() 22258 * directly from the mode argument of ioctl(). 22259 * 22260 * Return Code: 0 22261 * EACCES 22262 * EFAULT 22263 * ENXIO 22264 * EIO 22265 */ 22266 22267 static int 22268 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 22269 { 22270 struct sd_lun *un = NULL; 22271 struct uscsi_cmd com; 22272 struct scsi_inquiry *sinq; 22273 struct dk_minfo media_info; 22274 u_longlong_t media_capacity; 22275 uint64_t capacity; 22276 uint_t lbasize; 22277 uchar_t *out_data; 22278 uchar_t *rqbuf; 22279 int rval = 0; 22280 int rtn; 22281 sd_ssc_t *ssc; 22282 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 22283 (un->un_state == SD_STATE_OFFLINE)) { 22284 return (ENXIO); 22285 } 22286 22287 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 22288 22289 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 22290 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 22291 22292 /* Issue a TUR to determine if the drive is ready with media present */ 22293 ssc = sd_ssc_init(un); 22294 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA); 22295 if (rval == ENXIO) { 22296 goto done; 22297 } else if (rval != 0) { 22298 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22299 } 22300 22301 /* Now get configuration data */ 22302 if (ISCD(un)) { 22303 media_info.dki_media_type = DK_CDROM; 22304 22305 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 22306 if (un->un_f_mmc_cap == TRUE) { 22307 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, 22308 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 22309 SD_PATH_STANDARD); 22310 22311 if (rtn) { 22312 /* 22313 * Failed for other than an illegal request 22314 * or command not supported 22315 */ 22316 if ((com.uscsi_status == STATUS_CHECK) && 22317 (com.uscsi_rqstatus == STATUS_GOOD)) { 22318 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 22319 (rqbuf[12] != 0x20)) { 22320 rval = EIO; 22321 goto done; 22322 } 22323 } 22324 else 22325 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22326 } else { 22327 /* 22328 * The GET CONFIGURATION command succeeded 22329 * so set the media type according to the 22330 * returned data 22331 */ 22332 media_info.dki_media_type = out_data[6]; 22333 media_info.dki_media_type <<= 8; 22334 media_info.dki_media_type |= out_data[7]; 22335 } 22336 } 22337 } else { 22338 /* 22339 * The profile list is not available, so we attempt to identify 22340 * the media type based on the inquiry data 22341 */ 22342 sinq = un->un_sd->sd_inq; 22343 if ((sinq->inq_dtype == DTYPE_DIRECT) || 22344 (sinq->inq_dtype == DTYPE_OPTICAL)) { 22345 /* This is a direct access device or optical disk */ 22346 media_info.dki_media_type = DK_FIXED_DISK; 22347 22348 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 22349 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 22350 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 22351 media_info.dki_media_type = DK_ZIP; 22352 } else if ( 22353 (bcmp(sinq->inq_pid, "jaz",
3) == 0)) { 22354 media_info.dki_media_type = DK_JAZ; 22355 } 22356 } 22357 } else { 22358 /* 22359 * Not a CD, direct access or optical disk so return 22360 * unknown media 22361 */ 22362 media_info.dki_media_type = DK_UNKNOWN; 22363 } 22364 } 22365 22366 /* Now read the capacity so we can provide the lbasize and capacity */ 22367 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 22368 SD_PATH_DIRECT); 22369 switch (rval) { 22370 case 0: 22371 break; 22372 case EACCES: 22373 rval = EACCES; 22374 goto done; 22375 default: 22376 rval = EIO; 22377 goto done; 22378 } 22379 22380 /* 22381 * If the LUN has been expanded dynamically, update the un structure. 22382 */ 22383 mutex_enter(SD_MUTEX(un)); 22384 if ((un->un_f_blockcount_is_valid == TRUE) && 22385 (un->un_f_tgt_blocksize_is_valid == TRUE) && 22386 (capacity > un->un_blockcount)) { 22387 sd_update_block_info(un, lbasize, capacity); 22388 } 22389 mutex_exit(SD_MUTEX(un)); 22390 22391 media_info.dki_lbsize = lbasize; 22392 media_capacity = capacity; 22393 22394 /* 22395 * sd_send_scsi_READ_CAPACITY() reports capacity in 22396 * un->un_sys_blocksize chunks. So we need to convert it into 22397 * dki_lbsize chunks. 22398 */ 22399 media_capacity *= un->un_sys_blocksize; 22400 media_capacity /= lbasize; 22401 media_info.dki_capacity = media_capacity; 22402 22403 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 22404 rval = EFAULT; 22405 /* Use a goto in case more code gets added below this point */ 22406 goto no_assessment; 22407 } 22408 done: 22409 if (rval != 0) { 22410 if (rval == EIO) 22411 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22412 else 22413 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22414 } 22415 no_assessment: 22416 sd_ssc_fini(ssc); 22417 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 22418 kmem_free(rqbuf, SENSE_LENGTH); 22419 return (rval); 22420 } 22421 22422 22423 /* 22424 * Function: sd_check_media 22425 * 22426 * Description: This utility routine implements the functionality for the 22427 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 22428 * driver state changes from that specified by the user 22429 * (inserted or ejected). For example, if the user specifies 22430 * DKIO_EJECTED and the current media state is inserted this 22431 * routine will immediately return DKIO_INSERTED. However, if the 22432 * current media state is not inserted the user thread will be 22433 * blocked until the drive state changes. If DKIO_NONE is specified 22434 * the user thread will block until a drive state change occurs. 22435 * 22436 * Arguments: dev - the device number 22437 * state - user pointer to a dkio_state, updated with the current 22438 * drive state at return. 22439 * 22440 * Return Code: ENXIO 22441 * EIO 22442 * EAGAIN 22443 * EINTR 22444 */ 22445 22446 static int 22447 sd_check_media(dev_t dev, enum dkio_state state) 22448 { 22449 struct sd_lun *un = NULL; 22450 enum dkio_state prev_state; 22451 opaque_t token = NULL; 22452 int rval = 0; 22453 sd_ssc_t *ssc; 22454 22455 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22456 return (ENXIO); 22457 } 22458 22459 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 22460 22461 ssc = sd_ssc_init(un); 22462 22463 mutex_enter(SD_MUTEX(un)); 22464 22465 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 22466 "state=%x, mediastate=%x\n", state, un->un_mediastate); 22467 22468 prev_state = un->un_mediastate; 22469 22470 /* is there anything to do?
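 * (we submit the watch and block only when the caller's specified
 * state matches the current media state, or when the current media
 * state is DKIO_NONE, i.e. unknown)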
*/ 22471 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 22472 /* 22473 * submit the request to the scsi_watch service; 22474 * scsi_media_watch_cb() does the real work 22475 */ 22476 mutex_exit(SD_MUTEX(un)); 22477 22478 /* 22479 * This change handles the case where a scsi watch request is 22480 * added to a device that is powered down. To accomplish this 22481 * we power up the device before adding the scsi watch request, 22482 * since the scsi watch sends a TUR directly to the device 22483 * which the device cannot handle if it is powered down. 22484 */ 22485 if (sd_pm_entry(un) != DDI_SUCCESS) { 22486 mutex_enter(SD_MUTEX(un)); 22487 goto done; 22488 } 22489 22490 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 22491 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 22492 (caddr_t)dev); 22493 22494 sd_pm_exit(un); 22495 22496 mutex_enter(SD_MUTEX(un)); 22497 if (token == NULL) { 22498 rval = EAGAIN; 22499 goto done; 22500 } 22501 22502 /* 22503 * This is a special case IOCTL that doesn't return 22504 * until the media state changes. Routine sdpower 22505 * knows about and handles this so don't count it 22506 * as an active cmd in the driver, which would 22507 * keep the device busy to the pm framework. 22508 * If the count isn't decremented the device can't 22509 * be powered down. 22510 */ 22511 un->un_ncmds_in_driver--; 22512 ASSERT(un->un_ncmds_in_driver >= 0); 22513 22514 /* 22515 * if a prior request had been made, this will be the same 22516 * token, as scsi_watch was designed that way. 22517 */ 22518 un->un_swr_token = token; 22519 un->un_specified_mediastate = state; 22520 22521 /* 22522 * now wait for media change 22523 * we will not be signalled unless mediastate == state but it is 22524 * still better to test for this condition, since there is a 22525 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 22526 */ 22527 SD_TRACE(SD_LOG_COMMON, un, 22528 "sd_check_media: waiting for media state change\n"); 22529 while (un->un_mediastate == state) { 22530 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 22531 SD_TRACE(SD_LOG_COMMON, un, 22532 "sd_check_media: waiting for media state " 22533 "was interrupted\n"); 22534 un->un_ncmds_in_driver++; 22535 rval = EINTR; 22536 goto done; 22537 } 22538 SD_TRACE(SD_LOG_COMMON, un, 22539 "sd_check_media: received signal, state=%x\n", 22540 un->un_mediastate); 22541 } 22542 /* 22543 * Inc the counter to indicate the device once again 22544 * has an active outstanding cmd. 22545 */ 22546 un->un_ncmds_in_driver++; 22547 } 22548 22549 /* invalidate geometry */ 22550 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 22551 sr_ejected(un); 22552 } 22553 22554 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 22555 uint64_t capacity; 22556 uint_t lbasize; 22557 22558 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 22559 mutex_exit(SD_MUTEX(un)); 22560 /* 22561 * Since the following routines use SD_PATH_DIRECT, we must 22562 * call PM directly before the upcoming disk accesses. This 22563 * may cause the disk to be powered up and spun up.
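 * (sd_pm_entry() marks the device busy with the pm framework and
 * powers it up if necessary; each successful sd_pm_entry() below
 * is balanced by a matching sd_pm_exit())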
22564 */ 22565 22566 if (sd_pm_entry(un) == DDI_SUCCESS) { 22567 rval = sd_send_scsi_READ_CAPACITY(ssc, 22568 &capacity, &lbasize, SD_PATH_DIRECT); 22569 if (rval != 0) { 22570 sd_pm_exit(un); 22571 if (rval == EIO) 22572 sd_ssc_assessment(ssc, 22573 SD_FMT_STATUS_CHECK); 22574 else 22575 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22576 mutex_enter(SD_MUTEX(un)); 22577 goto done; 22578 } 22579 } else { 22580 rval = EIO; 22581 mutex_enter(SD_MUTEX(un)); 22582 goto done; 22583 } 22584 mutex_enter(SD_MUTEX(un)); 22585 22586 sd_update_block_info(un, lbasize, capacity); 22587 22588 /* 22589 * Check if the media in the device is writable or not 22590 */ 22591 if (ISCD(un)) { 22592 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 22593 } 22594 22595 mutex_exit(SD_MUTEX(un)); 22596 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 22597 if ((cmlb_validate(un->un_cmlbhandle, 0, 22598 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 22599 sd_set_pstats(un); 22600 SD_TRACE(SD_LOG_IO_PARTITION, un, 22601 "sd_check_media: un:0x%p pstats created and " 22602 "set\n", un); 22603 } 22604 22605 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 22606 SD_PATH_DIRECT); 22607 22608 sd_pm_exit(un); 22609 22610 if (rval != 0) { 22611 if (rval == EIO) 22612 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22613 else 22614 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22615 } 22616 22617 mutex_enter(SD_MUTEX(un)); 22618 } 22619 done: 22620 sd_ssc_fini(ssc); 22621 un->un_f_watcht_stopped = FALSE; 22622 /* 22623 * Use of this local token and the mutex ensures that we avoid 22624 * some race conditions associated with terminating the 22625 * scsi watch. 22626 */ 22627 if (token) { 22628 un->un_swr_token = (opaque_t)NULL; 22629 mutex_exit(SD_MUTEX(un)); 22630 (void) scsi_watch_request_terminate(token, 22631 SCSI_WATCH_TERMINATE_WAIT); 22632 mutex_enter(SD_MUTEX(un)); 22633 } 22634 22635 /* 22636 * Update the capacity kstat value, if no media previously 22637 * (capacity kstat is 0) and a media has been inserted 22638 * (un_f_blockcount_is_valid == TRUE) 22639 */ 22640 if (un->un_errstats) { 22641 struct sd_errstats *stp = NULL; 22642 22643 stp = (struct sd_errstats *)un->un_errstats->ks_data; 22644 if ((stp->sd_capacity.value.ui64 == 0) && 22645 (un->un_f_blockcount_is_valid == TRUE)) { 22646 stp->sd_capacity.value.ui64 = 22647 (uint64_t)((uint64_t)un->un_blockcount * 22648 un->un_sys_blocksize); 22649 } 22650 } 22651 mutex_exit(SD_MUTEX(un)); 22652 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 22653 return (rval); 22654 } 22655 22656 22657 /* 22658 * Function: sd_delayed_cv_broadcast 22659 * 22660 * Description: Delayed cv_broadcast to allow for target to recover from media 22661 * insertion. 22662 * 22663 * Arguments: arg - driver soft state (unit) structure 22664 */ 22665 22666 static void 22667 sd_delayed_cv_broadcast(void *arg) 22668 { 22669 struct sd_lun *un = arg; 22670 22671 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 22672 22673 mutex_enter(SD_MUTEX(un)); 22674 un->un_dcvb_timeid = NULL; 22675 cv_broadcast(&un->un_state_cv); 22676 mutex_exit(SD_MUTEX(un)); 22677 } 22678 22679 22680 /* 22681 * Function: sd_media_watch_cb 22682 * 22683 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 22684 * routine processes the TUR sense data and updates the driver 22685 * state if a transition has occurred. The user thread 22686 * (sd_check_media) is then signalled. 
22687 * 22688 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22689 * among multiple watches that share this callback function 22690 * resultp - scsi watch facility result packet containing scsi 22691 * packet, status byte and sense data 22692 * 22693 * Return Code: 0 for success, -1 for failure 22694 */ 22695 22696 static int 22697 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 22698 { 22699 struct sd_lun *un; 22700 struct scsi_status *statusp = resultp->statusp; 22701 uint8_t *sensep = (uint8_t *)resultp->sensep; 22702 enum dkio_state state = DKIO_NONE; 22703 dev_t dev = (dev_t)arg; 22704 uchar_t actual_sense_length; 22705 uint8_t skey, asc, ascq; 22706 22707 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22708 return (-1); 22709 } 22710 actual_sense_length = resultp->actual_sense_length; 22711 22712 mutex_enter(SD_MUTEX(un)); 22713 SD_TRACE(SD_LOG_COMMON, un, 22714 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 22715 *((char *)statusp), (void *)sensep, actual_sense_length); 22716 22717 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 22718 un->un_mediastate = DKIO_DEV_GONE; 22719 cv_broadcast(&un->un_state_cv); 22720 mutex_exit(SD_MUTEX(un)); 22721 22722 return (0); 22723 } 22724 22725 /* 22726 * If there was a check condition then sensep points to valid sense data 22727 * If status was not a check condition but a reservation or busy status 22728 * then the new state is DKIO_NONE 22729 */ 22730 if (sensep != NULL) { 22731 skey = scsi_sense_key(sensep); 22732 asc = scsi_sense_asc(sensep); 22733 ascq = scsi_sense_ascq(sensep); 22734 22735 SD_INFO(SD_LOG_COMMON, un, 22736 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 22737 skey, asc, ascq); 22738 /* This routine only uses up to 13 bytes of sense data. */ 22739 if (actual_sense_length >= 13) { 22740 if (skey == KEY_UNIT_ATTENTION) { 22741 if (asc == 0x28) { 22742 state = DKIO_INSERTED; 22743 } 22744 } else if (skey == KEY_NOT_READY) { 22745 /* 22746 * if 02/04/02 means that the host 22747 * should send start command. Explicitly 22748 * leave the media state as is 22749 * (inserted) as the media is inserted 22750 * and host has stopped device for PM 22751 * reasons. Upon next true read/write 22752 * to this media will bring the 22753 * device to the right state good for 22754 * media access. 22755 */ 22756 if (asc == 0x3a) { 22757 state = DKIO_EJECTED; 22758 } else { 22759 /* 22760 * If the drive is busy with an 22761 * operation or long write, keep the 22762 * media in an inserted state. 22763 */ 22764 22765 if ((asc == 0x04) && 22766 ((ascq == 0x02) || 22767 (ascq == 0x07) || 22768 (ascq == 0x08))) { 22769 state = DKIO_INSERTED; 22770 } 22771 } 22772 } else if (skey == KEY_NO_SENSE) { 22773 if ((asc == 0x00) && (ascq == 0x00)) { 22774 /* 22775 * Sense Data 00/00/00 does not provide 22776 * any information about the state of 22777 * the media. Ignore it. 
22778 */ 22779 mutex_exit(SD_MUTEX(un)); 22780 return (0); 22781 } 22782 } 22783 } 22784 } else if ((*((char *)statusp) == STATUS_GOOD) && 22785 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 22786 state = DKIO_INSERTED; 22787 } 22788 22789 SD_TRACE(SD_LOG_COMMON, un, 22790 "sd_media_watch_cb: state=%x, specified=%x\n", 22791 state, un->un_specified_mediastate); 22792 22793 /* 22794 * now signal the waiting thread if this is *not* the specified state; 22795 * delay the signal if the state is DKIO_INSERTED to allow the target 22796 * to recover 22797 */ 22798 if (state != un->un_specified_mediastate) { 22799 un->un_mediastate = state; 22800 if (state == DKIO_INSERTED) { 22801 /* 22802 * delay the signal to give the drive a chance 22803 * to do what it apparently needs to do 22804 */ 22805 SD_TRACE(SD_LOG_COMMON, un, 22806 "sd_media_watch_cb: delayed cv_broadcast\n"); 22807 if (un->un_dcvb_timeid == NULL) { 22808 un->un_dcvb_timeid = 22809 timeout(sd_delayed_cv_broadcast, un, 22810 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 22811 } 22812 } else { 22813 SD_TRACE(SD_LOG_COMMON, un, 22814 "sd_media_watch_cb: immediate cv_broadcast\n"); 22815 cv_broadcast(&un->un_state_cv); 22816 } 22817 } 22818 mutex_exit(SD_MUTEX(un)); 22819 return (0); 22820 } 22821 22822 22823 /* 22824 * Function: sd_dkio_get_temp 22825 * 22826 * Description: This routine is the driver entry point for handling ioctl 22827 * requests to get the disk temperature. 22828 * 22829 * Arguments: dev - the device number 22830 * arg - pointer to user provided dk_temperature structure. 22831 * flag - this argument is a pass through to ddi_copyxxx() 22832 * directly from the mode argument of ioctl(). 22833 * 22834 * Return Code: 0 22835 * EFAULT 22836 * ENXIO 22837 * EAGAIN 22838 */ 22839 22840 static int 22841 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 22842 { 22843 struct sd_lun *un = NULL; 22844 struct dk_temperature *dktemp = NULL; 22845 uchar_t *temperature_page; 22846 int rval = 0; 22847 int path_flag = SD_PATH_STANDARD; 22848 sd_ssc_t *ssc; 22849 22850 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22851 return (ENXIO); 22852 } 22853 22854 ssc = sd_ssc_init(un); 22855 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 22856 22857 /* copyin the disk temp argument to get the user flags */ 22858 if (ddi_copyin((void *)arg, dktemp, 22859 sizeof (struct dk_temperature), flag) != 0) { 22860 rval = EFAULT; 22861 goto done; 22862 } 22863 22864 /* Initialize the temperature to invalid. */ 22865 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 22866 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 22867 22868 /* 22869 * Note: Investigate removing the "bypass pm" semantic. 22870 * Can we just bypass PM always? 22871 */ 22872 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 22873 path_flag = SD_PATH_DIRECT; 22874 ASSERT(!mutex_owned(&un->un_pm_mutex)); 22875 mutex_enter(&un->un_pm_mutex); 22876 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 22877 /* 22878 * If DKT_BYPASS_PM is set, and the drive happens to be 22879 * in low power mode, we cannot wake it up; we need to 22880 * return EAGAIN. 22881 */ 22882 mutex_exit(&un->un_pm_mutex); 22883 rval = EAGAIN; 22884 goto done; 22885 } else { 22886 /* 22887 * Indicate to PM the device is busy. This is required 22888 * to avoid a race - i.e. the ioctl is issuing a 22889 * command and the pm framework brings down the device 22890 * to low power mode (possible power cut-off on some 22891 * platforms).
22892 */ 22893 mutex_exit(&un->un_pm_mutex); 22894 if (sd_pm_entry(un) != DDI_SUCCESS) { 22895 rval = EAGAIN; 22896 goto done; 22897 } 22898 } 22899 } 22900 22901 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 22902 22903 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page, 22904 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag); 22905 if (rval != 0) 22906 goto done2; 22907 22908 /* 22909 * For the current temperature verify that the parameter length is 0x02 22910 * and the parameter code is 0x00 22911 */ 22912 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 22913 (temperature_page[5] == 0x00)) { 22914 if (temperature_page[9] == 0xFF) { 22915 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 22916 } else { 22917 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 22918 } 22919 } 22920 22921 /* 22922 * For the reference temperature verify that the parameter 22923 * length is 0x02 and the parameter code is 0x01 22924 */ 22925 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 22926 (temperature_page[11] == 0x01)) { 22927 if (temperature_page[15] == 0xFF) { 22928 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 22929 } else { 22930 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 22931 } 22932 } 22933 22934 /* Do the copyout regardless of the temperature command's status. */ 22935 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 22936 flag) != 0) { 22937 rval = EFAULT; 22938 goto done1; 22939 } 22940 22941 done2: 22942 if (rval != 0) { 22943 if (rval == EIO) 22944 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22945 else 22946 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22947 } 22948 done1: 22949 if (path_flag == SD_PATH_DIRECT) { 22950 sd_pm_exit(un); 22951 } 22952 22953 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 22954 done: 22955 sd_ssc_fini(ssc); 22956 if (dktemp != NULL) { 22957 kmem_free(dktemp, sizeof (struct dk_temperature)); 22958 } 22959 22960 return (rval); 22961 } 22962 22963 22964 /* 22965 * Function: sd_log_page_supported 22966 * 22967 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 22968 * supported log pages. 22969 * 22970 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 22971 * structure for this target. 22972 * log_page - the log page code to look for in the list of 22973 * supported log pages. 22974 * 22975 * Return Code: -1 - on error (log sense is optional and may not be supported). 22976 * 0 - log page not found. 22977 * 1 - log page found. 22978 */ 22979 22980 static int 22981 sd_log_page_supported(sd_ssc_t *ssc, int log_page) 22982 { 22983 uchar_t *log_page_data; 22984 int i; 22985 int match = 0; 22986 int log_size; 22987 int status = 0; 22988 struct sd_lun *un; 22989 22990 ASSERT(ssc != NULL); 22991 un = ssc->ssc_un; 22992 ASSERT(un != NULL); 22993 22994 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 22995 22996 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0, 22997 SD_PATH_DIRECT); 22998 22999 if (status != 0) { 23000 if (status == EIO) { 23001 /* 23002 * Some disks do not support log sense; we 23003 * should ignore this kind of error (sense key is 23004 * 0x5 - illegal request).
23004 */ 23005 uint8_t *sensep; 23006 int senlen; 23007 23008 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 23009 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 23010 ssc->ssc_uscsi_cmd->uscsi_rqresid); 23011 23012 if (senlen > 0 && 23013 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 23014 sd_ssc_assessment(ssc, 23015 SD_FMT_IGNORE_COMPROMISE); 23016 } else { 23017 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23018 } 23019 } else { 23020 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23021 } 23022 23023 SD_ERROR(SD_LOG_COMMON, un, 23024 "sd_log_page_supported: failed log page retrieval\n"); 23025 kmem_free(log_page_data, 0xFF); 23026 return (-1); 23027 } 23028 23029 log_size = log_page_data[3]; 23030 23031 /* 23032 * The list of supported log pages starts from the fourth byte. Check 23033 * until we run out of log pages or a match is found. 23034 */ 23035 for (i = 4; (i < (log_size + 4)) && !match; i++) { 23036 if (log_page_data[i] == log_page) { 23037 match++; 23038 } 23039 } 23040 kmem_free(log_page_data, 0xFF); 23041 return (match); 23042 } 23043 23044 23045 /* 23046 * Function: sd_mhdioc_failfast 23047 * 23048 * Description: This routine is the driver entry point for handling ioctl 23049 * requests to enable/disable the multihost failfast option. 23050 * (MHIOCENFAILFAST) 23051 * 23052 * Arguments: dev - the device number 23053 * arg - user specified probing interval, in milliseconds. 23054 * flag - this argument is a pass through to ddi_copyxxx() 23055 * directly from the mode argument of ioctl(). 23056 * 23057 * Return Code: 0 23058 * EFAULT 23059 * ENXIO 23060 */ 23061 23062 static int 23063 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 23064 { 23065 struct sd_lun *un = NULL; 23066 int mh_time; 23067 int rval = 0; 23068 23069 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23070 return (ENXIO); 23071 } 23072 23073 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 23074 return (EFAULT); 23075 23076 if (mh_time) { 23077 mutex_enter(SD_MUTEX(un)); 23078 un->un_resvd_status |= SD_FAILFAST; 23079 mutex_exit(SD_MUTEX(un)); 23080 /* 23081 * If mh_time is INT_MAX, then this ioctl is being used for 23082 * SCSI-3 PGR purposes, and we don't need to spawn the watch 23083 * thread. 23084 */ 23085 if (mh_time != INT_MAX) { 23086 rval = sd_check_mhd(dev, mh_time); 23087 } 23088 } else { 23089 (void) sd_check_mhd(dev, 0); 23090 mutex_enter(SD_MUTEX(un)); 23091 un->un_resvd_status &= ~SD_FAILFAST; 23092 mutex_exit(SD_MUTEX(un)); 23093 } 23094 return (rval); 23095 } 23096 23097 /* 23098 * Function: sd_mhdioc_takeown 23099 * 23100 * Description: This routine is the driver entry point for handling ioctl 23101 * requests to forcefully acquire exclusive access rights to the 23102 * multihost disk (MHIOCTKOWN). 23103 * 23104 * Arguments: dev - the device number 23105 * arg - user provided structure specifying the delay 23106 * parameters in milliseconds 23107 * flag - this argument is a pass through to ddi_copyxxx() 23108 * directly from the mode argument of ioctl().
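 *
 * As a hedged illustration (hypothetical userland caller; fd and
 * the delay value are assumptions, not defaults mandated by this
 * driver), ownership might be taken with:
 *
 *	struct mhioctkown t;
 *
 *	bzero(&t, sizeof (t));
 *	t.reinstate_resv_delay = 1000;	milliseconds
 *	if (ioctl(fd, MHIOCTKOWN, &t) != 0)
 *		perror("MHIOCTKOWN");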
23109 * 23110 * Return Code: 0 23111 * EFAULT 23112 * ENXIO 23113 */ 23114 23115 static int 23116 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 23117 { 23118 struct sd_lun *un = NULL; 23119 struct mhioctkown *tkown = NULL; 23120 int rval = 0; 23121 23122 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23123 return (ENXIO); 23124 } 23125 23126 if (arg != NULL) { 23127 tkown = (struct mhioctkown *) 23128 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 23129 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 23130 if (rval != 0) { 23131 rval = EFAULT; 23132 goto error; 23133 } 23134 } 23135 23136 rval = sd_take_ownership(dev, tkown); 23137 mutex_enter(SD_MUTEX(un)); 23138 if (rval == 0) { 23139 un->un_resvd_status |= SD_RESERVE; 23140 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 23141 sd_reinstate_resv_delay = 23142 tkown->reinstate_resv_delay * 1000; 23143 } else { 23144 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 23145 } 23146 /* 23147 * Give the scsi_watch routine interval set by 23148 * the MHIOCENFAILFAST ioctl precedence here. 23149 */ 23150 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 23151 mutex_exit(SD_MUTEX(un)); 23152 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 23153 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23154 "sd_mhdioc_takeown : %d\n", 23155 sd_reinstate_resv_delay); 23156 } else { 23157 mutex_exit(SD_MUTEX(un)); 23158 } 23159 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 23160 sd_mhd_reset_notify_cb, (caddr_t)un); 23161 } else { 23162 un->un_resvd_status &= ~SD_RESERVE; 23163 mutex_exit(SD_MUTEX(un)); 23164 } 23165 23166 error: 23167 if (tkown != NULL) { 23168 kmem_free(tkown, sizeof (struct mhioctkown)); 23169 } 23170 return (rval); 23171 } 23172 23173 23174 /* 23175 * Function: sd_mhdioc_release 23176 * 23177 * Description: This routine is the driver entry point for handling ioctl 23178 * requests to release exclusive access rights to the multihost 23179 * disk (MHIOCRELEASE). 23180 * 23181 * Arguments: dev - the device number 23182 * 23183 * Return Code: 0 23184 * ENXIO 23185 */ 23186 23187 static int 23188 sd_mhdioc_release(dev_t dev) 23189 { 23190 struct sd_lun *un = NULL; 23191 timeout_id_t resvd_timeid_save; 23192 int resvd_status_save; 23193 int rval = 0; 23194 23195 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23196 return (ENXIO); 23197 } 23198 23199 mutex_enter(SD_MUTEX(un)); 23200 resvd_status_save = un->un_resvd_status; 23201 un->un_resvd_status &= 23202 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 23203 if (un->un_resvd_timeid) { 23204 resvd_timeid_save = un->un_resvd_timeid; 23205 un->un_resvd_timeid = NULL; 23206 mutex_exit(SD_MUTEX(un)); 23207 (void) untimeout(resvd_timeid_save); 23208 } else { 23209 mutex_exit(SD_MUTEX(un)); 23210 } 23211 23212 /* 23213 * destroy any pending timeout thread that may be attempting to 23214 * reinstate reservation on this device. 
23215 */ 23216 sd_rmv_resv_reclaim_req(dev); 23217 23218 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 23219 mutex_enter(SD_MUTEX(un)); 23220 if ((un->un_mhd_token) && 23221 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 23222 mutex_exit(SD_MUTEX(un)); 23223 (void) sd_check_mhd(dev, 0); 23224 } else { 23225 mutex_exit(SD_MUTEX(un)); 23226 } 23227 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 23228 sd_mhd_reset_notify_cb, (caddr_t)un); 23229 } else { 23230 /* 23231 * sd_mhd_watch_cb will restart the resvd recover timeout thread 23232 */ 23233 mutex_enter(SD_MUTEX(un)); 23234 un->un_resvd_status = resvd_status_save; 23235 mutex_exit(SD_MUTEX(un)); 23236 } 23237 return (rval); 23238 } 23239 23240 23241 /* 23242 * Function: sd_mhdioc_register_devid 23243 * 23244 * Description: This routine is the driver entry point for handling ioctl 23245 * requests to register the device id (MHIOCREREGISTERDEVID). 23246 * 23247 * Note: The implementation for this ioctl has been updated to 23248 * be consistent with the original PSARC case (1999/357) 23249 * (4375899, 4241671, 4220005) 23250 * 23251 * Arguments: dev - the device number 23252 * 23253 * Return Code: 0 23254 * ENXIO 23255 */ 23256 23257 static int 23258 sd_mhdioc_register_devid(dev_t dev) 23259 { 23260 struct sd_lun *un = NULL; 23261 int rval = 0; 23262 sd_ssc_t *ssc; 23263 23264 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23265 return (ENXIO); 23266 } 23267 23268 ASSERT(!mutex_owned(SD_MUTEX(un))); 23269 23270 mutex_enter(SD_MUTEX(un)); 23271 23272 /* If a devid already exists, de-register it */ 23273 if (un->un_devid != NULL) { 23274 ddi_devid_unregister(SD_DEVINFO(un)); 23275 /* 23276 * After unregistering the devid, we need to free the 23277 * devid memory. 23278 */ 23279 ddi_devid_free(un->un_devid); 23280 un->un_devid = NULL; 23281 } 23282 23283 /* Check for reservation conflict */ 23284 mutex_exit(SD_MUTEX(un)); 23285 ssc = sd_ssc_init(un); 23286 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 23287 mutex_enter(SD_MUTEX(un)); 23288 23289 switch (rval) { 23290 case 0: 23291 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 23292 break; 23293 case EACCES: 23294 break; 23295 default: 23296 rval = EIO; 23297 } 23298 23299 mutex_exit(SD_MUTEX(un)); 23300 if (rval != 0) { 23301 if (rval == EIO) 23302 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23303 else 23304 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23305 } 23306 sd_ssc_fini(ssc); 23307 return (rval); 23308 } 23309 23310 /* 23311 * Function: sd_mhdioc_inkeys 23312 * 23313 * Description: This routine is the driver entry point for handling ioctl 23314 * requests to issue the SCSI-3 Persistent In Read Keys command 23315 * to the device (MHIOCGRP_INKEYS). 23316 * 23317 * Arguments: dev - the device number 23318 * arg - user provided in_keys structure 23319 * flag - this argument is a pass through to ddi_copyxxx() 23320 * directly from the mode argument of ioctl().
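 *
 * As a hedged sketch (hypothetical userland caller; fd and the
 * list sizing are assumptions for illustration), the registered
 * keys might be read with:
 *
 *	mhioc_resv_key_t keys[16];
 *	mhioc_key_list_t kl;
 *	mhioc_inkeys_t ik;
 *
 *	kl.listsize = 16;
 *	kl.list = keys;
 *	ik.li = &kl;
 *	if (ioctl(fd, MHIOCGRP_INKEYS, &ik) == 0)
 *		... the first min(kl.listlen, 16) entries are valid ...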
23321 * 23322 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 23323 * ENXIO 23324 * EFAULT 23325 */ 23326 23327 static int 23328 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 23329 { 23330 struct sd_lun *un; 23331 mhioc_inkeys_t inkeys; 23332 int rval = 0; 23333 23334 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23335 return (ENXIO); 23336 } 23337 23338 #ifdef _MULTI_DATAMODEL 23339 switch (ddi_model_convert_from(flag & FMODELS)) { 23340 case DDI_MODEL_ILP32: { 23341 struct mhioc_inkeys32 inkeys32; 23342 23343 if (ddi_copyin(arg, &inkeys32, 23344 sizeof (struct mhioc_inkeys32), flag) != 0) { 23345 return (EFAULT); 23346 } 23347 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 23348 if ((rval = sd_persistent_reservation_in_read_keys(un, 23349 &inkeys, flag)) != 0) { 23350 return (rval); 23351 } 23352 inkeys32.generation = inkeys.generation; 23353 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 23354 flag) != 0) { 23355 return (EFAULT); 23356 } 23357 break; 23358 } 23359 case DDI_MODEL_NONE: 23360 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 23361 flag) != 0) { 23362 return (EFAULT); 23363 } 23364 if ((rval = sd_persistent_reservation_in_read_keys(un, 23365 &inkeys, flag)) != 0) { 23366 return (rval); 23367 } 23368 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 23369 flag) != 0) { 23370 return (EFAULT); 23371 } 23372 break; 23373 } 23374 23375 #else /* ! _MULTI_DATAMODEL */ 23376 23377 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 23378 return (EFAULT); 23379 } 23380 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 23381 if (rval != 0) { 23382 return (rval); 23383 } 23384 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 23385 return (EFAULT); 23386 } 23387 23388 #endif /* _MULTI_DATAMODEL */ 23389 23390 return (rval); 23391 } 23392 23393 23394 /* 23395 * Function: sd_mhdioc_inresv 23396 * 23397 * Description: This routine is the driver entry point for handling ioctl 23398 * requests to issue the SCSI-3 Persistent In Read Reservations 23399 * command to the device (MHIOCGRP_INKEYS). 23400 * 23401 * Arguments: dev - the device number 23402 * arg - user provided in_resv structure 23403 * flag - this argument is a pass through to ddi_copyxxx() 23404 * directly from the mode argument of ioctl(). 
23405 * 23406 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 23407 * ENXIO 23408 * EFAULT 23409 */ 23410 23411 static int 23412 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 23413 { 23414 struct sd_lun *un; 23415 mhioc_inresvs_t inresvs; 23416 int rval = 0; 23417 23418 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23419 return (ENXIO); 23420 } 23421 23422 #ifdef _MULTI_DATAMODEL 23423 23424 switch (ddi_model_convert_from(flag & FMODELS)) { 23425 case DDI_MODEL_ILP32: { 23426 struct mhioc_inresvs32 inresvs32; 23427 23428 if (ddi_copyin(arg, &inresvs32, 23429 sizeof (struct mhioc_inresvs32), flag) != 0) { 23430 return (EFAULT); 23431 } 23432 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 23433 if ((rval = sd_persistent_reservation_in_read_resv(un, 23434 &inresvs, flag)) != 0) { 23435 return (rval); 23436 } 23437 inresvs32.generation = inresvs.generation; 23438 if (ddi_copyout(&inresvs32, arg, 23439 sizeof (struct mhioc_inresvs32), flag) != 0) { 23440 return (EFAULT); 23441 } 23442 break; 23443 } 23444 case DDI_MODEL_NONE: 23445 if (ddi_copyin(arg, &inresvs, 23446 sizeof (mhioc_inresvs_t), flag) != 0) { 23447 return (EFAULT); 23448 } 23449 if ((rval = sd_persistent_reservation_in_read_resv(un, 23450 &inresvs, flag)) != 0) { 23451 return (rval); 23452 } 23453 if (ddi_copyout(&inresvs, arg, 23454 sizeof (mhioc_inresvs_t), flag) != 0) { 23455 return (EFAULT); 23456 } 23457 break; 23458 } 23459 23460 #else /* ! _MULTI_DATAMODEL */ 23461 23462 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 23463 return (EFAULT); 23464 } 23465 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 23466 if (rval != 0) { 23467 return (rval); 23468 } 23469 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 23470 return (EFAULT); 23471 } 23472 23473 #endif /* ! _MULTI_DATAMODEL */ 23474 23475 return (rval); 23476 } 23477 23478 23479 /* 23480 * The following routines support the clustering functionality described below 23481 * and implement lost reservation reclaim functionality. 23482 * 23483 * Clustering 23484 * ---------- 23485 * The clustering code uses two different, independent forms of SCSI 23486 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 23487 * Persistent Group Reservations. For any particular disk, it will use either 23488 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 23489 * 23490 * SCSI-2 23491 * The cluster software takes ownership of a multi-hosted disk by issuing the 23492 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 23493 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 23494 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 23495 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 23496 * driver. The meaning of failfast is that if the driver (on this host) ever 23497 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 23498 * it should immediately panic the host. The motivation for this ioctl is that 23499 * if this host does encounter reservation conflict, the underlying cause is 23500 * that some other host of the cluster has decided that this host is no longer 23501 * in the cluster and has seized control of the disks for itself. Since this 23502 * host is no longer in the cluster, it ought to panic itself. 
The 23503 * MHIOCENFAILFAST ioctl does two things: 23504 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 23505 * error to panic the host 23506 * (b) it sets up a periodic timer to test whether this host still has 23507 * "access" (in that no other host has reserved the device): if the 23508 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 23509 * purpose of that periodic timer is to handle scenarios where the host is 23510 * otherwise temporarily quiescent, temporarily doing no real i/o. 23511 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 23512 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 23513 * the device itself. 23514 * 23515 * SCSI-3 PGR 23516 * A direct semantic implementation of the SCSI-3 Persistent Reservation 23517 * facility is supported through the shared multihost disk ioctls 23518 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 23519 * MHIOCGRP_PREEMPTANDABORT). 23520 * 23521 * Reservation Reclaim: 23522 * -------------------- 23523 * To support the lost reservation reclaim operations this driver creates a 23524 * single thread to handle reinstating reservations on all devices that have 23525 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 23526 * have LOST RESERVATIONS when the scsi watch facility calls back sd_mhd_watch_cb, 23527 * and the reservation reclaim thread loops through the requests to regain the 23528 * lost reservations. 23529 */ 23530 23531 /* 23532 * Function: sd_check_mhd() 23533 * 23534 * Description: This function sets up and submits a scsi watch request or 23535 * terminates an existing watch request. This routine is used in 23536 * support of reservation reclaim. 23537 * 23538 * Arguments: dev - the device 'dev_t' is used for context to discriminate 23539 * among multiple watches that share the callback function 23540 * interval - the number of milliseconds specifying the watch 23541 * interval for issuing TEST UNIT READY commands. If 23542 * set to 0 the watch should be terminated. If the 23543 * interval is set to 0 and if the device is required 23544 * to hold reservation while disabling failfast, the 23545 * watch is restarted with an interval of 23546 * reinstate_resv_delay. 23547 * 23548 * Return Code: 0 - Successful submit/terminate of scsi watch request 23549 * ENXIO - Indicates an invalid device was specified 23550 * EAGAIN - Unable to submit the scsi watch request 23551 */ 23552 23553 static int 23554 sd_check_mhd(dev_t dev, int interval) 23555 { 23556 struct sd_lun *un; 23557 opaque_t token; 23558 23559 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23560 return (ENXIO); 23561 } 23562 23563 /* is this a watch termination request? */ 23564 if (interval == 0) { 23565 mutex_enter(SD_MUTEX(un)); 23566 /* if there is an existing watch task then terminate it */ 23567 if (un->un_mhd_token) { 23568 token = un->un_mhd_token; 23569 un->un_mhd_token = NULL; 23570 mutex_exit(SD_MUTEX(un)); 23571 (void) scsi_watch_request_terminate(token, 23572 SCSI_WATCH_TERMINATE_ALL_WAIT); 23573 mutex_enter(SD_MUTEX(un)); 23574 } else { 23575 mutex_exit(SD_MUTEX(un)); 23576 /* 23577 * Note: If we return here we don't check for the 23578 * failfast case. This is the original legacy 23579 * implementation but perhaps we should be checking 23580 * the failfast case.
23581 */ 23582 return (0); 23583 } 23584 /* 23585 * If the device is required to hold reservation while 23586 * disabling failfast, we need to restart the scsi_watch 23587 * routine with an interval of reinstate_resv_delay. 23588 */ 23589 if (un->un_resvd_status & SD_RESERVE) { 23590 interval = sd_reinstate_resv_delay/1000; 23591 } else { 23592 /* no failfast so bail */ 23593 mutex_exit(SD_MUTEX(un)); 23594 return (0); 23595 } 23596 mutex_exit(SD_MUTEX(un)); 23597 } 23598 23599 /* 23600 * adjust minimum time interval to 1 second, 23601 * and convert from msecs to usecs 23602 */ 23603 if (interval > 0 && interval < 1000) { 23604 interval = 1000; 23605 } 23606 interval *= 1000; 23607 23608 /* 23609 * submit the request to the scsi_watch service 23610 */ 23611 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 23612 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 23613 if (token == NULL) { 23614 return (EAGAIN); 23615 } 23616 23617 /* 23618 * save token for termination later on 23619 */ 23620 mutex_enter(SD_MUTEX(un)); 23621 un->un_mhd_token = token; 23622 mutex_exit(SD_MUTEX(un)); 23623 return (0); 23624 } 23625 23626 23627 /* 23628 * Function: sd_mhd_watch_cb() 23629 * 23630 * Description: This function is the call back function used by the scsi watch 23631 * facility. The scsi watch facility sends the "Test Unit Ready" 23632 * and processes the status. If applicable (i.e. a "Unit Attention" 23633 * status and automatic "Request Sense" not used) the scsi watch 23634 * facility will send a "Request Sense" and retrieve the sense data 23635 * to be passed to this callback function. In either case, whether 23636 * via the automatic "Request Sense" or the facility submitting 23637 * one, this callback is passed the status and sense data. 23638 * 23639 * Arguments: arg - the device 'dev_t' is used for context to discriminate 23640 * among multiple watches that share this callback function 23641 * resultp - scsi watch facility result packet containing scsi 23642 * packet, status byte and sense data 23643 * 23644 * Return Code: 0 - continue the watch task 23645 * non-zero - terminate the watch task 23646 */ 23647 23648 static int 23649 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 23650 { 23651 struct sd_lun *un; 23652 struct scsi_status *statusp; 23653 uint8_t *sensep; 23654 struct scsi_pkt *pkt; 23655 uchar_t actual_sense_length; 23656 dev_t dev = (dev_t)arg; 23657 23658 ASSERT(resultp != NULL); 23659 statusp = resultp->statusp; 23660 sensep = (uint8_t *)resultp->sensep; 23661 pkt = resultp->pkt; 23662 actual_sense_length = resultp->actual_sense_length; 23663 23664 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23665 return (ENXIO); 23666 } 23667 23668 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23669 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 23670 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 23671 23672 /* Begin processing of the status and/or sense data */ 23673 if (pkt->pkt_reason != CMD_CMPLT) { 23674 /* Handle the incomplete packet */ 23675 sd_mhd_watch_incomplete(un, pkt); 23676 return (0); 23677 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 23678 if (*((unsigned char *)statusp) 23679 == STATUS_RESERVATION_CONFLICT) { 23680 /* 23681 * Handle a reservation conflict by panicking if 23682 * configured for failfast or by logging the conflict 23683 * and updating the reservation status 23684 */ 23685 mutex_enter(SD_MUTEX(un)); 23686 if ((un->un_resvd_status & SD_FAILFAST) && 23687 (sd_failfast_enable)) { 23688
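/* Failfast is armed: a reservation conflict is fatal by design. */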
sd_panic_for_res_conflict(un); 23689 /*NOTREACHED*/ 23690 } 23691 SD_INFO(SD_LOG_IOCTL_MHD, un, 23692 "sd_mhd_watch_cb: Reservation Conflict\n"); 23693 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 23694 mutex_exit(SD_MUTEX(un)); 23695 } 23696 } 23697 23698 if (sensep != NULL) { 23699 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 23700 mutex_enter(SD_MUTEX(un)); 23701 if ((scsi_sense_asc(sensep) == 23702 SD_SCSI_RESET_SENSE_CODE) && 23703 (un->un_resvd_status & SD_RESERVE)) { 23704 /* 23705 * The additional sense code indicates a power 23706 * on or bus device reset has occurred; update 23707 * the reservation status. 23708 */ 23709 un->un_resvd_status |= 23710 (SD_LOST_RESERVE | SD_WANT_RESERVE); 23711 SD_INFO(SD_LOG_IOCTL_MHD, un, 23712 "sd_mhd_watch_cb: Lost Reservation\n"); 23713 } 23714 } else { 23715 return (0); 23716 } 23717 } else { 23718 mutex_enter(SD_MUTEX(un)); 23719 } 23720 23721 if ((un->un_resvd_status & SD_RESERVE) && 23722 (un->un_resvd_status & SD_LOST_RESERVE)) { 23723 if (un->un_resvd_status & SD_WANT_RESERVE) { 23724 /* 23725 * A reset occurred in between the last probe and this 23726 * one so if a timeout is pending cancel it. 23727 */ 23728 if (un->un_resvd_timeid) { 23729 timeout_id_t temp_id = un->un_resvd_timeid; 23730 un->un_resvd_timeid = NULL; 23731 mutex_exit(SD_MUTEX(un)); 23732 (void) untimeout(temp_id); 23733 mutex_enter(SD_MUTEX(un)); 23734 } 23735 un->un_resvd_status &= ~SD_WANT_RESERVE; 23736 } 23737 if (un->un_resvd_timeid == 0) { 23738 /* Schedule a timeout to handle the lost reservation */ 23739 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 23740 (void *)dev, 23741 drv_usectohz(sd_reinstate_resv_delay)); 23742 } 23743 } 23744 mutex_exit(SD_MUTEX(un)); 23745 return (0); 23746 } 23747 23748 23749 /* 23750 * Function: sd_mhd_watch_incomplete() 23751 * 23752 * Description: This function is used to find out why a scsi pkt sent by the 23753 * scsi watch facility was not completed. Under some scenarios this 23754 * routine will return. Otherwise it will send a bus reset to see 23755 * if the drive is still online. 23756 * 23757 * Arguments: un - driver soft state (unit) structure 23758 * pkt - incomplete scsi pkt 23759 */ 23760 23761 static void 23762 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 23763 { 23764 int be_chatty; 23765 int perr; 23766 23767 ASSERT(pkt != NULL); 23768 ASSERT(un != NULL); 23769 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 23770 perr = (pkt->pkt_statistics & STAT_PERR); 23771 23772 mutex_enter(SD_MUTEX(un)); 23773 if (un->un_state == SD_STATE_DUMPING) { 23774 mutex_exit(SD_MUTEX(un)); 23775 return; 23776 } 23777 23778 switch (pkt->pkt_reason) { 23779 case CMD_UNX_BUS_FREE: 23780 /* 23781 * If we had a parity error that caused the target to drop BSY*, 23782 * don't be chatty about it. 23783 */ 23784 if (perr && be_chatty) { 23785 be_chatty = 0; 23786 } 23787 break; 23788 case CMD_TAG_REJECT: 23789 /* 23790 * The SCSI-2 spec states that a tag reject will be sent by the 23791 * target if tagged queuing is not supported. A tag reject may 23792 * also be sent during certain initialization periods or to 23793 * control internal resources. For the latter case the target 23794 * may also return Queue Full. 23795 * 23796 * If this driver receives a tag reject from a target that is 23797 * going through an init period or controlling internal 23798 * resources tagged queuing will be disabled. 
This is less than
 *		optimal behavior, but the driver is unable to determine the
 *		target state and assumes tagged queueing is not supported.
 */
		pkt->pkt_flags = 0;
		un->un_tagflags = 0;

		if (un->un_f_opt_queueing == TRUE) {
			un->un_throttle = min(un->un_throttle, 3);
		} else {
			un->un_throttle = 1;
		}
		mutex_exit(SD_MUTEX(un));
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
		mutex_enter(SD_MUTEX(un));
		break;
	case CMD_INCOMPLETE:
		/*
		 * The transport stopped with an abnormal state, fallthrough and
		 * reset the target and/or bus unless selection did not complete
		 * (indicated by STATE_GOT_BUS) in which case we don't want to
		 * go through a target/bus reset
		 */
		if (pkt->pkt_state == STATE_GOT_BUS) {
			break;
		}
		/*FALLTHROUGH*/

	case CMD_TIMEOUT:
	default:
		/*
		 * The lun may still be running the command, so a lun reset
		 * should be attempted. If the lun reset fails or cannot be
		 * issued, then try a target reset. Lastly try a bus reset.
		 */
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
			int reset_retval = 0;
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_allow_bus_device_reset == TRUE) {
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			}
			mutex_enter(SD_MUTEX(un));
		}
		break;
	}

	/* A device/bus reset has occurred; update the reservation status. */
	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: Lost Reservation\n");
		}
	}

	/*
	 * The disk has been turned off; Update the device state.
	 *
	 * Note: Should we be offlining the disk here?
	 */
	if (pkt->pkt_state == STATE_GOT_BUS) {
		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
		    "Disk not responding to selection\n");
		if (un->un_state != SD_STATE_OFFLINE) {
			New_state(un, SD_STATE_OFFLINE);
		}
	} else if (be_chatty) {
		/*
		 * suppress messages if they are all the same pkt reason;
		 * with TQ, many (up to 256) are returned with the same
		 * pkt_reason
		 */
		if (pkt->pkt_reason != un->un_last_pkt_reason) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: "
			    "SCSI transport failed: reason '%s'\n",
			    scsi_rname(pkt->pkt_reason));
		}
	}
	un->un_last_pkt_reason = pkt->pkt_reason;
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_sname()
 *
 * Description: This is a simple little routine to return a string containing
 *		a printable description of a command status byte for use in
 *		logging.
 *
 * Arguments: status - the status byte
 *
 * Return Code: char * - string containing status description.
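 *
 *		A minimal usage sketch (hypothetical; cmn_err(9F) is used
 *		here only for illustration):
 *
 *		    cmn_err(CE_NOTE, "cmd status: %s", sd_sname(STATUS_CHECK));
 *
 *		would log "cmd status: check condition".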
23907 */ 23908 23909 static char * 23910 sd_sname(uchar_t status) 23911 { 23912 switch (status & STATUS_MASK) { 23913 case STATUS_GOOD: 23914 return ("good status"); 23915 case STATUS_CHECK: 23916 return ("check condition"); 23917 case STATUS_MET: 23918 return ("condition met"); 23919 case STATUS_BUSY: 23920 return ("busy"); 23921 case STATUS_INTERMEDIATE: 23922 return ("intermediate"); 23923 case STATUS_INTERMEDIATE_MET: 23924 return ("intermediate - condition met"); 23925 case STATUS_RESERVATION_CONFLICT: 23926 return ("reservation_conflict"); 23927 case STATUS_TERMINATED: 23928 return ("command terminated"); 23929 case STATUS_QFULL: 23930 return ("queue full"); 23931 default: 23932 return ("<unknown status>"); 23933 } 23934 } 23935 23936 23937 /* 23938 * Function: sd_mhd_resvd_recover() 23939 * 23940 * Description: This function adds a reservation entry to the 23941 * sd_resv_reclaim_request list and signals the reservation 23942 * reclaim thread that there is work pending. If the reservation 23943 * reclaim thread has not been previously created this function 23944 * will kick it off. 23945 * 23946 * Arguments: arg - the device 'dev_t' is used for context to discriminate 23947 * among multiple watches that share this callback function 23948 * 23949 * Context: This routine is called by timeout() and is run in interrupt 23950 * context. It must not sleep or call other functions which may 23951 * sleep. 23952 */ 23953 23954 static void 23955 sd_mhd_resvd_recover(void *arg) 23956 { 23957 dev_t dev = (dev_t)arg; 23958 struct sd_lun *un; 23959 struct sd_thr_request *sd_treq = NULL; 23960 struct sd_thr_request *sd_cur = NULL; 23961 struct sd_thr_request *sd_prev = NULL; 23962 int already_there = 0; 23963 23964 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23965 return; 23966 } 23967 23968 mutex_enter(SD_MUTEX(un)); 23969 un->un_resvd_timeid = NULL; 23970 if (un->un_resvd_status & SD_WANT_RESERVE) { 23971 /* 23972 * There was a reset so don't issue the reserve, allow the 23973 * sd_mhd_watch_cb callback function to notice this and 23974 * reschedule the timeout for reservation. 23975 */ 23976 mutex_exit(SD_MUTEX(un)); 23977 return; 23978 } 23979 mutex_exit(SD_MUTEX(un)); 23980 23981 /* 23982 * Add this device to the sd_resv_reclaim_request list and the 23983 * sd_resv_reclaim_thread should take care of the rest. 23984 * 23985 * Note: We can't sleep in this context so if the memory allocation 23986 * fails allow the sd_mhd_watch_cb callback function to notice this and 23987 * reschedule the timeout for reservation. 
(4378460) 23988 */ 23989 sd_treq = (struct sd_thr_request *) 23990 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 23991 if (sd_treq == NULL) { 23992 return; 23993 } 23994 23995 sd_treq->sd_thr_req_next = NULL; 23996 sd_treq->dev = dev; 23997 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 23998 if (sd_tr.srq_thr_req_head == NULL) { 23999 sd_tr.srq_thr_req_head = sd_treq; 24000 } else { 24001 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 24002 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 24003 if (sd_cur->dev == dev) { 24004 /* 24005 * already in Queue so don't log 24006 * another request for the device 24007 */ 24008 already_there = 1; 24009 break; 24010 } 24011 sd_prev = sd_cur; 24012 } 24013 if (!already_there) { 24014 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 24015 "logging request for %lx\n", dev); 24016 sd_prev->sd_thr_req_next = sd_treq; 24017 } else { 24018 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 24019 } 24020 } 24021 24022 /* 24023 * Create a kernel thread to do the reservation reclaim and free up this 24024 * thread. We cannot block this thread while we go away to do the 24025 * reservation reclaim 24026 */ 24027 if (sd_tr.srq_resv_reclaim_thread == NULL) 24028 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 24029 sd_resv_reclaim_thread, NULL, 24030 0, &p0, TS_RUN, v.v_maxsyspri - 2); 24031 24032 /* Tell the reservation reclaim thread that it has work to do */ 24033 cv_signal(&sd_tr.srq_resv_reclaim_cv); 24034 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24035 } 24036 24037 /* 24038 * Function: sd_resv_reclaim_thread() 24039 * 24040 * Description: This function implements the reservation reclaim operations 24041 * 24042 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24043 * among multiple watches that share this callback function 24044 */ 24045 24046 static void 24047 sd_resv_reclaim_thread() 24048 { 24049 struct sd_lun *un; 24050 struct sd_thr_request *sd_mhreq; 24051 24052 /* Wait for work */ 24053 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24054 if (sd_tr.srq_thr_req_head == NULL) { 24055 cv_wait(&sd_tr.srq_resv_reclaim_cv, 24056 &sd_tr.srq_resv_reclaim_mutex); 24057 } 24058 24059 /* Loop while we have work */ 24060 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 24061 un = ddi_get_soft_state(sd_state, 24062 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 24063 if (un == NULL) { 24064 /* 24065 * softstate structure is NULL so just 24066 * dequeue the request and continue 24067 */ 24068 sd_tr.srq_thr_req_head = 24069 sd_tr.srq_thr_cur_req->sd_thr_req_next; 24070 kmem_free(sd_tr.srq_thr_cur_req, 24071 sizeof (struct sd_thr_request)); 24072 continue; 24073 } 24074 24075 /* dequeue the request */ 24076 sd_mhreq = sd_tr.srq_thr_cur_req; 24077 sd_tr.srq_thr_req_head = 24078 sd_tr.srq_thr_cur_req->sd_thr_req_next; 24079 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24080 24081 /* 24082 * Reclaim reservation only if SD_RESERVE is still set. There 24083 * may have been a call to MHIOCRELEASE before we got here. 24084 */ 24085 mutex_enter(SD_MUTEX(un)); 24086 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24087 /* 24088 * Note: The SD_LOST_RESERVE flag is cleared before 24089 * reclaiming the reservation. 
If this is done after the 24090 * call to sd_reserve_release a reservation loss in the 24091 * window between pkt completion of reserve cmd and 24092 * mutex_enter below may not be recognized 24093 */ 24094 un->un_resvd_status &= ~SD_LOST_RESERVE; 24095 mutex_exit(SD_MUTEX(un)); 24096 24097 if (sd_reserve_release(sd_mhreq->dev, 24098 SD_RESERVE) == 0) { 24099 mutex_enter(SD_MUTEX(un)); 24100 un->un_resvd_status |= SD_RESERVE; 24101 mutex_exit(SD_MUTEX(un)); 24102 SD_INFO(SD_LOG_IOCTL_MHD, un, 24103 "sd_resv_reclaim_thread: " 24104 "Reservation Recovered\n"); 24105 } else { 24106 mutex_enter(SD_MUTEX(un)); 24107 un->un_resvd_status |= SD_LOST_RESERVE; 24108 mutex_exit(SD_MUTEX(un)); 24109 SD_INFO(SD_LOG_IOCTL_MHD, un, 24110 "sd_resv_reclaim_thread: Failed " 24111 "Reservation Recovery\n"); 24112 } 24113 } else { 24114 mutex_exit(SD_MUTEX(un)); 24115 } 24116 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24117 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 24118 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24119 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 24120 /* 24121 * wakeup the destroy thread if anyone is waiting on 24122 * us to complete. 24123 */ 24124 cv_signal(&sd_tr.srq_inprocess_cv); 24125 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24126 "sd_resv_reclaim_thread: cv_signalling current request \n"); 24127 } 24128 24129 /* 24130 * cleanup the sd_tr structure now that this thread will not exist 24131 */ 24132 ASSERT(sd_tr.srq_thr_req_head == NULL); 24133 ASSERT(sd_tr.srq_thr_cur_req == NULL); 24134 sd_tr.srq_resv_reclaim_thread = NULL; 24135 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24136 thread_exit(); 24137 } 24138 24139 24140 /* 24141 * Function: sd_rmv_resv_reclaim_req() 24142 * 24143 * Description: This function removes any pending reservation reclaim requests 24144 * for the specified device. 24145 * 24146 * Arguments: dev - the device 'dev_t' 24147 */ 24148 24149 static void 24150 sd_rmv_resv_reclaim_req(dev_t dev) 24151 { 24152 struct sd_thr_request *sd_mhreq; 24153 struct sd_thr_request *sd_prev; 24154 24155 /* Remove a reservation reclaim request from the list */ 24156 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24157 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 24158 /* 24159 * We are attempting to reinstate reservation for 24160 * this device. We wait for sd_reserve_release() 24161 * to return before we return. 24162 */ 24163 cv_wait(&sd_tr.srq_inprocess_cv, 24164 &sd_tr.srq_resv_reclaim_mutex); 24165 } else { 24166 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 24167 if (sd_mhreq && sd_mhreq->dev == dev) { 24168 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 24169 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24170 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24171 return; 24172 } 24173 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 24174 if (sd_mhreq && sd_mhreq->dev == dev) { 24175 break; 24176 } 24177 sd_prev = sd_mhreq; 24178 } 24179 if (sd_mhreq != NULL) { 24180 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 24181 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24182 } 24183 } 24184 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24185 } 24186 24187 24188 /* 24189 * Function: sd_mhd_reset_notify_cb() 24190 * 24191 * Description: This is a call back function for scsi_reset_notify. This 24192 * function updates the softstate reserved status and logs the 24193 * reset. The driver scsi watch facility callback function 24194 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 24195 * will reclaim the reservation. 
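 *		Registration is assumed to follow the standard
 *		scsi_reset_notify(9F) pattern; a sketch of what the
 *		attach-time registration would look like (an assumption,
 *		not a quote of this driver's code):
 *
 *		    (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
 *			sd_mhd_reset_notify_cb, (caddr_t)un);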
 *
 * Arguments: arg - driver soft state (unit) structure
 */

static void
sd_mhd_reset_notify_cb(caddr_t arg)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	mutex_enter(SD_MUTEX(un));
	if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
		un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
		SD_INFO(SD_LOG_IOCTL_MHD, un,
		    "sd_mhd_reset_notify_cb: Lost Reservation\n");
	}
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_take_ownership()
 *
 * Description: This routine implements an algorithm to achieve a stable
 *		reservation on disks which don't implement priority reserve,
 *		and makes sure that other hosts' re-reservation attempts fail.
 *		This algorithm consists of a loop that keeps issuing the
 *		RESERVE for some period of time (min_ownership_delay, default
 *		6 seconds). During that loop, it looks to see if there has been
 *		a bus device reset or bus reset (both of which cause an
 *		existing reservation to be lost). If the reservation is lost,
 *		issue RESERVE until a period of min_ownership_delay with no
 *		resets has gone by, or until max_ownership_delay has expired.
 *		This loop ensures that the host really did manage to reserve
 *		the device, in spite of resets. The looping for
 *		min_ownership_delay (default six seconds) is important to
 *		early generation clustering products, Solstice HA 1.x and Sun
 *		Cluster 2.x. Those products use an MHIOCENFAILFAST periodic
 *		timer of two seconds. By having MHIOCTKOWN issue Reserves in a
 *		loop for six seconds, and having MHIOCENFAILFAST poll every
 *		two seconds, the idea is that by the time the MHIOCTKOWN ioctl
 *		returns, the other host (if any) will have already noticed,
 *		via the MHIOCENFAILFAST polling, that it no longer "owns" the
 *		disk and will have panicked itself. Thus, the host issuing the
 *		MHIOCTKOWN is assured (with timing dependencies) that by the
 *		time it actually starts to use the disk for real work, the old
 *		owner is no longer accessing it.
 *
 *		min_ownership_delay is the minimum amount of time for which the
 *		disk must be reserved continuously devoid of resets before the
 *		MHIOCTKOWN ioctl will return success.
 *
 *		max_ownership_delay indicates the amount of time by which the
 *		take ownership should succeed or timeout with an error.
 *
 * Arguments: dev - the device 'dev_t'
 *	      *p - struct containing timing info.
 *
 * Return Code: 0 for success or error code
 */

static int
sd_take_ownership(dev_t dev, struct mhioctkown *p)
{
	struct sd_lun	*un;
	int		rval;
	int		err;
	int		reservation_count   = 0;
	int		min_ownership_delay =  6000000; /* in usec */
	int		max_ownership_delay = 30000000; /* in usec */
	clock_t		start_time;	/* starting time of this algorithm */
	clock_t		end_time;	/* time limit for giving up */
	clock_t		ownership_time;	/* time limit for stable ownership */
	clock_t		current_time;
	clock_t		previous_current_time;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Attempt a device reservation. A priority reservation is requested.
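	 *
	 * (Worked timing sketch, using the defaults above: each pass of the
	 * loop below delays 500 msec, so min_ownership_delay of 6 sec
	 * requires roughly twelve consecutive conflict-free passes, and in
	 * any case reservation_count >= 4, before ownership_time is reached;
	 * max_ownership_delay of 30 sec bounds the whole attempt.)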
24276 */ 24277 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 24278 != SD_SUCCESS) { 24279 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24280 "sd_take_ownership: return(1)=%d\n", rval); 24281 return (rval); 24282 } 24283 24284 /* Update the softstate reserved status to indicate the reservation */ 24285 mutex_enter(SD_MUTEX(un)); 24286 un->un_resvd_status |= SD_RESERVE; 24287 un->un_resvd_status &= 24288 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 24289 mutex_exit(SD_MUTEX(un)); 24290 24291 if (p != NULL) { 24292 if (p->min_ownership_delay != 0) { 24293 min_ownership_delay = p->min_ownership_delay * 1000; 24294 } 24295 if (p->max_ownership_delay != 0) { 24296 max_ownership_delay = p->max_ownership_delay * 1000; 24297 } 24298 } 24299 SD_INFO(SD_LOG_IOCTL_MHD, un, 24300 "sd_take_ownership: min, max delays: %d, %d\n", 24301 min_ownership_delay, max_ownership_delay); 24302 24303 start_time = ddi_get_lbolt(); 24304 current_time = start_time; 24305 ownership_time = current_time + drv_usectohz(min_ownership_delay); 24306 end_time = start_time + drv_usectohz(max_ownership_delay); 24307 24308 while (current_time - end_time < 0) { 24309 delay(drv_usectohz(500000)); 24310 24311 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 24312 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 24313 mutex_enter(SD_MUTEX(un)); 24314 rval = (un->un_resvd_status & 24315 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 24316 mutex_exit(SD_MUTEX(un)); 24317 break; 24318 } 24319 } 24320 previous_current_time = current_time; 24321 current_time = ddi_get_lbolt(); 24322 mutex_enter(SD_MUTEX(un)); 24323 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 24324 ownership_time = ddi_get_lbolt() + 24325 drv_usectohz(min_ownership_delay); 24326 reservation_count = 0; 24327 } else { 24328 reservation_count++; 24329 } 24330 un->un_resvd_status |= SD_RESERVE; 24331 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 24332 mutex_exit(SD_MUTEX(un)); 24333 24334 SD_INFO(SD_LOG_IOCTL_MHD, un, 24335 "sd_take_ownership: ticks for loop iteration=%ld, " 24336 "reservation=%s\n", (current_time - previous_current_time), 24337 reservation_count ? 
"ok" : "reclaimed"); 24338 24339 if (current_time - ownership_time >= 0 && 24340 reservation_count >= 4) { 24341 rval = 0; /* Achieved a stable ownership */ 24342 break; 24343 } 24344 if (current_time - end_time >= 0) { 24345 rval = EACCES; /* No ownership in max possible time */ 24346 break; 24347 } 24348 } 24349 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24350 "sd_take_ownership: return(2)=%d\n", rval); 24351 return (rval); 24352 } 24353 24354 24355 /* 24356 * Function: sd_reserve_release() 24357 * 24358 * Description: This function builds and sends scsi RESERVE, RELEASE, and 24359 * PRIORITY RESERVE commands based on a user specified command type 24360 * 24361 * Arguments: dev - the device 'dev_t' 24362 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 24363 * SD_RESERVE, SD_RELEASE 24364 * 24365 * Return Code: 0 or Error Code 24366 */ 24367 24368 static int 24369 sd_reserve_release(dev_t dev, int cmd) 24370 { 24371 struct uscsi_cmd *com = NULL; 24372 struct sd_lun *un = NULL; 24373 char cdb[CDB_GROUP0]; 24374 int rval; 24375 24376 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 24377 (cmd == SD_PRIORITY_RESERVE)); 24378 24379 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24380 return (ENXIO); 24381 } 24382 24383 /* instantiate and initialize the command and cdb */ 24384 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24385 bzero(cdb, CDB_GROUP0); 24386 com->uscsi_flags = USCSI_SILENT; 24387 com->uscsi_timeout = un->un_reserve_release_time; 24388 com->uscsi_cdblen = CDB_GROUP0; 24389 com->uscsi_cdb = cdb; 24390 if (cmd == SD_RELEASE) { 24391 cdb[0] = SCMD_RELEASE; 24392 } else { 24393 cdb[0] = SCMD_RESERVE; 24394 } 24395 24396 /* Send the command. */ 24397 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24398 SD_PATH_STANDARD); 24399 24400 /* 24401 * "break" a reservation that is held by another host, by issuing a 24402 * reset if priority reserve is desired, and we could not get the 24403 * device. 24404 */ 24405 if ((cmd == SD_PRIORITY_RESERVE) && 24406 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 24407 /* 24408 * First try to reset the LUN. If we cannot, then try a target 24409 * reset, followed by a bus reset if the target reset fails. 24410 */ 24411 int reset_retval = 0; 24412 if (un->un_f_lun_reset_enabled == TRUE) { 24413 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 24414 } 24415 if (reset_retval == 0) { 24416 /* The LUN reset either failed or was not issued */ 24417 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 24418 } 24419 if ((reset_retval == 0) && 24420 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 24421 rval = EIO; 24422 kmem_free(com, sizeof (*com)); 24423 return (rval); 24424 } 24425 24426 bzero(com, sizeof (struct uscsi_cmd)); 24427 com->uscsi_flags = USCSI_SILENT; 24428 com->uscsi_cdb = cdb; 24429 com->uscsi_cdblen = CDB_GROUP0; 24430 com->uscsi_timeout = 5; 24431 24432 /* 24433 * Reissue the last reserve command, this time without request 24434 * sense. Assume that it is just a regular reserve command. 24435 */ 24436 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24437 SD_PATH_STANDARD); 24438 } 24439 24440 /* Return an error if still getting a reservation conflict. 
	 */
	if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
		rval = EACCES;
	}

	kmem_free(com, sizeof (*com));
	return (rval);
}


#define	SD_NDUMP_RETRIES	12
/*
 *	System Crash Dump routine
 */

static int
sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	int		partition;
	int		i;
	int		err;
	struct sd_lun	*un;
	struct scsi_pkt *wr_pktp;
	struct buf	*wr_bp;
	struct buf	wr_buf;
	daddr_t		tgt_byte_offset; /* rmw - byte offset for target */
	daddr_t		tgt_blkno;	/* rmw - blkno for target */
	size_t		tgt_byte_count; /* rmw - # of bytes to xfer */
	size_t		tgt_nblk; /* rmw - # of tgt blks to xfer */
	size_t		io_start_offset;
	int		doing_rmw = FALSE;
	int		rval;
	ssize_t		dma_resid;
	daddr_t		oblkno;
	diskaddr_t	nblks = 0;
	diskaddr_t	start_block;

	instance = SDUNIT(dev);
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    !SD_IS_VALID_LABEL(un) || ISCD(un)) {
		return (ENXIO);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))

	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");

	partition = SDPART(dev);
	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);

	/* Validate the blocks to dump against the partition size. */

	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
	    &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);

	if ((blkno + nblk) > nblks) {
		SD_TRACE(SD_LOG_DUMP, un,
		    "sddump: dump range larger than partition: "
		    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
		    blkno, nblk, nblks);
		return (EINVAL);
	}

	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		struct scsi_pkt *start_pktp;

		mutex_exit(&un->un_pm_mutex);

		/*
		 * use pm framework to power on HBA 1st
		 */
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);

		/*
		 * Dump no longer uses sdpower to power on a device; it's
		 * in-line here so it can be done in polled mode.
		 */

		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");

		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);

		if (start_pktp == NULL) {
			/* We were not given a SCSI packet, fail. */
			return (EIO);
		}
		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
		start_pktp->pkt_flags = FLAG_NOINTR;

		mutex_enter(SD_MUTEX(un));
		SD_FILL_SCSI1_LUN(un, start_pktp);
		mutex_exit(SD_MUTEX(un));
		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.
		 */
		if (sd_scsi_poll(un, start_pktp) != 0) {
			scsi_destroy_pkt(start_pktp);
			return (EIO);
		}
		scsi_destroy_pkt(start_pktp);
		(void) sd_ddi_pm_resume(un);
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	mutex_enter(SD_MUTEX(un));
	un->un_throttle = 0;

	/*
	 * The first time through, reset the specific target device.
	 * However, when cpr calls sddump we know that sd is in a
	 * good state so no bus reset is required.
	 * Clear sense data via Request Sense cmd.
	 * In sddump we don't care about allow_bus_device_reset anymore
	 */

	if ((un->un_state != SD_STATE_SUSPENDED) &&
	    (un->un_state != SD_STATE_DUMPING)) {

		New_state(un, SD_STATE_DUMPING);

		if (un->un_f_is_fibre == FALSE) {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Attempt a bus reset for parallel scsi.
			 *
			 * Note: A bus reset is required because on some host
			 *	 systems (i.e. E420R) a bus device reset is
			 *	 insufficient to reset the state of the target.
			 *
			 * Note: Don't issue the reset for fibre-channel,
			 *	 because this tends to hang the bus (loop) for
			 *	 too long while everyone is logging out and in
			 *	 and the deadman timer for dumping will fire
			 *	 before the dump is complete.
			 */
			if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
				mutex_enter(SD_MUTEX(un));
				Restore_state(un);
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}

			/* Delay to give the device some recovery time. */
			drv_usecwait(10000);

			if (sd_send_polled_RQS(un) == SD_FAILURE) {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: sd_send_polled_RQS failed\n");
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * Convert the partition-relative block number to a
	 * disk physical block number.
	 */
	blkno += start_block;

	SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);


	/*
	 * Check if the device has a non-512 block size.
	 */
	wr_bp = NULL;
	if (NOT_DEVBSIZE(un)) {
		tgt_byte_offset = blkno * un->un_sys_blocksize;
		tgt_byte_count = nblk * un->un_sys_blocksize;
		if ((tgt_byte_offset % un->un_tgt_blocksize) ||
		    (tgt_byte_count % un->un_tgt_blocksize)) {
			doing_rmw = TRUE;
			/*
			 * Calculate the block number and number of blocks
			 * in terms of the media block size.
			 */
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk =
			    ((tgt_byte_offset + tgt_byte_count +
			    (un->un_tgt_blocksize - 1)) /
			    un->un_tgt_blocksize) - tgt_blkno;

			/*
			 * Invoke the routine which is going to do the read
			 * part of read-modify-write.
			 * Note that this routine returns a pointer to
			 * a valid bp in wr_bp.
			 */
			err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
			    &wr_bp);
			if (err) {
				mutex_exit(SD_MUTEX(un));
				return (err);
			}
			/*
			 * Offset is being calculated as -
			 * (original block # * system block size) -
			 * (new block # * target block size)
			 */
			io_start_offset =
			    ((uint64_t)(blkno * un->un_sys_blocksize)) -
			    ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));

			ASSERT((io_start_offset >= 0) &&
			    (io_start_offset < un->un_tgt_blocksize));
			/*
			 * Do the modify portion of read modify write.
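			 *
			 * (Worked example, assuming 512-byte system blocks on
			 * 2048-byte media: blkno = 3, nblk = 2 gives
			 * tgt_byte_offset = 1536 and tgt_byte_count = 1024,
			 * neither of which is media aligned, so tgt_blkno = 0
			 * and tgt_nblk = 2; the read phase fetches 4096 bytes
			 * and the 1024 dump bytes are copied in below at
			 * io_start_offset = 1536.)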
24654 */ 24655 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 24656 (size_t)nblk * un->un_sys_blocksize); 24657 } else { 24658 doing_rmw = FALSE; 24659 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 24660 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 24661 } 24662 24663 /* Convert blkno and nblk to target blocks */ 24664 blkno = tgt_blkno; 24665 nblk = tgt_nblk; 24666 } else { 24667 wr_bp = &wr_buf; 24668 bzero(wr_bp, sizeof (struct buf)); 24669 wr_bp->b_flags = B_BUSY; 24670 wr_bp->b_un.b_addr = addr; 24671 wr_bp->b_bcount = nblk << DEV_BSHIFT; 24672 wr_bp->b_resid = 0; 24673 } 24674 24675 mutex_exit(SD_MUTEX(un)); 24676 24677 /* 24678 * Obtain a SCSI packet for the write command. 24679 * It should be safe to call the allocator here without 24680 * worrying about being locked for DVMA mapping because 24681 * the address we're passed is already a DVMA mapping 24682 * 24683 * We are also not going to worry about semaphore ownership 24684 * in the dump buffer. Dumping is single threaded at present. 24685 */ 24686 24687 wr_pktp = NULL; 24688 24689 dma_resid = wr_bp->b_bcount; 24690 oblkno = blkno; 24691 24692 while (dma_resid != 0) { 24693 24694 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 24695 wr_bp->b_flags &= ~B_ERROR; 24696 24697 if (un->un_partial_dma_supported == 1) { 24698 blkno = oblkno + 24699 ((wr_bp->b_bcount - dma_resid) / 24700 un->un_tgt_blocksize); 24701 nblk = dma_resid / un->un_tgt_blocksize; 24702 24703 if (wr_pktp) { 24704 /* 24705 * Partial DMA transfers after initial transfer 24706 */ 24707 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 24708 blkno, nblk); 24709 } else { 24710 /* Initial transfer */ 24711 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 24712 un->un_pkt_flags, NULL_FUNC, NULL, 24713 blkno, nblk); 24714 } 24715 } else { 24716 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 24717 0, NULL_FUNC, NULL, blkno, nblk); 24718 } 24719 24720 if (rval == 0) { 24721 /* We were given a SCSI packet, continue. 
*/ 24722 break; 24723 } 24724 24725 if (i == 0) { 24726 if (wr_bp->b_flags & B_ERROR) { 24727 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24728 "no resources for dumping; " 24729 "error code: 0x%x, retrying", 24730 geterror(wr_bp)); 24731 } else { 24732 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24733 "no resources for dumping; retrying"); 24734 } 24735 } else if (i != (SD_NDUMP_RETRIES - 1)) { 24736 if (wr_bp->b_flags & B_ERROR) { 24737 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24738 "no resources for dumping; error code: " 24739 "0x%x, retrying\n", geterror(wr_bp)); 24740 } 24741 } else { 24742 if (wr_bp->b_flags & B_ERROR) { 24743 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24744 "no resources for dumping; " 24745 "error code: 0x%x, retries failed, " 24746 "giving up.\n", geterror(wr_bp)); 24747 } else { 24748 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24749 "no resources for dumping; " 24750 "retries failed, giving up.\n"); 24751 } 24752 mutex_enter(SD_MUTEX(un)); 24753 Restore_state(un); 24754 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 24755 mutex_exit(SD_MUTEX(un)); 24756 scsi_free_consistent_buf(wr_bp); 24757 } else { 24758 mutex_exit(SD_MUTEX(un)); 24759 } 24760 return (EIO); 24761 } 24762 drv_usecwait(10000); 24763 } 24764 24765 if (un->un_partial_dma_supported == 1) { 24766 /* 24767 * save the resid from PARTIAL_DMA 24768 */ 24769 dma_resid = wr_pktp->pkt_resid; 24770 if (dma_resid != 0) 24771 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 24772 wr_pktp->pkt_resid = 0; 24773 } else { 24774 dma_resid = 0; 24775 } 24776 24777 /* SunBug 1222170 */ 24778 wr_pktp->pkt_flags = FLAG_NOINTR; 24779 24780 err = EIO; 24781 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 24782 24783 /* 24784 * Scsi_poll returns 0 (success) if the command completes and 24785 * the status block is STATUS_GOOD. We should only check 24786 * errors if this condition is not true. Even then we should 24787 * send our own request sense packet only if we have a check 24788 * condition and auto request sense has not been performed by 24789 * the hba. 24790 */ 24791 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 24792 24793 if ((sd_scsi_poll(un, wr_pktp) == 0) && 24794 (wr_pktp->pkt_resid == 0)) { 24795 err = SD_SUCCESS; 24796 break; 24797 } 24798 24799 /* 24800 * Check CMD_DEV_GONE 1st, give up if device is gone. 
24801 */ 24802 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 24803 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24804 "Error while dumping state...Device is gone\n"); 24805 break; 24806 } 24807 24808 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 24809 SD_INFO(SD_LOG_DUMP, un, 24810 "sddump: write failed with CHECK, try # %d\n", i); 24811 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 24812 (void) sd_send_polled_RQS(un); 24813 } 24814 24815 continue; 24816 } 24817 24818 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 24819 int reset_retval = 0; 24820 24821 SD_INFO(SD_LOG_DUMP, un, 24822 "sddump: write failed with BUSY, try # %d\n", i); 24823 24824 if (un->un_f_lun_reset_enabled == TRUE) { 24825 reset_retval = scsi_reset(SD_ADDRESS(un), 24826 RESET_LUN); 24827 } 24828 if (reset_retval == 0) { 24829 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 24830 } 24831 (void) sd_send_polled_RQS(un); 24832 24833 } else { 24834 SD_INFO(SD_LOG_DUMP, un, 24835 "sddump: write failed with 0x%x, try # %d\n", 24836 SD_GET_PKT_STATUS(wr_pktp), i); 24837 mutex_enter(SD_MUTEX(un)); 24838 sd_reset_target(un, wr_pktp); 24839 mutex_exit(SD_MUTEX(un)); 24840 } 24841 24842 /* 24843 * If we are not getting anywhere with lun/target resets, 24844 * let's reset the bus. 24845 */ 24846 if (i == SD_NDUMP_RETRIES/2) { 24847 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 24848 (void) sd_send_polled_RQS(un); 24849 } 24850 } 24851 } 24852 24853 scsi_destroy_pkt(wr_pktp); 24854 mutex_enter(SD_MUTEX(un)); 24855 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 24856 mutex_exit(SD_MUTEX(un)); 24857 scsi_free_consistent_buf(wr_bp); 24858 } else { 24859 mutex_exit(SD_MUTEX(un)); 24860 } 24861 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 24862 return (err); 24863 } 24864 24865 /* 24866 * Function: sd_scsi_poll() 24867 * 24868 * Description: This is a wrapper for the scsi_poll call. 24869 * 24870 * Arguments: sd_lun - The unit structure 24871 * scsi_pkt - The scsi packet being sent to the device. 24872 * 24873 * Return Code: 0 - Command completed successfully with good status 24874 * -1 - Command failed. This could indicate a check condition 24875 * or other status value requiring recovery action. 24876 * 24877 * NOTE: This code is only called off sddump(). 24878 */ 24879 24880 static int 24881 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 24882 { 24883 int status; 24884 24885 ASSERT(un != NULL); 24886 ASSERT(!mutex_owned(SD_MUTEX(un))); 24887 ASSERT(pktp != NULL); 24888 24889 status = SD_SUCCESS; 24890 24891 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 24892 pktp->pkt_flags |= un->un_tagflags; 24893 pktp->pkt_flags &= ~FLAG_NODISCON; 24894 } 24895 24896 status = sd_ddi_scsi_poll(pktp); 24897 /* 24898 * Scsi_poll returns 0 (success) if the command completes and the 24899 * status block is STATUS_GOOD. We should only check errors if this 24900 * condition is not true. Even then we should send our own request 24901 * sense packet only if we have a check condition and auto 24902 * request sense has not been performed by the hba. 24903 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 24904 */ 24905 if ((status != SD_SUCCESS) && 24906 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 24907 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 24908 (pktp->pkt_reason != CMD_DEV_GONE)) 24909 (void) sd_send_polled_RQS(un); 24910 24911 return (status); 24912 } 24913 24914 /* 24915 * Function: sd_send_polled_RQS() 24916 * 24917 * Description: This sends the request sense command to a device. 
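 *		It reuses the pre-allocated un_rqs_pktp/un_rqs_bp pair
 *		(guarded by un_sense_isbusy below) so that no allocation is
 *		needed at crash-dump time, and it polls via sd_ddi_scsi_poll()
 *		rather than sd_scsi_poll(), since the latter calls back into
 *		this routine.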
24918 * 24919 * Arguments: sd_lun - The unit structure 24920 * 24921 * Return Code: 0 - Command completed successfully with good status 24922 * -1 - Command failed. 24923 * 24924 */ 24925 24926 static int 24927 sd_send_polled_RQS(struct sd_lun *un) 24928 { 24929 int ret_val; 24930 struct scsi_pkt *rqs_pktp; 24931 struct buf *rqs_bp; 24932 24933 ASSERT(un != NULL); 24934 ASSERT(!mutex_owned(SD_MUTEX(un))); 24935 24936 ret_val = SD_SUCCESS; 24937 24938 rqs_pktp = un->un_rqs_pktp; 24939 rqs_bp = un->un_rqs_bp; 24940 24941 mutex_enter(SD_MUTEX(un)); 24942 24943 if (un->un_sense_isbusy) { 24944 ret_val = SD_FAILURE; 24945 mutex_exit(SD_MUTEX(un)); 24946 return (ret_val); 24947 } 24948 24949 /* 24950 * If the request sense buffer (and packet) is not in use, 24951 * let's set the un_sense_isbusy and send our packet 24952 */ 24953 un->un_sense_isbusy = 1; 24954 rqs_pktp->pkt_resid = 0; 24955 rqs_pktp->pkt_reason = 0; 24956 rqs_pktp->pkt_flags |= FLAG_NOINTR; 24957 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 24958 24959 mutex_exit(SD_MUTEX(un)); 24960 24961 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 24962 " 0x%p\n", rqs_bp->b_un.b_addr); 24963 24964 /* 24965 * Can't send this to sd_scsi_poll, we wrap ourselves around the 24966 * axle - it has a call into us! 24967 */ 24968 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 24969 SD_INFO(SD_LOG_COMMON, un, 24970 "sd_send_polled_RQS: RQS failed\n"); 24971 } 24972 24973 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 24974 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 24975 24976 mutex_enter(SD_MUTEX(un)); 24977 un->un_sense_isbusy = 0; 24978 mutex_exit(SD_MUTEX(un)); 24979 24980 return (ret_val); 24981 } 24982 24983 /* 24984 * Defines needed for localized version of the scsi_poll routine. 24985 */ 24986 #define CSEC 10000 /* usecs */ 24987 #define SEC_TO_CSEC (1000000/CSEC) 24988 24989 /* 24990 * Function: sd_ddi_scsi_poll() 24991 * 24992 * Description: Localized version of the scsi_poll routine. The purpose is to 24993 * send a scsi_pkt to a device as a polled command. This version 24994 * is to ensure more robust handling of transport errors. 24995 * Specifically this routine cures not ready, coming ready 24996 * transition for power up and reset of sonoma's. This can take 24997 * up to 45 seconds for power-on and 20 seconds for reset of a 24998 * sonoma lun. 24999 * 25000 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 25001 * 25002 * Return Code: 0 - Command completed successfully with good status 25003 * -1 - Command failed. 25004 * 25005 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 25006 * be fixed (removing this code), we need to determine how to handle the 25007 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 25008 * 25009 * NOTE: This code is only called off sddump(). 25010 */ 25011 static int 25012 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 25013 { 25014 int rval = -1; 25015 int savef; 25016 long savet; 25017 void (*savec)(); 25018 int timeout; 25019 int busy_count; 25020 int poll_delay; 25021 int rc; 25022 uint8_t *sensep; 25023 struct scsi_arq_status *arqstat; 25024 extern int do_polled_io; 25025 25026 ASSERT(pkt->pkt_scbp); 25027 25028 /* 25029 * save old flags.. 
25030 */ 25031 savef = pkt->pkt_flags; 25032 savec = pkt->pkt_comp; 25033 savet = pkt->pkt_time; 25034 25035 pkt->pkt_flags |= FLAG_NOINTR; 25036 25037 /* 25038 * XXX there is nothing in the SCSA spec that states that we should not 25039 * do a callback for polled cmds; however, removing this will break sd 25040 * and probably other target drivers 25041 */ 25042 pkt->pkt_comp = NULL; 25043 25044 /* 25045 * we don't like a polled command without timeout. 25046 * 60 seconds seems long enough. 25047 */ 25048 if (pkt->pkt_time == 0) 25049 pkt->pkt_time = SCSI_POLL_TIMEOUT; 25050 25051 /* 25052 * Send polled cmd. 25053 * 25054 * We do some error recovery for various errors. Tran_busy, 25055 * queue full, and non-dispatched commands are retried every 10 msec. 25056 * as they are typically transient failures. Busy status and Not 25057 * Ready are retried every second as this status takes a while to 25058 * change. 25059 */ 25060 timeout = pkt->pkt_time * SEC_TO_CSEC; 25061 25062 for (busy_count = 0; busy_count < timeout; busy_count++) { 25063 /* 25064 * Initialize pkt status variables. 25065 */ 25066 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 25067 25068 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 25069 if (rc != TRAN_BUSY) { 25070 /* Transport failed - give up. */ 25071 break; 25072 } else { 25073 /* Transport busy - try again. */ 25074 poll_delay = 1 * CSEC; /* 10 msec. */ 25075 } 25076 } else { 25077 /* 25078 * Transport accepted - check pkt status. 25079 */ 25080 rc = (*pkt->pkt_scbp) & STATUS_MASK; 25081 if ((pkt->pkt_reason == CMD_CMPLT) && 25082 (rc == STATUS_CHECK) && 25083 (pkt->pkt_state & STATE_ARQ_DONE)) { 25084 arqstat = 25085 (struct scsi_arq_status *)(pkt->pkt_scbp); 25086 sensep = (uint8_t *)&arqstat->sts_sensedata; 25087 } else { 25088 sensep = NULL; 25089 } 25090 25091 if ((pkt->pkt_reason == CMD_CMPLT) && 25092 (rc == STATUS_GOOD)) { 25093 /* No error - we're done */ 25094 rval = 0; 25095 break; 25096 25097 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 25098 /* Lost connection - give up */ 25099 break; 25100 25101 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 25102 (pkt->pkt_state == 0)) { 25103 /* Pkt not dispatched - try again. */ 25104 poll_delay = 1 * CSEC; /* 10 msec. */ 25105 25106 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25107 (rc == STATUS_QFULL)) { 25108 /* Queue full - try again. */ 25109 poll_delay = 1 * CSEC; /* 10 msec. */ 25110 25111 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25112 (rc == STATUS_BUSY)) { 25113 /* Busy - try again. */ 25114 poll_delay = 100 * CSEC; /* 1 sec. */ 25115 busy_count += (SEC_TO_CSEC - 1); 25116 25117 } else if ((sensep != NULL) && 25118 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 25119 /* 25120 * Unit Attention - try again. 25121 * Pretend it took 1 sec. 25122 * NOTE: 'continue' avoids poll_delay 25123 */ 25124 busy_count += (SEC_TO_CSEC - 1); 25125 continue; 25126 25127 } else if ((sensep != NULL) && 25128 (scsi_sense_key(sensep) == KEY_NOT_READY) && 25129 (scsi_sense_asc(sensep) == 0x04) && 25130 (scsi_sense_ascq(sensep) == 0x01)) { 25131 /* 25132 * Not ready -> ready - try again. 25133 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 25134 * ...same as STATUS_BUSY 25135 */ 25136 poll_delay = 100 * CSEC; /* 1 sec. */ 25137 busy_count += (SEC_TO_CSEC - 1); 25138 25139 } else { 25140 /* BAD status - give up. 
*/ 25141 break; 25142 } 25143 } 25144 25145 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 25146 !do_polled_io) { 25147 delay(drv_usectohz(poll_delay)); 25148 } else { 25149 /* we busy wait during cpr_dump or interrupt threads */ 25150 drv_usecwait(poll_delay); 25151 } 25152 } 25153 25154 pkt->pkt_flags = savef; 25155 pkt->pkt_comp = savec; 25156 pkt->pkt_time = savet; 25157 25158 /* return on error */ 25159 if (rval) 25160 return (rval); 25161 25162 /* 25163 * This is not a performance critical code path. 25164 * 25165 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 25166 * issues associated with looking at DMA memory prior to 25167 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 25168 */ 25169 scsi_sync_pkt(pkt); 25170 return (0); 25171 } 25172 25173 25174 25175 /* 25176 * Function: sd_persistent_reservation_in_read_keys 25177 * 25178 * Description: This routine is the driver entry point for handling CD-ROM 25179 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 25180 * by sending the SCSI-3 PRIN commands to the device. 25181 * Processes the read keys command response by copying the 25182 * reservation key information into the user provided buffer. 25183 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 25184 * 25185 * Arguments: un - Pointer to soft state struct for the target. 25186 * usrp - user provided pointer to multihost Persistent In Read 25187 * Keys structure (mhioc_inkeys_t) 25188 * flag - this argument is a pass through to ddi_copyxxx() 25189 * directly from the mode argument of ioctl(). 25190 * 25191 * Return Code: 0 - Success 25192 * EACCES 25193 * ENOTSUP 25194 * errno return code from sd_send_scsi_cmd() 25195 * 25196 * Context: Can sleep. Does not return until command is completed. 25197 */ 25198 25199 static int 25200 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 25201 mhioc_inkeys_t *usrp, int flag) 25202 { 25203 #ifdef _MULTI_DATAMODEL 25204 struct mhioc_key_list32 li32; 25205 #endif 25206 sd_prin_readkeys_t *in; 25207 mhioc_inkeys_t *ptr; 25208 mhioc_key_list_t li; 25209 uchar_t *data_bufp; 25210 int data_len; 25211 int rval = 0; 25212 size_t copysz; 25213 sd_ssc_t *ssc; 25214 25215 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 25216 return (EINVAL); 25217 } 25218 bzero(&li, sizeof (mhioc_key_list_t)); 25219 25220 ssc = sd_ssc_init(un); 25221 25222 /* 25223 * Get the listsize from user 25224 */ 25225 #ifdef _MULTI_DATAMODEL 25226 25227 switch (ddi_model_convert_from(flag & FMODELS)) { 25228 case DDI_MODEL_ILP32: 25229 copysz = sizeof (struct mhioc_key_list32); 25230 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 25231 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25232 "sd_persistent_reservation_in_read_keys: " 25233 "failed ddi_copyin: mhioc_key_list32_t\n"); 25234 rval = EFAULT; 25235 goto done; 25236 } 25237 li.listsize = li32.listsize; 25238 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 25239 break; 25240 25241 case DDI_MODEL_NONE: 25242 copysz = sizeof (mhioc_key_list_t); 25243 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 25244 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25245 "sd_persistent_reservation_in_read_keys: " 25246 "failed ddi_copyin: mhioc_key_list_t\n"); 25247 rval = EFAULT; 25248 goto done; 25249 } 25250 break; 25251 } 25252 25253 #else /* ! 
_MULTI_DATAMODEL */ 25254 copysz = sizeof (mhioc_key_list_t); 25255 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 25256 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25257 "sd_persistent_reservation_in_read_keys: " 25258 "failed ddi_copyin: mhioc_key_list_t\n"); 25259 rval = EFAULT; 25260 goto done; 25261 } 25262 #endif 25263 25264 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 25265 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 25266 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 25267 25268 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 25269 data_len, data_bufp); 25270 if (rval != 0) { 25271 if (rval == EIO) 25272 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 25273 else 25274 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 25275 goto done; 25276 } 25277 in = (sd_prin_readkeys_t *)data_bufp; 25278 ptr->generation = BE_32(in->generation); 25279 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 25280 25281 /* 25282 * Return the min(listsize, listlen) keys 25283 */ 25284 #ifdef _MULTI_DATAMODEL 25285 25286 switch (ddi_model_convert_from(flag & FMODELS)) { 25287 case DDI_MODEL_ILP32: 25288 li32.listlen = li.listlen; 25289 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 25290 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25291 "sd_persistent_reservation_in_read_keys: " 25292 "failed ddi_copyout: mhioc_key_list32_t\n"); 25293 rval = EFAULT; 25294 goto done; 25295 } 25296 break; 25297 25298 case DDI_MODEL_NONE: 25299 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 25300 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25301 "sd_persistent_reservation_in_read_keys: " 25302 "failed ddi_copyout: mhioc_key_list_t\n"); 25303 rval = EFAULT; 25304 goto done; 25305 } 25306 break; 25307 } 25308 25309 #else /* ! _MULTI_DATAMODEL */ 25310 25311 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 25312 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25313 "sd_persistent_reservation_in_read_keys: " 25314 "failed ddi_copyout: mhioc_key_list_t\n"); 25315 rval = EFAULT; 25316 goto done; 25317 } 25318 25319 #endif /* _MULTI_DATAMODEL */ 25320 25321 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 25322 li.listsize * MHIOC_RESV_KEY_SIZE); 25323 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 25324 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25325 "sd_persistent_reservation_in_read_keys: " 25326 "failed ddi_copyout: keylist\n"); 25327 rval = EFAULT; 25328 } 25329 done: 25330 sd_ssc_fini(ssc); 25331 kmem_free(data_bufp, data_len); 25332 return (rval); 25333 } 25334 25335 25336 /* 25337 * Function: sd_persistent_reservation_in_read_resv 25338 * 25339 * Description: This routine is the driver entry point for handling CD-ROM 25340 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 25341 * by sending the SCSI-3 PRIN commands to the device. 25342 * Process the read persistent reservations command response by 25343 * copying the reservation information into the user provided 25344 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 25345 * 25346 * Arguments: un - Pointer to soft state struct for the target. 25347 * usrp - user provided pointer to multihost Persistent In Read 25348 * Keys structure (mhioc_inkeys_t) 25349 * flag - this argument is a pass through to ddi_copyxxx() 25350 * directly from the mode argument of ioctl(). 25351 * 25352 * Return Code: 0 - Success 25353 * EACCES 25354 * ENOTSUP 25355 * errno return code from sd_send_scsi_cmd() 25356 * 25357 * Context: Can sleep. Does not return until command is completed. 
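 *
 *		Sizing note (an inference from the arithmetic below): the PRIN
 *		response buffer holds listsize * SCSI3_RESV_DESC_LEN descriptor
 *		bytes plus the sd_prin_readresv_t header, minus the
 *		sizeof (caddr_t) placeholder that stands in for the
 *		variable-length data; e.g. a hypothetical listsize of 4 leaves
 *		room for four reservation descriptors after the
 *		generation/length header.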
 */

static int
sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag)
{
#ifdef _MULTI_DATAMODEL
	struct mhioc_resv_desc_list32 resvlist32;
#endif
	sd_prin_readresv_t	*in;
	mhioc_inresvs_t		*ptr;
	sd_readresv_desc_t	*readresv_ptr;
	mhioc_resv_desc_list_t	resvlist;
	mhioc_resv_desc_t	resvdesc;
	uchar_t			*data_bufp = NULL;
	int			data_len;
	int			rval = 0;
	int			i;
	size_t			copysz;
	mhioc_resv_desc_t	*bufp;
	sd_ssc_t		*ssc;

	if ((ptr = usrp) == NULL) {
		return (EINVAL);
	}

	ssc = sd_ssc_init(un);

	/*
	 * Get the listsize from user
	 */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		copysz = sizeof (struct mhioc_resv_desc_list32);
		if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		resvlist.listsize = resvlist32.listsize;
		resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list;
		break;

	case DDI_MODEL_NONE:
		copysz = sizeof (mhioc_resv_desc_list_t);
		if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	copysz = sizeof (mhioc_resv_desc_list_t);
	if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_resv: "
		    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
		rval = EFAULT;
		goto done;
	}
#endif /* ! _MULTI_DATAMODEL */

	data_len  = resvlist.listsize * SCSI3_RESV_DESC_LEN;
	data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t));
	data_bufp = kmem_zalloc(data_len, KM_SLEEP);

	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV,
	    data_len, data_bufp);
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto done;
	}
	in = (sd_prin_readresv_t *)data_bufp;
	ptr->generation = BE_32(in->generation);
	resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN;

	/*
	 * Return the min(listsize, listlen) keys
	 */
#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		resvlist32.listlen = resvlist.listlen;
		if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;

	case DDI_MODEL_NONE:
		if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}

#else /* !
_MULTI_DATAMODEL */ 25472 25473 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 25474 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25475 "sd_persistent_reservation_in_read_resv: " 25476 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 25477 rval = EFAULT; 25478 goto done; 25479 } 25480 25481 #endif /* ! _MULTI_DATAMODEL */ 25482 25483 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 25484 bufp = resvlist.list; 25485 copysz = sizeof (mhioc_resv_desc_t); 25486 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 25487 i++, readresv_ptr++, bufp++) { 25488 25489 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 25490 MHIOC_RESV_KEY_SIZE); 25491 resvdesc.type = readresv_ptr->type; 25492 resvdesc.scope = readresv_ptr->scope; 25493 resvdesc.scope_specific_addr = 25494 BE_32(readresv_ptr->scope_specific_addr); 25495 25496 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 25497 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25498 "sd_persistent_reservation_in_read_resv: " 25499 "failed ddi_copyout: resvlist\n"); 25500 rval = EFAULT; 25501 goto done; 25502 } 25503 } 25504 done: 25505 sd_ssc_fini(ssc); 25506 /* only if data_bufp is allocated, we need to free it */ 25507 if (data_bufp) { 25508 kmem_free(data_bufp, data_len); 25509 } 25510 return (rval); 25511 } 25512 25513 25514 /* 25515 * Function: sr_change_blkmode() 25516 * 25517 * Description: This routine is the driver entry point for handling CD-ROM 25518 * block mode ioctl requests. Support for returning and changing 25519 * the current block size in use by the device is implemented. The 25520 * LBA size is changed via a MODE SELECT Block Descriptor. 25521 * 25522 * This routine issues a mode sense with an allocation length of 25523 * 12 bytes for the mode page header and a single block descriptor. 25524 * 25525 * Arguments: dev - the device 'dev_t' 25526 * cmd - the request type; one of CDROMGBLKMODE (get) or 25527 * CDROMSBLKMODE (set) 25528 * data - current block size or requested block size 25529 * flag - this argument is a pass through to ddi_copyxxx() directly 25530 * from the mode argument of ioctl(). 25531 * 25532 * Return Code: the code returned by sd_send_scsi_cmd() 25533 * EINVAL if invalid arguments are provided 25534 * EFAULT if ddi_copyxxx() fails 25535 * ENXIO if fail ddi_get_soft_state 25536 * EIO if invalid mode sense block descriptor length 25537 * 25538 */ 25539 25540 static int 25541 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 25542 { 25543 struct sd_lun *un = NULL; 25544 struct mode_header *sense_mhp, *select_mhp; 25545 struct block_descriptor *sense_desc, *select_desc; 25546 int current_bsize; 25547 int rval = EINVAL; 25548 uchar_t *sense = NULL; 25549 uchar_t *select = NULL; 25550 sd_ssc_t *ssc; 25551 25552 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 25553 25554 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25555 return (ENXIO); 25556 } 25557 25558 /* 25559 * The block length is changed via the Mode Select block descriptor, the 25560 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 25561 * required as part of this routine. Therefore the mode sense allocation 25562 * length is specified to be the length of a mode page header and a 25563 * block descriptor. 
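	 *
	 *		For example, a returned block descriptor reporting
	 *		2048-byte logical blocks carries blksize_hi/mid/lo =
	 *		0x00/0x08/0x00, which the decode below reassembles as
	 *		(0x00 << 16) | (0x08 << 8) | 0x00 = 2048.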
	 */
	sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
	    BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header *)sense;
	if ((sense_mhp->bdesc_length == 0) ||
	    (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense returned invalid block"
		    " descriptor length\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (EIO);
	}
	sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
	current_bsize = ((sense_desc->blksize_hi << 16) |
	    (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);

	/* Process command */
	switch (cmd) {
	case CDROMGBLKMODE:
		/* Return the block size obtained during the mode sense */
		if (ddi_copyout(&current_bsize, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSBLKMODE:
		/* Validate the requested block size */
		switch (data) {
		case CDROM_BLK_512:
		case CDROM_BLK_1024:
		case CDROM_BLK_2048:
		case CDROM_BLK_2056:
		case CDROM_BLK_2336:
		case CDROM_BLK_2340:
		case CDROM_BLK_2352:
		case CDROM_BLK_2368:
		case CDROM_BLK_2448:
		case CDROM_BLK_2646:
		case CDROM_BLK_2647:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: "
			    "Block Size '%ld' Not Supported\n", data);
			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
			return (EINVAL);
		}

		/*
		 * The current block size matches the requested block size so
		 * there is no need to send the mode select to change the size
		 */
		if (current_bsize == data) {
			break;
		}

		/* Build the select data for the requested block size */
		select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_desc =
		    (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
		/*
		 * The LBA size is changed via the block descriptor, so the
		 * descriptor is built according to the user data
		 */
		select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
		select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
		select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
		select_desc->blksize_lo = (char)((data) & 0x000000ff);

		/* Send the mode select for the requested block size */
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
		    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: Mode Select Failed\n");
			/*
			 * The mode select failed for the requested block size,
			 * so reset the data for the original block size and
			 * send it to the target. The error is indicated by the
			 * return value for the failed mode select.
25659 */ 25660 select_desc->blksize_hi = sense_desc->blksize_hi; 25661 select_desc->blksize_mid = sense_desc->blksize_mid; 25662 select_desc->blksize_lo = sense_desc->blksize_lo; 25663 ssc = sd_ssc_init(un); 25664 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 25665 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 25666 SD_PATH_STANDARD); 25667 sd_ssc_fini(ssc); 25668 } else { 25669 ASSERT(!mutex_owned(SD_MUTEX(un))); 25670 mutex_enter(SD_MUTEX(un)); 25671 sd_update_block_info(un, (uint32_t)data, 0); 25672 mutex_exit(SD_MUTEX(un)); 25673 } 25674 break; 25675 default: 25676 /* should not reach here, but check anyway */ 25677 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25678 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 25679 rval = EINVAL; 25680 break; 25681 } 25682 25683 if (select) { 25684 kmem_free(select, BUFLEN_CHG_BLK_MODE); 25685 } 25686 if (sense) { 25687 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 25688 } 25689 return (rval); 25690 } 25691 25692 25693 /* 25694 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 25695 * implement driver support for getting and setting the CD speed. The command 25696 * set used will be based on the device type. If the device has not been 25697 * identified as MMC the Toshiba vendor specific mode page will be used. If 25698 * the device is MMC but does not support the Real Time Streaming feature 25699 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 25700 * be used to read the speed. 25701 */ 25702 25703 /* 25704 * Function: sr_change_speed() 25705 * 25706 * Description: This routine is the driver entry point for handling CD-ROM 25707 * drive speed ioctl requests for devices supporting the Toshiba 25708 * vendor specific drive speed mode page. Support for returning 25709 * and changing the current drive speed in use by the device is 25710 * implemented. 25711 * 25712 * Arguments: dev - the device 'dev_t' 25713 * cmd - the request type; one of CDROMGDRVSPEED (get) or 25714 * CDROMSDRVSPEED (set) 25715 * data - current drive speed or requested drive speed 25716 * flag - this argument is a pass through to ddi_copyxxx() directly 25717 * from the mode argument of ioctl(). 25718 * 25719 * Return Code: the code returned by sd_send_scsi_cmd() 25720 * EINVAL if invalid arguments are provided 25721 * EFAULT if ddi_copyxxx() fails 25722 * ENXIO if fail ddi_get_soft_state 25723 * EIO if invalid mode sense block descriptor length 25724 */ 25725 25726 static int 25727 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 25728 { 25729 struct sd_lun *un = NULL; 25730 struct mode_header *sense_mhp, *select_mhp; 25731 struct mode_speed *sense_page, *select_page; 25732 int current_speed; 25733 int rval = EINVAL; 25734 int bd_len; 25735 uchar_t *sense = NULL; 25736 uchar_t *select = NULL; 25737 sd_ssc_t *ssc; 25738 25739 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 25740 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25741 return (ENXIO); 25742 } 25743 25744 /* 25745 * Note: The drive speed is being modified here according to a Toshiba 25746 * vendor specific mode page (0x31). 
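 *
 * Usage sketch (illustrative, from user space; the device path is an
 * assumption): CDROMGDRVSPEED copies the result out through a pointer,
 * while CDROMSDRVSPEED takes the requested speed code by value.
 *
 *	int fd = open("/dev/rdsk/c0t6d0s2", O_RDONLY | O_NDELAY);
 *	int speed;
 *	if (ioctl(fd, CDROMGDRVSPEED, &speed) == 0)
 *		(void) printf("current speed code %d\n", speed);
 *	(void) ioctl(fd, CDROMSDRVSPEED, CDROM_DOUBLE_SPEED);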
25747 */ 25748 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 25749 25750 ssc = sd_ssc_init(un); 25751 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 25752 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 25753 SD_PATH_STANDARD); 25754 sd_ssc_fini(ssc); 25755 if (rval != 0) { 25756 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25757 "sr_change_speed: Mode Sense Failed\n"); 25758 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 25759 return (rval); 25760 } 25761 sense_mhp = (struct mode_header *)sense; 25762 25763 /* Check the block descriptor len to handle only 1 block descriptor */ 25764 bd_len = sense_mhp->bdesc_length; 25765 if (bd_len > MODE_BLK_DESC_LENGTH) { 25766 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25767 "sr_change_speed: Mode Sense returned invalid block " 25768 "descriptor length\n"); 25769 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 25770 return (EIO); 25771 } 25772 25773 sense_page = (struct mode_speed *) 25774 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 25775 current_speed = sense_page->speed; 25776 25777 /* Process command */ 25778 switch (cmd) { 25779 case CDROMGDRVSPEED: 25780 /* Return the drive speed obtained during the mode sense */ 25781 if (current_speed == 0x2) { 25782 current_speed = CDROM_TWELVE_SPEED; 25783 } 25784 if (ddi_copyout(&current_speed, (void *)data, 25785 sizeof (int), flag) != 0) { 25786 rval = EFAULT; 25787 } 25788 break; 25789 case CDROMSDRVSPEED: 25790 /* Validate the requested drive speed */ 25791 switch ((uchar_t)data) { 25792 case CDROM_TWELVE_SPEED: 25793 data = 0x2; 25794 /*FALLTHROUGH*/ 25795 case CDROM_NORMAL_SPEED: 25796 case CDROM_DOUBLE_SPEED: 25797 case CDROM_QUAD_SPEED: 25798 case CDROM_MAXIMUM_SPEED: 25799 break; 25800 default: 25801 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25802 "sr_change_speed: " 25803 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 25804 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 25805 return (EINVAL); 25806 } 25807 25808 /* 25809 * The current drive speed matches the requested drive speed so 25810 * there is no need to send the mode select to change the speed 25811 */ 25812 if (current_speed == data) { 25813 break; 25814 } 25815 25816 /* Build the select data for the requested drive speed */ 25817 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 25818 select_mhp = (struct mode_header *)select; 25819 select_mhp->bdesc_length = 0; 25820 select_page = 25821 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 25824 select_page->mode_page.code = CDROM_MODE_SPEED; 25825 select_page->mode_page.length = 2; 25826 select_page->speed = (uchar_t)data; 25827 25828 /* Send the mode select for the requested drive speed */ 25829 ssc = sd_ssc_init(un); 25830 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 25831 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 25832 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 25833 sd_ssc_fini(ssc); 25834 if (rval != 0) { 25835 /* 25836 * The mode select failed for the requested drive speed, 25837 * so reset the data for the original drive speed and 25838 * send it to the target. The error is indicated by the 25839 * return value for the failed mode select.
25840 */ 25841 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25842 "sr_change_speed: Mode Select Failed\n"); 25843 select_page->speed = sense_page->speed; 25844 ssc = sd_ssc_init(un); 25845 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 25846 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 25847 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 25848 sd_ssc_fini(ssc); 25849 } 25850 break; 25851 default: 25852 /* should not reach here, but check anyway */ 25853 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25854 "sr_change_speed: Command '%x' Not Supported\n", cmd); 25855 rval = EINVAL; 25856 break; 25857 } 25858 25859 if (select) { 25860 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 25861 } 25862 if (sense) { 25863 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 25864 } 25865 25866 return (rval); 25867 } 25868 25869 25870 /* 25871 * Function: sr_atapi_change_speed() 25872 * 25873 * Description: This routine is the driver entry point for handling CD-ROM 25874 * drive speed ioctl requests for MMC devices that do not support 25875 * the Real Time Streaming feature (0x107). 25876 * 25877 * Note: This routine will use the SET SPEED command which may not 25878 * be supported by all devices. 25879 * 25880 * Arguments: dev - the device 'dev_t' 25881 * cmd - the request type; one of CDROMGDRVSPEED (get) or 25882 * CDROMSDRVSPEED (set) 25883 * data - current drive speed or requested drive speed 25884 * flag - this argument is a pass through to ddi_copyxxx() directly 25885 * from the mode argument of ioctl(). 25886 * 25887 * Return Code: the code returned by sd_send_scsi_cmd() 25888 * EINVAL if invalid arguments are provided 25889 * EFAULT if ddi_copyxxx() fails 25890 * ENXIO if fail ddi_get_soft_state 25891 * EIO if invalid mode sense block descriptor length 25892 */ 25893 25894 static int 25895 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 25896 { 25897 struct sd_lun *un; 25898 struct uscsi_cmd *com = NULL; 25899 struct mode_header_grp2 *sense_mhp; 25900 uchar_t *sense_page; 25901 uchar_t *sense = NULL; 25902 char cdb[CDB_GROUP5]; 25903 int bd_len; 25904 int current_speed = 0; 25905 int max_speed = 0; 25906 int rval; 25907 sd_ssc_t *ssc; 25908 25909 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 25910 25911 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25912 return (ENXIO); 25913 } 25914 25915 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 25916 25917 ssc = sd_ssc_init(un); 25918 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 25919 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 25920 SD_PATH_STANDARD); 25921 sd_ssc_fini(ssc); 25922 if (rval != 0) { 25923 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25924 "sr_atapi_change_speed: Mode Sense Failed\n"); 25925 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 25926 return (rval); 25927 } 25928 25929 /* Check the block descriptor len to handle only 1 block descriptor */ 25930 sense_mhp = (struct mode_header_grp2 *)sense; 25931 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 25932 if (bd_len > MODE_BLK_DESC_LENGTH) { 25933 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25934 "sr_atapi_change_speed: Mode Sense returned invalid " 25935 "block descriptor length\n"); 25936 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 25937 return (EIO); 25938 } 25939 25940 /* Calculate the current and maximum drive speeds */ 25941 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 25942 current_speed = (sense_page[14] << 8) | sense_page[15]; 25943 max_speed = (sense_page[8] << 8) | sense_page[9]; 25944 25945
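/*
 * At this point current_speed and max_speed hold the raw KB/sec values
 * from mode page 0x2A (bytes 14-15 and 8-9 of the page, respectively).
 * As a worked example, assuming SD_SPEED_1X is the 176 KB/sec 1x audio
 * rate: a drive reporting 0x02C0 (704) as its current speed is running
 * at 704 / 176 = 4x, which is what CDROMGDRVSPEED returns below.
 */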
/* Process the command */ 25946 switch (cmd) { 25947 case CDROMGDRVSPEED: 25948 current_speed /= SD_SPEED_1X; 25949 if (ddi_copyout(&current_speed, (void *)data, 25950 sizeof (int), flag) != 0) 25951 rval = EFAULT; 25952 break; 25953 case CDROMSDRVSPEED: 25954 /* Convert the speed code to KB/sec */ 25955 switch ((uchar_t)data) { 25956 case CDROM_NORMAL_SPEED: 25957 current_speed = SD_SPEED_1X; 25958 break; 25959 case CDROM_DOUBLE_SPEED: 25960 current_speed = 2 * SD_SPEED_1X; 25961 break; 25962 case CDROM_QUAD_SPEED: 25963 current_speed = 4 * SD_SPEED_1X; 25964 break; 25965 case CDROM_TWELVE_SPEED: 25966 current_speed = 12 * SD_SPEED_1X; 25967 break; 25968 case CDROM_MAXIMUM_SPEED: 25969 current_speed = 0xffff; 25970 break; 25971 default: 25972 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25973 "sr_atapi_change_speed: invalid drive speed %d\n", 25974 (uchar_t)data); 25975 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 25976 return (EINVAL); 25977 } 25978 25979 /* Check the request against the drive's max speed. */ 25980 if (current_speed != 0xffff) { 25981 if (current_speed > max_speed) { 25982 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 25983 return (EINVAL); 25984 } 25985 } 25986 25987 /* 25988 * Build and send the SET SPEED command 25989 * 25990 * Note: The SET SPEED (0xBB) command used in this routine is 25991 * obsolete per the SCSI MMC spec but still supported in the 25992 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 25993 * therefore the command is still implemented in this routine. 25994 */ 25995 bzero(cdb, sizeof (cdb)); 25996 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 25997 cdb[2] = (uchar_t)(current_speed >> 8); 25998 cdb[3] = (uchar_t)current_speed; 25999 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26000 com->uscsi_cdb = (caddr_t)cdb; 26001 com->uscsi_cdblen = CDB_GROUP5; 26002 com->uscsi_bufaddr = NULL; 26003 com->uscsi_buflen = 0; 26004 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26005 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 26006 break; 26007 default: 26008 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26009 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 26010 rval = EINVAL; 26011 } 26012 26013 if (sense) { 26014 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26015 } 26016 if (com) { 26017 kmem_free(com, sizeof (*com)); 26018 } 26019 return (rval); 26020 } 26021 26022 26023 /* 26024 * Function: sr_pause_resume() 26025 * 26026 * Description: This routine is the driver entry point for handling CD-ROM 26027 * pause/resume ioctl requests. This only affects the audio play 26028 * operation. 26029 * 26030 * Arguments: dev - the device 'dev_t' 26031 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 26032 * for setting the resume bit of the cdb.
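 *
 * Usage sketch (illustrative): neither request carries any data, so
 * a caller simply issues
 *
 *	(void) ioctl(fd, CDROMPAUSE, 0);
 *	(void) ioctl(fd, CDROMRESUME, 0);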
26033 * 26034 * Return Code: the code returned by sd_send_scsi_cmd() 26035 * EINVAL if invalid mode specified 26036 * 26037 */ 26038 26039 static int 26040 sr_pause_resume(dev_t dev, int cmd) 26041 { 26042 struct sd_lun *un; 26043 struct uscsi_cmd *com; 26044 char cdb[CDB_GROUP1]; 26045 int rval; 26046 26047 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26048 return (ENXIO); 26049 } 26050 26051 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26052 bzero(cdb, CDB_GROUP1); 26053 cdb[0] = SCMD_PAUSE_RESUME; 26054 switch (cmd) { 26055 case CDROMRESUME: 26056 cdb[8] = 1; 26057 break; 26058 case CDROMPAUSE: 26059 cdb[8] = 0; 26060 break; 26061 default: 26062 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 26063 " Command '%x' Not Supported\n", cmd); 26064 rval = EINVAL; 26065 goto done; 26066 } 26067 26068 com->uscsi_cdb = cdb; 26069 com->uscsi_cdblen = CDB_GROUP1; 26070 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26071 26072 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26073 SD_PATH_STANDARD); 26074 26075 done: 26076 kmem_free(com, sizeof (*com)); 26077 return (rval); 26078 } 26079 26080 26081 /* 26082 * Function: sr_play_msf() 26083 * 26084 * Description: This routine is the driver entry point for handling CD-ROM 26085 * ioctl requests to output the audio signals at the specified 26086 * starting address and continue the audio play until the specified 26087 * ending address (CDROMPLAYMSF). The address is in Minute Second 26088 * Frame (MSF) format. 26089 * 26090 * Arguments: dev - the device 'dev_t' 26091 * data - pointer to user provided audio msf structure, 26092 * specifying start/end addresses. 26093 * flag - this argument is a pass through to ddi_copyxxx() 26094 * directly from the mode argument of ioctl().
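 *
 * Usage sketch (illustrative): play from 2:00.00 through 5:30.00.
 *
 *	struct cdrom_msf msf;
 *	msf.cdmsf_min0 = 2;  msf.cdmsf_sec0 = 0;  msf.cdmsf_frame0 = 0;
 *	msf.cdmsf_min1 = 5;  msf.cdmsf_sec1 = 30; msf.cdmsf_frame1 = 0;
 *	if (ioctl(fd, CDROMPLAYMSF, &msf) < 0)
 *		perror("CDROMPLAYMSF");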
26095 * 26096 * Return Code: the code returned by sd_send_scsi_cmd() 26097 * EFAULT if ddi_copyxxx() fails 26098 * ENXIO if fail ddi_get_soft_state 26099 * EINVAL if data pointer is NULL 26100 */ 26101 26102 static int 26103 sr_play_msf(dev_t dev, caddr_t data, int flag) 26104 { 26105 struct sd_lun *un; 26106 struct uscsi_cmd *com; 26107 struct cdrom_msf msf_struct; 26108 struct cdrom_msf *msf = &msf_struct; 26109 char cdb[CDB_GROUP1]; 26110 int rval; 26111 26112 if (data == NULL) { 26113 return (EINVAL); 26114 } 26115 26116 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26117 return (ENXIO); 26118 } 26119 26120 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 26121 return (EFAULT); 26122 } 26123 26124 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26125 bzero(cdb, CDB_GROUP1); 26126 cdb[0] = SCMD_PLAYAUDIO_MSF; 26127 if (un->un_f_cfg_playmsf_bcd == TRUE) { 26128 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 26129 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 26130 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 26131 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 26132 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 26133 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 26134 } else { 26135 cdb[3] = msf->cdmsf_min0; 26136 cdb[4] = msf->cdmsf_sec0; 26137 cdb[5] = msf->cdmsf_frame0; 26138 cdb[6] = msf->cdmsf_min1; 26139 cdb[7] = msf->cdmsf_sec1; 26140 cdb[8] = msf->cdmsf_frame1; 26141 } 26142 com->uscsi_cdb = cdb; 26143 com->uscsi_cdblen = CDB_GROUP1; 26144 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26145 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26146 SD_PATH_STANDARD); 26147 kmem_free(com, sizeof (*com)); 26148 return (rval); 26149 } 26150 26151 26152 /* 26153 * Function: sr_play_trkind() 26154 * 26155 * Description: This routine is the driver entry point for handling CD-ROM 26156 * ioctl requests to output the audio signals at the specified 26157 * starting address and continue the audio play until the specified 26158 * ending address (CDROMPLAYTRKIND). The address is in Track Index 26159 * format. 26160 * 26161 * Arguments: dev - the device 'dev_t' 26162 * data - pointer to user provided audio track/index structure, 26163 * specifying start/end addresses. 26164 * flag - this argument is a pass through to ddi_copyxxx() 26165 * directly from the mode argument of ioctl(). 
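 *
 * Usage sketch (illustrative): play from track 1 index 1 through
 * track 3 index 1.
 *
 *	struct cdrom_ti ti;
 *	ti.cdti_trk0 = 1;  ti.cdti_ind0 = 1;
 *	ti.cdti_trk1 = 3;  ti.cdti_ind1 = 1;
 *	if (ioctl(fd, CDROMPLAYTRKIND, &ti) < 0)
 *		perror("CDROMPLAYTRKIND");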
26166 * 26167 * Return Code: the code returned by sd_send_scsi_cmd() 26168 * EFAULT if ddi_copyxxx() fails 26169 * ENXIO if fail ddi_get_soft_state 26170 * EINVAL if data pointer is NULL 26171 */ 26172 26173 static int 26174 sr_play_trkind(dev_t dev, caddr_t data, int flag) 26175 { 26176 struct cdrom_ti ti_struct; 26177 struct cdrom_ti *ti = &ti_struct; 26178 struct uscsi_cmd *com = NULL; 26179 char cdb[CDB_GROUP1]; 26180 int rval; 26181 26182 if (data == NULL) { 26183 return (EINVAL); 26184 } 26185 26186 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 26187 return (EFAULT); 26188 } 26189 26190 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26191 bzero(cdb, CDB_GROUP1); 26192 cdb[0] = SCMD_PLAYAUDIO_TI; 26193 cdb[4] = ti->cdti_trk0; 26194 cdb[5] = ti->cdti_ind0; 26195 cdb[7] = ti->cdti_trk1; 26196 cdb[8] = ti->cdti_ind1; 26197 com->uscsi_cdb = cdb; 26198 com->uscsi_cdblen = CDB_GROUP1; 26199 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26200 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26201 SD_PATH_STANDARD); 26202 kmem_free(com, sizeof (*com)); 26203 return (rval); 26204 } 26205 26206 26207 /* 26208 * Function: sr_read_all_subcodes() 26209 * 26210 * Description: This routine is the driver entry point for handling CD-ROM 26211 * ioctl requests to return raw subcode data while the target is 26212 * playing audio (CDROMSUBCODE). 26213 * 26214 * Arguments: dev - the device 'dev_t' 26215 * data - pointer to user provided cdrom subcode structure, 26216 * specifying the transfer length and address. 26217 * flag - this argument is a pass through to ddi_copyxxx() 26218 * directly from the mode argument of ioctl(). 26219 * 26220 * Return Code: the code returned by sd_send_scsi_cmd() 26221 * EFAULT if ddi_copyxxx() fails 26222 * ENXIO if fail ddi_get_soft_state 26223 * EINVAL if data pointer is NULL 26224 */ 26225 26226 static int 26227 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 26228 { 26229 struct sd_lun *un = NULL; 26230 struct uscsi_cmd *com = NULL; 26231 struct cdrom_subcode *subcode = NULL; 26232 int rval; 26233 size_t buflen; 26234 char cdb[CDB_GROUP5]; 26235 26236 #ifdef _MULTI_DATAMODEL 26237 /* To support ILP32 applications in an LP64 world */ 26238 struct cdrom_subcode32 cdrom_subcode32; 26239 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 26240 #endif 26241 if (data == NULL) { 26242 return (EINVAL); 26243 } 26244 26245 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26246 return (ENXIO); 26247 } 26248 26249 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 26250 26251 #ifdef _MULTI_DATAMODEL 26252 switch (ddi_model_convert_from(flag & FMODELS)) { 26253 case DDI_MODEL_ILP32: 26254 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 26255 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26256 "sr_read_all_subcodes: ddi_copyin Failed\n"); 26257 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26258 return (EFAULT); 26259 } 26260 /* Convert the ILP32 uscsi data from the application to LP64 */ 26261 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 26262 break; 26263 case DDI_MODEL_NONE: 26264 if (ddi_copyin(data, subcode, 26265 sizeof (struct cdrom_subcode), flag)) { 26266 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26267 "sr_read_all_subcodes: ddi_copyin Failed\n"); 26268 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26269 return (EFAULT); 26270 } 26271 break; 26272 } 26273 #else /* ! 
_MULTI_DATAMODEL */ 26274 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 26275 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26276 "sr_read_all_subcodes: ddi_copyin Failed\n"); 26277 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26278 return (EFAULT); 26279 } 26280 #endif /* _MULTI_DATAMODEL */ 26281 26282 /* 26283 * Since MMC-2 expects max 3 bytes for length, check if the 26284 * length input is greater than 3 bytes 26285 */ 26286 if ((subcode->cdsc_length & 0xFF000000) != 0) { 26287 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26288 "sr_read_all_subcodes: " 26289 "cdrom transfer length too large: %d (limit %d)\n", 26290 subcode->cdsc_length, 0xFFFFFF); 26291 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26292 return (EINVAL); 26293 } 26294 26295 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 26296 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26297 bzero(cdb, CDB_GROUP5); 26298 26299 if (un->un_f_mmc_cap == TRUE) { 26300 cdb[0] = (char)SCMD_READ_CD; 26301 cdb[2] = (char)0xff; 26302 cdb[3] = (char)0xff; 26303 cdb[4] = (char)0xff; 26304 cdb[5] = (char)0xff; 26305 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 26306 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 26307 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 26308 cdb[10] = 1; 26309 } else { 26310 /* 26311 * Note: A vendor specific command (0xDF) is being used here to 26312 * request a read of all subcodes. 26313 */ 26314 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 26315 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 26316 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 26317 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 26318 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 26319 } 26320 com->uscsi_cdb = cdb; 26321 com->uscsi_cdblen = CDB_GROUP5; 26322 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 26323 com->uscsi_buflen = buflen; 26324 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26325 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 26326 SD_PATH_STANDARD); 26327 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26328 kmem_free(com, sizeof (*com)); 26329 return (rval); 26330 } 26331 26332 26333 /* 26334 * Function: sr_read_subchannel() 26335 * 26336 * Description: This routine is the driver entry point for handling CD-ROM 26337 * ioctl requests to return the Q sub-channel data of the CD 26338 * current position block (CDROMSUBCHNL). The data includes the 26339 * track number, index number, absolute CD-ROM address (LBA or MSF 26340 * format per the user), track relative CD-ROM address (LBA or MSF 26341 * format per the user), control data and audio status. 26342 * 26343 * Arguments: dev - the device 'dev_t' 26344 * data - pointer to user provided cdrom sub-channel structure 26345 * flag - this argument is a pass through to ddi_copyxxx() 26346 * directly from the mode argument of ioctl().
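 *
 * Usage sketch (illustrative): poll the current position in MSF form.
 *
 *	struct cdrom_subchnl sc;
 *	sc.cdsc_format = CDROM_MSF;
 *	if (ioctl(fd, CDROMSUBCHNL, &sc) == 0)
 *		(void) printf("track %d  %02d:%02d.%02d  status 0x%x\n",
 *		    sc.cdsc_trk, sc.cdsc_absaddr.msf.minute,
 *		    sc.cdsc_absaddr.msf.second,
 *		    sc.cdsc_absaddr.msf.frame, sc.cdsc_audiostatus);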
26347 * 26348 * Return Code: the code returned by sd_send_scsi_cmd() 26349 * EFAULT if ddi_copyxxx() fails 26350 * ENXIO if fail ddi_get_soft_state 26351 * EINVAL if data pointer is NULL 26352 */ 26353 26354 static int 26355 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 26356 { 26357 struct sd_lun *un; 26358 struct uscsi_cmd *com; 26359 struct cdrom_subchnl subchannel; 26360 struct cdrom_subchnl *subchnl = &subchannel; 26361 char cdb[CDB_GROUP1]; 26362 caddr_t buffer; 26363 int rval; 26364 26365 if (data == NULL) { 26366 return (EINVAL); 26367 } 26368 26369 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26370 (un->un_state == SD_STATE_OFFLINE)) { 26371 return (ENXIO); 26372 } 26373 26374 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 26375 return (EFAULT); 26376 } 26377 26378 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 26379 bzero(cdb, CDB_GROUP1); 26380 cdb[0] = SCMD_READ_SUBCHANNEL; 26381 /* Set the MSF bit based on the user requested address format */ 26382 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 26383 /* 26384 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 26385 * returned 26386 */ 26387 cdb[2] = 0x40; 26388 /* 26389 * Set byte 3 to specify the return data format. A value of 0x01 26390 * indicates that the CD-ROM current position should be returned. 26391 */ 26392 cdb[3] = 0x01; 26393 cdb[8] = 0x10; 26394 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26395 com->uscsi_cdb = cdb; 26396 com->uscsi_cdblen = CDB_GROUP1; 26397 com->uscsi_bufaddr = buffer; 26398 com->uscsi_buflen = 16; 26399 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26400 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26401 SD_PATH_STANDARD); 26402 if (rval != 0) { 26403 kmem_free(buffer, 16); 26404 kmem_free(com, sizeof (*com)); 26405 return (rval); 26406 } 26407 26408 /* Process the returned Q sub-channel data */ 26409 subchnl->cdsc_audiostatus = buffer[1]; 26410 subchnl->cdsc_adr = (buffer[5] & 0xF0); 26411 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 26412 subchnl->cdsc_trk = buffer[6]; 26413 subchnl->cdsc_ind = buffer[7]; 26414 if (subchnl->cdsc_format & CDROM_LBA) { 26415 subchnl->cdsc_absaddr.lba = 26416 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 26417 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 26418 subchnl->cdsc_reladdr.lba = 26419 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 26420 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 26421 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 26422 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 26423 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 26424 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 26425 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 26426 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 26427 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 26428 } else { 26429 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 26430 subchnl->cdsc_absaddr.msf.second = buffer[10]; 26431 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 26432 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 26433 subchnl->cdsc_reladdr.msf.second = buffer[14]; 26434 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 26435 } 26436 kmem_free(buffer, 16); 26437 kmem_free(com, sizeof (*com)); 26438 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 26439 != 0) { 26440 return (EFAULT); 26441 } 26442 return (rval); 26443 } 26444 26445 26446 /* 26447 * Function: sr_read_tocentry() 26448 *
26449 * Description: This routine is the driver entry point for handling CD-ROM 26450 * ioctl requests to read from the Table of Contents (TOC) 26451 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 26452 * fields, the starting address (LBA or MSF format per the user) 26453 * and the data mode if the user specified track is a data track. 26454 * 26455 * Note: The READ HEADER (0x44) command used in this routine is 26456 * obsolete per the SCSI MMC spec but still supported in the 26457 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 26458 * therefore the command is still implemented in this routine. 26459 * 26460 * Arguments: dev - the device 'dev_t' 26461 * data - pointer to user provided toc entry structure, 26462 * specifying the track # and the address format 26463 * (LBA or MSF). 26464 * flag - this argument is a pass through to ddi_copyxxx() 26465 * directly from the mode argument of ioctl(). 26466 * 26467 * Return Code: the code returned by sd_send_scsi_cmd() 26468 * EFAULT if ddi_copyxxx() fails 26469 * ENXIO if fail ddi_get_soft_state 26470 * EINVAL if data pointer is NULL 26471 */ 26472 26473 static int 26474 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 26475 { 26476 struct sd_lun *un = NULL; 26477 struct uscsi_cmd *com; 26478 struct cdrom_tocentry toc_entry; 26479 struct cdrom_tocentry *entry = &toc_entry; 26480 caddr_t buffer; 26481 int rval; 26482 char cdb[CDB_GROUP1]; 26483 26484 if (data == NULL) { 26485 return (EINVAL); 26486 } 26487 26488 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26489 (un->un_state == SD_STATE_OFFLINE)) { 26490 return (ENXIO); 26491 } 26492 26493 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 26494 return (EFAULT); 26495 } 26496 26497 /* Validate the requested track and address format */ 26498 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 26499 return (EINVAL); 26500 } 26501 26502 if (entry->cdte_track == 0) { 26503 return (EINVAL); 26504 } 26505 26506 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 26507 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26508 bzero(cdb, CDB_GROUP1); 26509 26510 cdb[0] = SCMD_READ_TOC; 26511 /* Set the MSF bit based on the user requested address format */ 26512 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 26513 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 26514 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 26515 } else { 26516 cdb[6] = entry->cdte_track; 26517 } 26518 26519 /* 26520 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
* (4 byte TOC response header + 8 byte track descriptor) 26522 */ 26523 cdb[8] = 12; 26524 com->uscsi_cdb = cdb; 26525 com->uscsi_cdblen = CDB_GROUP1; 26526 com->uscsi_bufaddr = buffer; 26527 com->uscsi_buflen = 0x0C; 26528 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 26529 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26530 SD_PATH_STANDARD); 26531 if (rval != 0) { 26532 kmem_free(buffer, 12); 26533 kmem_free(com, sizeof (*com)); 26534 return (rval); 26535 } 26536 26537 /* Process the toc entry */ 26538 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 26539 entry->cdte_ctrl = (buffer[5] & 0x0F); 26540 if (entry->cdte_format & CDROM_LBA) { 26541 entry->cdte_addr.lba = 26542 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 26543 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 26544 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 26545 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 26546 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 26547 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 26548 /* 26549 * Send a READ TOC command using the LBA address format to get 26550 * the LBA for the track requested so it can be used in the 26551 * READ HEADER request 26552 * 26553 * Note: The MSF bit of the READ HEADER command specifies the 26554 * output format. The block address specified in that command 26555 * must be in LBA format. 26556 */ 26557 cdb[1] = 0; 26558 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26559 SD_PATH_STANDARD); 26560 if (rval != 0) { 26561 kmem_free(buffer, 12); 26562 kmem_free(com, sizeof (*com)); 26563 return (rval); 26564 } 26565 } else { 26566 entry->cdte_addr.msf.minute = buffer[9]; 26567 entry->cdte_addr.msf.second = buffer[10]; 26568 entry->cdte_addr.msf.frame = buffer[11]; 26569 /* 26570 * Send a READ TOC command using the LBA address format to get 26571 * the LBA for the track requested so it can be used in the 26572 * READ HEADER request 26573 * 26574 * Note: The MSF bit of the READ HEADER command specifies the 26575 * output format. The block address specified in that command 26576 * must be in LBA format. 26577 */ 26578 cdb[1] = 0; 26579 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26580 SD_PATH_STANDARD); 26581 if (rval != 0) { 26582 kmem_free(buffer, 12); 26583 kmem_free(com, sizeof (*com)); 26584 return (rval); 26585 } 26586 } 26587 26588 /* 26589 * Build and send the READ HEADER command to determine the data mode of 26590 * the user specified track. 26591 */ 26592 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 26593 (entry->cdte_track != CDROM_LEADOUT)) { 26594 bzero(cdb, CDB_GROUP1); 26595 cdb[0] = SCMD_READ_HEADER; 26596 cdb[2] = buffer[8]; 26597 cdb[3] = buffer[9]; 26598 cdb[4] = buffer[10]; 26599 cdb[5] = buffer[11]; 26600 cdb[8] = 0x08; 26601 com->uscsi_buflen = 0x08; 26602 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26603 SD_PATH_STANDARD); 26604 if (rval == 0) { 26605 entry->cdte_datamode = buffer[0]; 26606 } else { 26607 /* 26608 * The READ HEADER command failed. Since it is 26609 * obsolete in one spec, it is better to return 26610 * -1 for an invalid track so that we can still 26611 * receive the rest of the TOC data.
26612 */ 26613 entry->cdte_datamode = (uchar_t)-1; 26614 } 26615 } else { 26616 entry->cdte_datamode = (uchar_t)-1; 26617 } 26618 26619 kmem_free(buffer, 12); 26620 kmem_free(com, sizeof (*com)); 26621 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 26622 return (EFAULT); 26623 26624 return (rval); 26625 } 26626 26627 26628 /* 26629 * Function: sr_read_tochdr() 26630 * 26631 * Description: This routine is the driver entry point for handling CD-ROM 26632 * ioctl requests to read the Table of Contents (TOC) header 26633 * (CDROMREADTOCHDR). The TOC header consists of the disk starting 26634 * and ending track numbers. 26635 * 26636 * Arguments: dev - the device 'dev_t' 26637 * data - pointer to user provided toc header structure, 26638 * specifying the starting and ending track numbers. 26639 * flag - this argument is a pass through to ddi_copyxxx() 26640 * directly from the mode argument of ioctl(). 26641 * 26642 * Return Code: the code returned by sd_send_scsi_cmd() 26643 * EFAULT if ddi_copyxxx() fails 26644 * ENXIO if fail ddi_get_soft_state 26645 * EINVAL if data pointer is NULL 26646 */ 26647 26648 static int 26649 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 26650 { 26651 struct sd_lun *un; 26652 struct uscsi_cmd *com; 26653 struct cdrom_tochdr toc_header; 26654 struct cdrom_tochdr *hdr = &toc_header; 26655 char cdb[CDB_GROUP1]; 26656 int rval; 26657 caddr_t buffer; 26658 26659 if (data == NULL) { 26660 return (EINVAL); 26661 } 26662 26663 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26664 (un->un_state == SD_STATE_OFFLINE)) { 26665 return (ENXIO); 26666 } 26667 26668 buffer = kmem_zalloc(4, KM_SLEEP); 26669 bzero(cdb, CDB_GROUP1); 26670 cdb[0] = SCMD_READ_TOC; 26671 /* 26672 * Specifying a track number of 0x00 in the READ TOC command indicates 26673 * that the TOC header should be returned. 26674 */ 26675 cdb[6] = 0x00; 26676 /* 26677 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 26678 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 26679 */ 26680 cdb[8] = 0x04; 26681 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26682 com->uscsi_cdb = cdb; 26683 com->uscsi_cdblen = CDB_GROUP1; 26684 com->uscsi_bufaddr = buffer; 26685 com->uscsi_buflen = 0x04; 26686 com->uscsi_timeout = 300; 26687 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26688 26689 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26690 SD_PATH_STANDARD); 26691 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 26692 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 26693 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 26694 } else { 26695 hdr->cdth_trk0 = buffer[2]; 26696 hdr->cdth_trk1 = buffer[3]; 26697 } 26698 kmem_free(buffer, 4); 26699 kmem_free(com, sizeof (*com)); 26700 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 26701 return (EFAULT); 26702 } 26703 return (rval); 26704 } 26705 26706 26707 /* 26708 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 26709 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for 26710 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 26711 * digital audio and extended architecture digital audio. These modes are 26712 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 26713 * MMC specs.
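 *
 * For reference, the sector sizes used below map onto the CD sector
 * layouts roughly as follows (an informal summary):
 *
 *	2048	mode 1 user data (CDROM_BLK_2048)
 *	2336	mode 2 user data (CDROM_BLK_2336)
 *	2352	full raw sector, e.g. CD-DA audio (CDROM_BLK_2352)
 *	2368	raw sector plus 16 bytes of Q subcode (CDROM_BLK_2368)
 *	2448	raw sector plus all 96 subcode bytes (CDROM_BLK_2448)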
26714 * 26715 * In addition to support for the various data formats these routines also 26716 * include support for devices that implement only the direct access READ 26717 * commands (0x08, 0x28), devices that implement the READ_CD commands 26718 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 26719 * READ CDXA commands (0xD8, 0xDB) 26720 */ 26721 26722 /* 26723 * Function: sr_read_mode1() 26724 * 26725 * Description: This routine is the driver entry point for handling CD-ROM 26726 * ioctl read mode1 requests (CDROMREADMODE1). 26727 * 26728 * Arguments: dev - the device 'dev_t' 26729 * data - pointer to user provided cd read structure specifying 26730 * the lba buffer address and length. 26731 * flag - this argument is a pass through to ddi_copyxxx() 26732 * directly from the mode argument of ioctl(). 26733 * 26734 * Return Code: the code returned by sd_send_scsi_cmd() 26735 * EFAULT if ddi_copyxxx() fails 26736 * ENXIO if fail ddi_get_soft_state 26737 * EINVAL if data pointer is NULL 26738 */ 26739 26740 static int 26741 sr_read_mode1(dev_t dev, caddr_t data, int flag) 26742 { 26743 struct sd_lun *un; 26744 struct cdrom_read mode1_struct; 26745 struct cdrom_read *mode1 = &mode1_struct; 26746 int rval; 26747 sd_ssc_t *ssc; 26748 26749 #ifdef _MULTI_DATAMODEL 26750 /* To support ILP32 applications in an LP64 world */ 26751 struct cdrom_read32 cdrom_read32; 26752 struct cdrom_read32 *cdrd32 = &cdrom_read32; 26753 #endif /* _MULTI_DATAMODEL */ 26754 26755 if (data == NULL) { 26756 return (EINVAL); 26757 } 26758 26759 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26760 (un->un_state == SD_STATE_OFFLINE)) { 26761 return (ENXIO); 26762 } 26763 26764 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26765 "sd_read_mode1: entry: un:0x%p\n", un); 26766 26767 #ifdef _MULTI_DATAMODEL 26768 switch (ddi_model_convert_from(flag & FMODELS)) { 26769 case DDI_MODEL_ILP32: 26770 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 26771 return (EFAULT); 26772 } 26773 /* Convert the ILP32 uscsi data from the application to LP64 */ 26774 cdrom_read32tocdrom_read(cdrd32, mode1); 26775 break; 26776 case DDI_MODEL_NONE: 26777 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 26778 return (EFAULT); 26779 } 26780 } 26781 #else /* ! _MULTI_DATAMODEL */ 26782 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 26783 return (EFAULT); 26784 } 26785 #endif /* _MULTI_DATAMODEL */ 26786 26787 ssc = sd_ssc_init(un); 26788 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 26789 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 26790 sd_ssc_fini(ssc); 26791 26792 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26793 "sd_read_mode1: exit: un:0x%p\n", un); 26794 26795 return (rval); 26796 } 26797 26798 26799 /* 26800 * Function: sr_read_cd_mode2() 26801 * 26802 * Description: This routine is the driver entry point for handling CD-ROM 26803 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 26804 * support the READ CD (0xBE) command or the 1st generation 26805 * READ CD (0xD4) command. 26806 * 26807 * Arguments: dev - the device 'dev_t' 26808 * data - pointer to user provided cd read structure specifying 26809 * the lba buffer address and length. 26810 * flag - this argument is a pass through to ddi_copyxxx() 26811 * directly from the mode argument of ioctl(). 
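 *
 * Usage sketch (illustrative): read one 2336-byte mode 2 sector at
 * logical block 16.
 *
 *	struct cdrom_read cr;
 *	char buf[2336];
 *	cr.cdread_lba = 16;
 *	cr.cdread_bufaddr = buf;
 *	cr.cdread_buflen = sizeof (buf);
 *	if (ioctl(fd, CDROMREADMODE2, &cr) < 0)
 *		perror("CDROMREADMODE2");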
26812 * 26813 * Return Code: the code returned by sd_send_scsi_cmd() 26814 * EFAULT if ddi_copyxxx() fails 26815 * ENXIO if fail ddi_get_soft_state 26816 * EINVAL if data pointer is NULL 26817 */ 26818 26819 static int 26820 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 26821 { 26822 struct sd_lun *un; 26823 struct uscsi_cmd *com; 26824 struct cdrom_read mode2_struct; 26825 struct cdrom_read *mode2 = &mode2_struct; 26826 uchar_t cdb[CDB_GROUP5]; 26827 int nblocks; 26828 int rval; 26829 #ifdef _MULTI_DATAMODEL 26830 /* To support ILP32 applications in an LP64 world */ 26831 struct cdrom_read32 cdrom_read32; 26832 struct cdrom_read32 *cdrd32 = &cdrom_read32; 26833 #endif /* _MULTI_DATAMODEL */ 26834 26835 if (data == NULL) { 26836 return (EINVAL); 26837 } 26838 26839 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26840 (un->un_state == SD_STATE_OFFLINE)) { 26841 return (ENXIO); 26842 } 26843 26844 #ifdef _MULTI_DATAMODEL 26845 switch (ddi_model_convert_from(flag & FMODELS)) { 26846 case DDI_MODEL_ILP32: 26847 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 26848 return (EFAULT); 26849 } 26850 /* Convert the ILP32 uscsi data from the application to LP64 */ 26851 cdrom_read32tocdrom_read(cdrd32, mode2); 26852 break; 26853 case DDI_MODEL_NONE: 26854 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 26855 return (EFAULT); 26856 } 26857 break; 26858 } 26859 26860 #else /* ! _MULTI_DATAMODEL */ 26861 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 26862 return (EFAULT); 26863 } 26864 #endif /* _MULTI_DATAMODEL */ 26865 26866 bzero(cdb, sizeof (cdb)); 26867 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 26868 /* Read command supported by 1st generation atapi drives */ 26869 cdb[0] = SCMD_READ_CDD4; 26870 } else { 26871 /* Universal CD Access Command */ 26872 cdb[0] = SCMD_READ_CD; 26873 } 26874 26875 /* 26876 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book 26877 */ 26878 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 26879 26880 /* set the start address */ 26881 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0xFF); 26882 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0xFF); 26883 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 26884 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 26885 26886 /* set the transfer length */ 26887 nblocks = mode2->cdread_buflen / 2336; 26888 cdb[6] = (uchar_t)(nblocks >> 16); 26889 cdb[7] = (uchar_t)(nblocks >> 8); 26890 cdb[8] = (uchar_t)nblocks; 26891 26892 /* set the filter bits */ 26893 cdb[9] = CDROM_READ_CD_USERDATA; 26894 26895 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26896 com->uscsi_cdb = (caddr_t)cdb; 26897 com->uscsi_cdblen = sizeof (cdb); 26898 com->uscsi_bufaddr = mode2->cdread_bufaddr; 26899 com->uscsi_buflen = mode2->cdread_buflen; 26900 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26901 26902 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 26903 SD_PATH_STANDARD); 26904 kmem_free(com, sizeof (*com)); 26905 return (rval); 26906 } 26907 26908 26909 /* 26910 * Function: sr_read_mode2() 26911 * 26912 * Description: This routine is the driver entry point for handling CD-ROM 26913 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 26914 * do not support the READ CD (0xBE) command. 26915 * 26916 * Arguments: dev - the device 'dev_t' 26917 * data - pointer to user provided cd read structure specifying 26918 * the lba buffer address and length. 26919 * flag - this argument is a pass through to ddi_copyxxx() 26920 * directly from the mode argument of ioctl().
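 *
 * Note: in this path cdread_lba is taken in 512-byte units and is
 * converted below to the 2048-byte units addressed by the READ(6)
 * command; a caller passing cdread_lba == 100, for example, reads
 * device block 25. cdread_buflen should be a multiple of 2336.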
26921 * 26922 * Return Code: the code returned by sd_send_scsi_cmd() 26923 * EFAULT if ddi_copyxxx() fails 26924 * ENXIO if fail ddi_get_soft_state 26925 * EINVAL if data pointer is NULL 26926 * EIO if fail to reset block size 26927 * EAGAIN if commands are in progress in the driver 26928 */ 26929 26930 static int 26931 sr_read_mode2(dev_t dev, caddr_t data, int flag) 26932 { 26933 struct sd_lun *un; 26934 struct cdrom_read mode2_struct; 26935 struct cdrom_read *mode2 = &mode2_struct; 26936 int rval; 26937 uint32_t restore_blksize; 26938 struct uscsi_cmd *com; 26939 uchar_t cdb[CDB_GROUP0]; 26940 int nblocks; 26941 26942 #ifdef _MULTI_DATAMODEL 26943 /* To support ILP32 applications in an LP64 world */ 26944 struct cdrom_read32 cdrom_read32; 26945 struct cdrom_read32 *cdrd32 = &cdrom_read32; 26946 #endif /* _MULTI_DATAMODEL */ 26947 26948 if (data == NULL) { 26949 return (EINVAL); 26950 } 26951 26952 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26953 (un->un_state == SD_STATE_OFFLINE)) { 26954 return (ENXIO); 26955 } 26956 26957 /* 26958 * Because this routine will update the device and driver block size 26959 * being used we want to make sure there are no commands in progress. 26960 * If commands are in progress the user will have to try again. 26961 * 26962 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 26963 * in sdioctl to protect commands from sdioctl through to the top of 26964 * sd_uscsi_strategy. See sdioctl for details. 26965 */ 26966 mutex_enter(SD_MUTEX(un)); 26967 if (un->un_ncmds_in_driver != 1) { 26968 mutex_exit(SD_MUTEX(un)); 26969 return (EAGAIN); 26970 } 26971 mutex_exit(SD_MUTEX(un)); 26972 26973 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26974 "sd_read_mode2: entry: un:0x%p\n", un); 26975 26976 #ifdef _MULTI_DATAMODEL 26977 switch (ddi_model_convert_from(flag & FMODELS)) { 26978 case DDI_MODEL_ILP32: 26979 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 26980 return (EFAULT); 26981 } 26982 /* Convert the ILP32 uscsi data from the application to LP64 */ 26983 cdrom_read32tocdrom_read(cdrd32, mode2); 26984 break; 26985 case DDI_MODEL_NONE: 26986 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 26987 return (EFAULT); 26988 } 26989 break; 26990 } 26991 #else /* ! 
_MULTI_DATAMODEL */ 26992 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 26993 return (EFAULT); 26994 } 26995 #endif /* _MULTI_DATAMODEL */ 26996 26997 /* Store the current target block size for restoration later */ 26998 restore_blksize = un->un_tgt_blocksize; 26999 27000 /* Change the device and soft state target block size to 2336 */ 27001 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 27002 rval = EIO; 27003 goto done; 27004 } 27005 27006 27007 bzero(cdb, sizeof (cdb)); 27008 27009 /* set READ operation */ 27010 cdb[0] = SCMD_READ; 27011 27012 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 27013 mode2->cdread_lba >>= 2; 27014 27015 /* set the start address */ 27016 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F); 27017 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 27018 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 27019 27020 /* set the transfer length */ 27021 nblocks = mode2->cdread_buflen / 2336; 27022 cdb[4] = (uchar_t)nblocks & 0xFF; 27023 27024 /* build command */ 27025 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27026 com->uscsi_cdb = (caddr_t)cdb; 27027 com->uscsi_cdblen = sizeof (cdb); 27028 com->uscsi_bufaddr = mode2->cdread_bufaddr; 27029 com->uscsi_buflen = mode2->cdread_buflen; 27030 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27031 27032 /* 27033 * Issue SCSI command with user space address for read buffer. 27034 * 27035 * This sends the command through the main channel in the driver. 27036 * 27037 * Since this is accessed via an IOCTL call, we go through the 27038 * standard path, so that if the device was powered down, then 27039 * it would be 'awakened' to handle the command. 27040 */ 27041 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27042 SD_PATH_STANDARD); 27043 27044 kmem_free(com, sizeof (*com)); 27045 27046 /* Restore the device and soft state target block size */ 27047 if (sr_sector_mode(dev, restore_blksize) != 0) { 27048 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27049 "can't switch back to mode 1\n"); 27050 /* 27051 * If sd_send_scsi_READ succeeded we still need to report 27052 * an error because we failed to reset the block size 27053 */ 27054 if (rval == 0) { 27055 rval = EIO; 27056 } 27057 } 27058 27059 done: 27060 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27061 "sd_read_mode2: exit: un:0x%p\n", un); 27062 27063 return (rval); 27064 } 27065 27066 27067 /* 27068 * Function: sr_sector_mode() 27069 * 27070 * Description: This utility function is used by sr_read_mode2 to set the target 27071 * block size based on the user specified size. This is a legacy 27072 * implementation based upon a vendor specific mode page. 27073 * 27074 * Arguments: dev - the device 'dev_t' 27075 * blksize - the block size being set; either 2336 or 27076 * 512.
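 *
 * For reference, a sketch of the 20-byte select buffer built below
 * (inferred from the code; the vendor page format is not otherwise
 * documented here):
 *
 *	byte  3		block descriptor length (0x08)
 *	bytes 10-11	low 16 bits of the logical block length
 *			(512 or 2336)
 *	byte  12	page code 0x01 (sensed as 0x81; the high bit
 *			is not set on the select)
 *	byte  13	page length (0x06)
 *	bytes 14-15	carried over from the mode sense data, with
 *			bit 0 of byte 14 set to select the 2336 mode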
27077 * 27078 * Return Code: the code returned by sd_send_scsi_cmd() 27079 * ENXIO if fail ddi_get_soft_state 27082 */ 27083 27084 static int 27085 sr_sector_mode(dev_t dev, uint32_t blksize) 27086 { 27087 struct sd_lun *un; 27088 uchar_t *sense; 27089 uchar_t *select; 27090 int rval; 27091 sd_ssc_t *ssc; 27092 27093 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27094 (un->un_state == SD_STATE_OFFLINE)) { 27095 return (ENXIO); 27096 } 27097 27098 sense = kmem_zalloc(20, KM_SLEEP); 27099 27100 /* Note: This is a vendor specific mode page (0x81) */ 27101 ssc = sd_ssc_init(un); 27102 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81, 27103 SD_PATH_STANDARD); 27104 sd_ssc_fini(ssc); 27105 if (rval != 0) { 27106 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27107 "sr_sector_mode: Mode Sense failed\n"); 27108 kmem_free(sense, 20); 27109 return (rval); 27110 } 27111 select = kmem_zalloc(20, KM_SLEEP); 27112 select[3] = 0x08; 27113 select[10] = ((blksize >> 8) & 0xff); 27114 select[11] = (blksize & 0xff); 27115 select[12] = 0x01; 27116 select[13] = 0x06; 27117 select[14] = sense[14]; 27118 select[15] = sense[15]; 27119 if (blksize == SD_MODE2_BLKSIZE) { 27120 select[14] |= 0x01; 27121 } 27122 27123 ssc = sd_ssc_init(un); 27124 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20, 27125 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27126 sd_ssc_fini(ssc); 27127 if (rval != 0) { 27128 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27129 "sr_sector_mode: Mode Select failed\n"); 27130 } else { 27131 /* 27132 * Only update the softstate block size if we successfully 27133 * changed the device block mode. 27134 */ 27135 mutex_enter(SD_MUTEX(un)); 27136 sd_update_block_info(un, blksize, 0); 27137 mutex_exit(SD_MUTEX(un)); 27138 } 27139 kmem_free(sense, 20); 27140 kmem_free(select, 20); 27141 return (rval); 27142 } 27143 27144 27145 /* 27146 * Function: sr_read_cdda() 27147 * 27148 * Description: This routine is the driver entry point for handling CD-ROM 27149 * ioctl requests to return CD-DA or subcode data (CDROMCDDA). If 27150 * the target supports CDDA these requests are handled via a vendor 27151 * specific command (0xD8). If the target does not support CDDA 27152 * these requests are handled via the READ CD command (0xBE). 27153 * 27154 * Arguments: dev - the device 'dev_t' 27155 * data - pointer to user provided CD-DA structure specifying 27156 * the track starting address, transfer length, and 27157 * subcode options. 27158 * flag - this argument is a pass through to ddi_copyxxx() 27159 * directly from the mode argument of ioctl().
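 *
 * Usage sketch (illustrative): read 10 frames of audio with no
 * subcode data; the buffer must hold 2352 bytes per frame, and
 * start_lba stands in for a track start address obtained elsewhere.
 *
 *	struct cdrom_cdda cdda;
 *	uchar_t buf[10 * 2352];
 *	cdda.cdda_addr = start_lba;
 *	cdda.cdda_length = 10;
 *	cdda.cdda_data = (caddr_t)buf;
 *	cdda.cdda_subcode = CDROM_DA_NO_SUBCODE;
 *	if (ioctl(fd, CDROMCDDA, &cdda) < 0)
 *		perror("CDROMCDDA");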
27160 * 27161 * Return Code: the code returned by sd_send_scsi_cmd() 27162 * EFAULT if ddi_copyxxx() fails 27163 * ENXIO if fail ddi_get_soft_state 27164 * EINVAL if invalid arguments are provided 27165 * ENOTTY 27166 */ 27167 27168 static int 27169 sr_read_cdda(dev_t dev, caddr_t data, int flag) 27170 { 27171 struct sd_lun *un; 27172 struct uscsi_cmd *com; 27173 struct cdrom_cdda *cdda; 27174 int rval; 27175 size_t buflen; 27176 char cdb[CDB_GROUP5]; 27177 27178 #ifdef _MULTI_DATAMODEL 27179 /* To support ILP32 applications in an LP64 world */ 27180 struct cdrom_cdda32 cdrom_cdda32; 27181 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 27182 #endif /* _MULTI_DATAMODEL */ 27183 27184 if (data == NULL) { 27185 return (EINVAL); 27186 } 27187 27188 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27189 return (ENXIO); 27190 } 27191 27192 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 27193 27194 #ifdef _MULTI_DATAMODEL 27195 switch (ddi_model_convert_from(flag & FMODELS)) { 27196 case DDI_MODEL_ILP32: 27197 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 27198 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27199 "sr_read_cdda: ddi_copyin Failed\n"); 27200 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27201 return (EFAULT); 27202 } 27203 /* Convert the ILP32 uscsi data from the application to LP64 */ 27204 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 27205 break; 27206 case DDI_MODEL_NONE: 27207 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 27208 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27209 "sr_read_cdda: ddi_copyin Failed\n"); 27210 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27211 return (EFAULT); 27212 } 27213 break; 27214 } 27215 #else /* ! _MULTI_DATAMODEL */ 27216 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 27217 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27218 "sr_read_cdda: ddi_copyin Failed\n"); 27219 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27220 return (EFAULT); 27221 } 27222 #endif /* _MULTI_DATAMODEL */ 27223 27224 /* 27225 * Since MMC-2 expects max 3 bytes for length, check if the 27226 * length input is greater than 3 bytes 27227 */ 27228 if ((cdda->cdda_length & 0xFF000000) != 0) { 27229 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 27230 "cdrom transfer length too large: %d (limit %d)\n", 27231 cdda->cdda_length, 0xFFFFFF); 27232 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27233 return (EINVAL); 27234 } 27235 27236 switch (cdda->cdda_subcode) { 27237 case CDROM_DA_NO_SUBCODE: 27238 buflen = CDROM_BLK_2352 * cdda->cdda_length; 27239 break; 27240 case CDROM_DA_SUBQ: 27241 buflen = CDROM_BLK_2368 * cdda->cdda_length; 27242 break; 27243 case CDROM_DA_ALL_SUBCODE: 27244 buflen = CDROM_BLK_2448 * cdda->cdda_length; 27245 break; 27246 case CDROM_DA_SUBCODE_ONLY: 27247 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 27248 break; 27249 default: 27250 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27251 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 27252 cdda->cdda_subcode); 27253 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27254 return (EINVAL); 27255 } 27256 27257 /* Build and send the command */ 27258 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27259 bzero(cdb, CDB_GROUP5); 27260 27261 if (un->un_f_cfg_cdda == TRUE) { 27262 cdb[0] = (char)SCMD_READ_CD; 27263 cdb[1] = 0x04; 27264 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 27265 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 27266 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 27267 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 27268 
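/* 24-bit transfer length, in frames (READ CD CDB bytes 6-8) */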
		cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdda->cdda_length) & 0x000000ff);
		cdb[9] = 0x10;
		switch (cdda->cdda_subcode) {
		case CDROM_DA_NO_SUBCODE:
			cdb[10] = 0x0;
			break;
		case CDROM_DA_SUBQ:
			cdb[10] = 0x2;
			break;
		case CDROM_DA_ALL_SUBCODE:
			cdb[10] = 0x1;
			break;
		case CDROM_DA_SUBCODE_ONLY:
			/* FALLTHROUGH */
		default:
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			kmem_free(com, sizeof (*com));
			return (ENOTTY);
		}
	} else {
		cdb[0] = (char)SCMD_READ_CDDA;
		cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
		cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24);
		cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdda->cdda_length) & 0x000000ff);
		cdb[10] = cdda->cdda_subcode;
	}

	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdda->cdda_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(cdda, sizeof (struct cdrom_cdda));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_cdxa()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return CD-XA (Extended Architecture) data.
 *		(CDROMCDXA).
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to user provided CD-XA structure specifying
 *			  the data starting address, transfer length, and format
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state() fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_cdxa(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_cdxa	*cdxa;
	int			rval;
	size_t			buflen;
	char			cdb[CDB_GROUP5];
	uchar_t			read_flags;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_cdxa32	cdrom_cdxa32;
	struct cdrom_cdxa32	*cdxa32 = &cdrom_cdxa32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		/*
		 * Convert the ILP32 uscsi data from the
		 * application to LP64 for internal use.
		 */
		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 allows at most 3 bytes for the transfer length,
	 * reject any request whose length does not fit in 3 bytes.
	 */
	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdxa->cdxa_length, 0xFFFFFF);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	switch (cdxa->cdxa_format) {
	case CDROM_XA_DATA:
		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
		read_flags = 0x10;
		break;
	case CDROM_XA_SECTOR_DATA:
		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
		read_flags = 0xf8;
		break;
	case CDROM_XA_DATA_W_ERROR:
		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
		read_flags = 0xfc;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
		    cdxa->cdxa_format);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);
	if (un->un_f_mmc_cap == TRUE) {
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[9] = (char)read_flags;
	} else {
		/*
		 * Note: A vendor specific command (0xDB) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_CDXA;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
		cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[10] = cdxa->cdxa_format;
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(cdxa, sizeof (struct cdrom_cdxa));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_eject()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT)
 *
 * Arguments: dev - the device 'dev_t'
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 */

static int
sr_eject(dev_t dev)
{
	struct sd_lun	*un;
	int		rval;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/*
	 * To prevent race conditions with the eject command, keep track
	 * of an eject command as it progresses. If we are already handling
	 * an eject command in the driver for the given unit and another
	 * request to eject is received, immediately return EAGAIN so we
	 * don't lose the command if the current eject command fails.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_ejecting == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return (EAGAIN);
	}
	un->un_f_ejecting = TRUE;
	mutex_exit(SD_MUTEX(un));

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	if (rval != 0) {
		mutex_enter(SD_MUTEX(un));
		un->un_f_ejecting = FALSE;
		mutex_exit(SD_MUTEX(un));
		return (rval);
	}

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_EJECT,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	if (rval == 0) {
		mutex_enter(SD_MUTEX(un));
		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		un->un_f_ejecting = FALSE;
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));
	} else {
		mutex_enter(SD_MUTEX(un));
		un->un_f_ejecting = FALSE;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function: sr_ejected()
 *
 * Description: This routine updates the soft state structure to invalidate the
 *		geometry information after the media has been ejected or a
 *		media eject has been detected.
 *
 * Arguments: un - driver soft state (unit) structure
 */

static void
sr_ejected(struct sd_lun *un)
{
	struct sd_errstats *stp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	un->un_f_blockcount_is_valid = FALSE;
	un->un_f_tgt_blocksize_is_valid = FALSE;
	mutex_exit(SD_MUTEX(un));
	cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
	mutex_enter(SD_MUTEX(un));

	if (un->un_errstats != NULL) {
		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		stp->sd_capacity.value.ui64 = 0;
	}
}


/*
 * Function: sr_check_wp()
 *
 * Description: This routine checks the write protection of removable media
 *		and hotpluggable devices via the write protect bit in the
 *		device specific field of the Mode Page Header. Some devices
 *		choke on an unsupported mode page. To work around this issue,
 *		this routine requests the 0x3f mode page (request for all
 *		pages) for all device types.
 *
 * Arguments: dev - the device 'dev_t'
 *
 * Return Code: int indicating if the device is write protected (1) or not (0)
 *
 * Context: Kernel thread.
 *
 */

static int
sr_check_wp(dev_t dev)
{
	struct sd_lun	*un;
	uchar_t		device_specific;
	uchar_t		*sense;
	int		hdrlen;
	int		rval = FALSE;
	int		status;
	sd_ssc_t	*ssc;

	/*
	 * Note: The return codes for this routine should be reworked to
	 * properly handle the case of a NULL softstate.
	 */
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (FALSE);
	}

	if (un->un_f_cfg_is_atapi == TRUE) {
		/*
		 * The mode page contents are not required; set the allocation
		 * length for the mode page header only
		 */
		hdrlen = MODE_HEADER_LENGTH_GRP2;
		sense = kmem_zalloc(hdrlen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (status != 0)
			goto err_exit;
		device_specific =
		    ((struct mode_header_grp2 *)sense)->device_specific;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
		sense = kmem_zalloc(hdrlen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (status != 0)
			goto err_exit;
		device_specific =
		    ((struct mode_header *)sense)->device_specific;
	}


	/*
	 * The mode sense succeeded, so check the write protect bit here.
	 * If the mode sense failed above, rval remains FALSE; not all
	 * disks understand this query, and such devices are assumed not
	 * to be writable.
	 */
	if (device_specific & WRITE_PROTECT) {
		rval = TRUE;
	}

err_exit:
	kmem_free(sense, hdrlen);
	return (rval);
}

/*
 * Function: sr_volume_ctrl()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		audio output volume ioctl requests. (CDROMVOLCTRL)
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to user audio volume control structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state() fails
 *		EINVAL if data pointer is NULL
 *
 */

static int
sr_volume_ctrl(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct cdrom_volctrl	volume;
	struct cdrom_volctrl	*vol = &volume;
	uchar_t			*sense_page;
	uchar_t			*select_page;
	uchar_t			*sense;
	uchar_t			*select;
	int			sense_buflen;
	int			select_buflen;
	int			rval;
	sd_ssc_t		*ssc;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) {
		return (EFAULT);
	}

	if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
		struct mode_header_grp2	*sense_mhp;
		struct mode_header_grp2	*select_mhp;
		int			bd_len;

		sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN;
		select_buflen = MODE_HEADER_LENGTH_GRP2 +
		    MODEPAGE_AUDIO_CTRL_LEN;
		sense = kmem_zalloc(sense_buflen, KM_SLEEP);
		select = kmem_zalloc(select_buflen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
		    sense_buflen, MODEPAGE_AUDIO_CTRL,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);

		if (rval != 0) {
			SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
			    "sr_volume_ctrl: Mode Sense Failed\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (rval);
		}
		sense_mhp = (struct mode_header_grp2 *)sense;
		select_mhp = (struct mode_header_grp2 *)select;
		bd_len = (sense_mhp->bdesc_length_hi << 8) |
		    sense_mhp->bdesc_length_lo;
		if (bd_len > MODE_BLK_DESC_LENGTH) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense returned invalid "
			    "block descriptor length\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (EIO);
		}
		sense_page = (uchar_t *)
		    (sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
		select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2);
		select_mhp->length_msb = 0;
		select_mhp->length_lsb = 0;
		select_mhp->bdesc_length_hi = 0;
		select_mhp->bdesc_length_lo = 0;
	} else {
		struct mode_header	*sense_mhp, *select_mhp;

		sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
		select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
		sense = kmem_zalloc(sense_buflen, KM_SLEEP);
		select = kmem_zalloc(select_buflen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
		    sense_buflen, MODEPAGE_AUDIO_CTRL,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);

		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense Failed\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (rval);
		}
		sense_mhp = (struct mode_header *)sense;
		select_mhp = (struct mode_header *)select;
		if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense returned invalid "
			    "block descriptor length\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (EIO);
		}
		sense_page = (uchar_t *)
		    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
		select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
		select_mhp->length = 0;
		select_mhp->bdesc_length = 0;
	}
	/*
	 * Note: An audio control data structure could be created and overlaid
	 * on the following in place of the array indexing method implemented.
	 */

	/* Build the select data for the user volume data */
	select_page[0] = MODEPAGE_AUDIO_CTRL;
	select_page[1] = 0xE;
	/* Set the immediate bit */
	select_page[2] = 0x04;
	/* Zero out reserved fields */
	select_page[3] = 0x00;
	select_page[4] = 0x00;
	/* Return sense data for fields not to be modified */
	select_page[5] = sense_page[5];
	select_page[6] = sense_page[6];
	select_page[7] = sense_page[7];
	/* Set the user specified volume levels for channel 0 and 1 */
	select_page[8] = 0x01;
	select_page[9] = vol->channel0;
	select_page[10] = 0x02;
	select_page[11] = vol->channel1;
	/* Channel 2 and 3 are currently unsupported so return the sense data */
	select_page[12] = sense_page[12];
	select_page[13] = sense_page[13];
	select_page[14] = sense_page[14];
	select_page[15] = sense_page[15];

	ssc = sd_ssc_init(un);
	if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	} else {
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	}
	sd_ssc_fini(ssc);

	kmem_free(sense, sense_buflen);
	kmem_free(select, select_buflen);
	return (rval);
}


/*
 * Function: sr_read_sony_session_offset()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests for session offset information. (CDROMREADOFFSET)
 *		The address of the first track in the last session of a
 *		multi-session CD-ROM is returned.
 *
 *		Note: This routine uses a vendor specific key value in the
 *		command control field without implementing any vendor check here
 *		or in the ioctl routine.
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to an int to hold the requested address
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if ddi_get_soft_state() fails
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	caddr_t			buffer;
	char			cdb[CDB_GROUP1];
	int			session_offset = 0;
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte response data)
	 */
	cdb[8] = SONY_SESSION_OFFSET_LEN;
	/* Byte 9 is the control byte. A vendor specific value is used */
	cdb[9] = SONY_SESSION_OFFSET_KEY;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
		kmem_free(com, sizeof (*com));
		return (rval);
	}
	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
		session_offset =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		/*
		 * The drive returns the offset in units of the current
		 * lbasize blocks. Convert it to 2K blocks before returning
		 * it to the user.
		 */
		if (un->un_tgt_blocksize == CDROM_BLK_512) {
			session_offset >>= 2;
		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
			session_offset >>= 1;
		}
	}

	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
		rval = EFAULT;
	}

	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sd_wm_cache_constructor()
 *
 * Description: Cache Constructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm	- A pointer to the sd_w_map to be initialized.
 *		un	- sd_lun structure for the device.
 *		flag	- the km flags passed to constructor
 *
 * Return Code: 0 on success.
 *		-1 on failure.
 */

/*ARGSUSED*/
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
	bzero(wm, sizeof (struct sd_w_map));
	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
	return (0);
}


/*
 * Function: sd_wm_cache_destructor()
 *
 * Description: Cache destructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm	- A pointer to the sd_w_map to be destroyed.
 *		un	- sd_lun structure for the device.
 */
/*ARGSUSED*/
static void
sd_wm_cache_destructor(void *wm, void *un)
{
	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
}


/*
 * Function: sd_range_lock()
 *
 * Description: Lock the specified range of blocks to ensure that a
 *		read-modify-write operation is atomic and that no other I/O
 *		writes to the same location. The range is specified in terms
 *		of start and end blocks. Block numbers are the actual media
 *		block numbers, not system block numbers.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		startb	- The starting block number
 *		endb	- The end block number
 *		typ	- type of i/o - simple/read_modify_write
 *
 * Return Code: wm - pointer to the wmap structure.
 *
 * Context: This routine can sleep.
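 *
 *		Note: internally this is implemented as a small state
 *		machine: SD_WM_CHK_LIST looks for an overlapping map,
 *		SD_WM_LOCK_RANGE allocates and links a new map, and
 *		SD_WM_WAIT_MAP sleeps on a busy map before rechecking.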
 */

static struct sd_w_map *
sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
{
	struct sd_w_map *wmp = NULL;
	struct sd_w_map *sl_wmp = NULL;
	struct sd_w_map *tmp_wmp;
	wm_state state = SD_WM_CHK_LIST;


	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	while (state != SD_WM_DONE) {

		switch (state) {
		case SD_WM_CHK_LIST:
			/*
			 * This is the starting state. Check the wmap list
			 * to see if the range is currently available.
			 */
			if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
				/*
				 * If this is a simple write and no rmw
				 * i/o is pending then try to lock the
				 * range as the range should be available.
				 */
				state = SD_WM_LOCK_RANGE;
			} else {
				tmp_wmp = sd_get_range(un, startb, endb);
				if (tmp_wmp != NULL) {
					if ((wmp != NULL) && ONLIST(un, wmp)) {
						/*
						 * Should not keep onlist wmps
						 * while waiting; this macro
						 * also sets wmp = NULL.
						 */
						FREE_ONLIST_WMAP(un, wmp);
					}
					/*
					 * sl_wmp is the wmap on which the wait
					 * is done. Since tmp_wmp points to the
					 * in-use wmap, set sl_wmp to tmp_wmp
					 * and change the state to sleep.
					 */
					sl_wmp = tmp_wmp;
					state = SD_WM_WAIT_MAP;
				} else {
					state = SD_WM_LOCK_RANGE;
				}

			}
			break;

		case SD_WM_LOCK_RANGE:
			ASSERT(un->un_wm_cache);
			/*
			 * The range needs to be locked, so try to get a wmap.
			 * First attempt it with KM_NOSLEEP; we want to avoid
			 * sleeping if possible, as we would have to release
			 * the sd mutex in order to sleep.
			 */
			if (wmp == NULL)
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_NOSLEEP);
			if (wmp == NULL) {
				mutex_exit(SD_MUTEX(un));
				_NOTE(DATA_READABLE_WITHOUT_LOCK
				    (sd_lun::un_wm_cache))
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_SLEEP);
				mutex_enter(SD_MUTEX(un));
				/*
				 * We released the mutex, so recheck by going
				 * back to the check-list state.
				 */
				state = SD_WM_CHK_LIST;
			} else {
				/*
				 * We exit the state machine since we have the
				 * wmap. Do the housekeeping first: place the
				 * wmap on the wmap list if it is not on it
				 * already, and then set the state to done.
				 */
				wmp->wm_start = startb;
				wmp->wm_end = endb;
				wmp->wm_flags = typ | SD_WM_BUSY;
				if (typ & SD_WTYPE_RMW) {
					un->un_rmw_count++;
				}
				/*
				 * If not already on the list, link it in.
				 */
				if (!ONLIST(un, wmp)) {
					wmp->wm_next = un->un_wm;
					wmp->wm_prev = NULL;
					if (wmp->wm_next)
						wmp->wm_next->wm_prev = wmp;
					un->un_wm = wmp;
				}
				state = SD_WM_DONE;
			}
			break;

		case SD_WM_WAIT_MAP:
			ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
			/*
			 * Wait is done on sl_wmp, which is set in the
			 * check_list state.
			 */
			sl_wmp->wm_wanted_count++;
			cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
			sl_wmp->wm_wanted_count--;
			/*
			 * We can reuse the memory from the completed sl_wmp
			 * lock range for our new lock, but only if no one is
			 * waiting for it.
			 */
			ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
			if (sl_wmp->wm_wanted_count == 0) {
				if (wmp != NULL)
					CHK_N_FREEWMP(un, wmp);
				wmp = sl_wmp;
			}
			sl_wmp = NULL;
			/*
			 * After waking up, need to recheck for availability of
			 * range.
			 */
			state = SD_WM_CHK_LIST;
			break;

		default:
			panic("sd_range_lock: "
			    "Unknown state %d in sd_range_lock", state);
			/*NOTREACHED*/
		} /* switch(state) */

	} /* while(state != SD_WM_DONE) */

	mutex_exit(SD_MUTEX(un));

	ASSERT(wmp != NULL);

	return (wmp);
}


/*
 * Function: sd_get_range()
 *
 * Description: Determine whether any in-progress I/O overlaps this one.
 *		Returns the write map of the first such I/O, or NULL otherwise.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		startb	- The starting block number
 *		endb	- The end block number
 *
 * Return Code: wm - pointer to the wmap structure.
 */

static struct sd_w_map *
sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
{
	struct sd_w_map *wmp;

	ASSERT(un != NULL);

	for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
		if (!(wmp->wm_flags & SD_WM_BUSY)) {
			continue;
		}
		if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
			break;
		}
		if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
			break;
		}
	}

	return (wmp);
}


/*
 * Function: sd_free_inlist_wmap()
 *
 * Description: Unlink and free a write map struct.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		wmp	- sd_w_map which needs to be unlinked.
 */

static void
sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
{
	ASSERT(un != NULL);

	if (un->un_wm == wmp) {
		un->un_wm = wmp->wm_next;
	} else {
		wmp->wm_prev->wm_next = wmp->wm_next;
	}

	if (wmp->wm_next) {
		wmp->wm_next->wm_prev = wmp->wm_prev;
	}

	wmp->wm_next = wmp->wm_prev = NULL;

	kmem_cache_free(un->un_wm_cache, wmp);
}


/*
 * Function: sd_range_unlock()
 *
 * Description: Unlock the range locked by wm.
 *		Free the write map if nobody else is waiting on it.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		wm	- sd_w_map which needs to be unlocked.
 */

static void
sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
{
	ASSERT(un != NULL);
	ASSERT(wm != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	if (wm->wm_flags & SD_WTYPE_RMW) {
		un->un_rmw_count--;
	}

	if (wm->wm_wanted_count) {
		wm->wm_flags = 0;
		/*
		 * Broadcast that the wmap is available now.
		 */
		cv_broadcast(&wm->wm_avail);
	} else {
		/*
		 * If no one is waiting on the map, it should be freed.
		 */
		sd_free_inlist_wmap(un, wm);
	}

	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_read_modify_write_task
 *
 * Description: Called from a taskq thread to initiate the write phase of
 *		a read-modify-write request. This is used for targets where
 *		un->un_sys_blocksize != un->un_tgt_blocksize.
 *
 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
 *
 * Context: Called under taskq thread context.
 */

static void
sd_read_modify_write_task(void *arg)
{
	struct sd_mapblocksize_info	*bsp;
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;

	bp = arg;	/* The bp is given in arg */
	ASSERT(bp != NULL);

	/* Get the pointer to the layer-private data struct */
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);

	/*
	 * This is the write phase of a read-modify-write request, called
	 * under the context of a taskq thread in response to the completion
	 * of the read portion of the rmw request completing under interrupt
	 * context. The write request must be sent from here down the iostart
	 * chain as if it were being sent from sd_mapblocksize_iostart(), so
	 * we use the layer index saved in the layer-private data area.
	 */
	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
}


/*
 * Function: sddump_do_read_of_rmw()
 *
 * Description: This routine will be called from sddump(). If sddump() is
 *		called with an I/O that is not aligned on a device blocksize
 *		boundary, then the write has to be converted to a
 *		read-modify-write. Do the read part here in order to keep
 *		sddump() simple. Note that the sd_mutex is held across the
 *		call to this routine.
 *
 * Arguments: un	- sd_lun
 *		blkno	- block number in terms of media block size.
 *		nblk	- number of blocks.
 *		bpp	- pointer to pointer to the buf structure. On return
 *			  from this function, *bpp points to the valid buffer
 *			  to which the write has to be done.
 *
 * Return Code: 0 for success or errno-type return code
 */

static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
    struct buf **bpp)
{
	int err;
	int i;
	int rval;
	struct buf *bp;
	struct scsi_pkt *pkt = NULL;
	uint32_t target_blocksize;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	target_blocksize = un->un_tgt_blocksize;

	mutex_exit(SD_MUTEX(un));

	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
	if (bp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
	    blkno, nblk);
	if (rval != 0) {
		scsi_free_consistent_buf(bp);
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	pkt->pkt_flags |= FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {

		/*
		 * sd_scsi_poll() returns 0 (success) if the command completes
		 * and the status block is STATUS_GOOD. We should only check
		 * errors if this condition is not true. Even then we should
		 * send our own request sense packet only if we have a check
		 * condition and auto request sense has not been performed by
		 * the HBA.
		 */
		SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");

		if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
			err = 0;
			break;
		}

		/*
		 * Check CMD_DEV_GONE 1st, give up if device is gone,
		 * no need to read RQS data.
		 */
		if (pkt->pkt_reason == CMD_DEV_GONE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Error while dumping state with rmw..."
			    "Device is gone\n");
			break;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with CHECK, try # %d\n", i);
			if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
				(void) sd_send_polled_RQS(un);
			}

			continue;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
			int reset_retval = 0;

			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with BUSY, try # %d\n", i);

			if (un->un_f_lun_reset_enabled == TRUE) {
				reset_retval = scsi_reset(SD_ADDRESS(un),
				    RESET_LUN);
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
			(void) sd_send_polled_RQS(un);

		} else {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with 0x%x, try # %d\n",
			    SD_GET_PKT_STATUS(pkt), i);
			mutex_enter(SD_MUTEX(un));
			sd_reset_target(un, pkt);
			mutex_exit(SD_MUTEX(un));
		}

		/*
		 * If we are not getting anywhere with lun/target resets,
		 * let's reset the bus.
		 */
		if (i > SD_NDUMP_RETRIES/2) {
			(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			(void) sd_send_polled_RQS(un);
		}

	}
	scsi_destroy_pkt(pkt);

	if (err != 0) {
		scsi_free_consistent_buf(bp);
		*bpp = NULL;
	} else {
		*bpp = bp;
	}

done:
	mutex_enter(SD_MUTEX(un));
	return (err);
}


/*
 * Function: sd_failfast_flushq
 *
 * Description: Take all bp's on the wait queue that have B_FAILFAST set
 *		in b_flags and move them onto the failfast queue, then kick
 *		off a thread to return all bp's on the failfast queue to
 *		their owners with an error set.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

static void
sd_failfast_flushq(struct sd_lun *un)
{
	struct buf *bp;
	struct buf *next_waitq_bp;
	struct buf *prev_waitq_bp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
	ASSERT(un->un_failfast_bp == NULL);

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: entry: un:0x%p\n", un);

	/*
	 * Check if we should flush all bufs when entering failfast state, or
	 * just those with B_FAILFAST set.
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		/*
		 * Move *all* bp's on the wait queue to the failfast flush
		 * queue, including those that do NOT have B_FAILFAST set.
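		 * The SD_FAILFAST_FLUSH_ALL_BUFS bit in the
		 * sd_failfast_flushctl tunable selects this behavior.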
28456 */ 28457 if (un->un_failfast_headp == NULL) { 28458 ASSERT(un->un_failfast_tailp == NULL); 28459 un->un_failfast_headp = un->un_waitq_headp; 28460 } else { 28461 ASSERT(un->un_failfast_tailp != NULL); 28462 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 28463 } 28464 28465 un->un_failfast_tailp = un->un_waitq_tailp; 28466 28467 /* update kstat for each bp moved out of the waitq */ 28468 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 28469 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 28470 } 28471 28472 /* empty the waitq */ 28473 un->un_waitq_headp = un->un_waitq_tailp = NULL; 28474 28475 } else { 28476 /* 28477 * Go thru the wait queue, pick off all entries with 28478 * B_FAILFAST set, and move these onto the failfast queue. 28479 */ 28480 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 28481 /* 28482 * Save the pointer to the next bp on the wait queue, 28483 * so we get to it on the next iteration of this loop. 28484 */ 28485 next_waitq_bp = bp->av_forw; 28486 28487 /* 28488 * If this bp from the wait queue does NOT have 28489 * B_FAILFAST set, just move on to the next element 28490 * in the wait queue. Note, this is the only place 28491 * where it is correct to set prev_waitq_bp. 28492 */ 28493 if ((bp->b_flags & B_FAILFAST) == 0) { 28494 prev_waitq_bp = bp; 28495 continue; 28496 } 28497 28498 /* 28499 * Remove the bp from the wait queue. 28500 */ 28501 if (bp == un->un_waitq_headp) { 28502 /* The bp is the first element of the waitq. */ 28503 un->un_waitq_headp = next_waitq_bp; 28504 if (un->un_waitq_headp == NULL) { 28505 /* The wait queue is now empty */ 28506 un->un_waitq_tailp = NULL; 28507 } 28508 } else { 28509 /* 28510 * The bp is either somewhere in the middle 28511 * or at the end of the wait queue. 28512 */ 28513 ASSERT(un->un_waitq_headp != NULL); 28514 ASSERT(prev_waitq_bp != NULL); 28515 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 28516 == 0); 28517 if (bp == un->un_waitq_tailp) { 28518 /* bp is the last entry on the waitq. */ 28519 ASSERT(next_waitq_bp == NULL); 28520 un->un_waitq_tailp = prev_waitq_bp; 28521 } 28522 prev_waitq_bp->av_forw = next_waitq_bp; 28523 } 28524 bp->av_forw = NULL; 28525 28526 /* 28527 * update kstat since the bp is moved out of 28528 * the waitq 28529 */ 28530 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 28531 28532 /* 28533 * Now put the bp onto the failfast queue. 28534 */ 28535 if (un->un_failfast_headp == NULL) { 28536 /* failfast queue is currently empty */ 28537 ASSERT(un->un_failfast_tailp == NULL); 28538 un->un_failfast_headp = 28539 un->un_failfast_tailp = bp; 28540 } else { 28541 /* Add the bp to the end of the failfast q */ 28542 ASSERT(un->un_failfast_tailp != NULL); 28543 ASSERT(un->un_failfast_tailp->b_flags & 28544 B_FAILFAST); 28545 un->un_failfast_tailp->av_forw = bp; 28546 un->un_failfast_tailp = bp; 28547 } 28548 } 28549 } 28550 28551 /* 28552 * Now return all bp's on the failfast queue to their owners. 28553 */ 28554 while ((bp = un->un_failfast_headp) != NULL) { 28555 28556 un->un_failfast_headp = bp->av_forw; 28557 if (un->un_failfast_headp == NULL) { 28558 un->un_failfast_tailp = NULL; 28559 } 28560 28561 /* 28562 * We want to return the bp with a failure error code, but 28563 * we do not want a call to sd_start_cmds() to occur here, 28564 * so use sd_return_failed_command_no_restart() instead of 28565 * sd_return_failed_command(). 28566 */ 28567 sd_return_failed_command_no_restart(un, bp, EIO); 28568 } 28569 28570 /* Flush the xbuf queues if required. 
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}


/*
 * Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 * Arguments: bp - ptr to buf struct to be examined.
 *
 * Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



/*
 * Function: sd_setup_next_xfer
 *
 * Description: Prepare next I/O operation using DMA_PARTIAL
 *
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate next block number and amount to be transferred.
	 *
	 * How much data has NOT been transferred to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * Figure out how many blocks have NOT been transferred to the HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * Set the starting block number to the end of what WAS transferred.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
	 * will call scsi_init_pkt with NULL_FUNC so we do not have to release
	 * the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transferred.
		 */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;

		return (1);
	}

	/*
	 * There's really only one possible return value from
	 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt
	 * returns NULL.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}

/*
 * Function: sd_panic_for_res_conflict
 *
 * Description: Call panic with a string formatted with "Reservation Conflict"
 *		and a human readable identifier indicating the SD instance
 *		that experienced the reservation conflict.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

#define	SD_RESV_CONFLICT_FMT_LEN 40
void
sd_panic_for_res_conflict(struct sd_lun *un)
{
	char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
	char path_str[MAXPATHLEN];

	(void) snprintf(panic_str, sizeof (panic_str),
	    "Reservation Conflict\nDisk: %s",
	    ddi_pathname(SD_DEVINFO(un), path_str));

	panic(panic_str);
}

/*
 * Note: The following sd_faultinjection_ioctl() routines implement driver
 * support for fault injection for error analysis, allowing faults to be
 * injected into multiple layers of the driver.
 *
 */

#ifdef SD_FAULT_INJECTION
static uint_t sd_fault_injection_on = 0;

/*
 * Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *		faultinjection ioctls to inject errors into the
 *		layer model
 *
 * Arguments: cmd	- the ioctl cmd received
 *		arg	- the argument passed in from the user; also used
 *			  to return data
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
{
	uint_t i = 0;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Start Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Empty stray or unused structs from the fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;

	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR entries allowed in the queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store an xb struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store a un struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}

		break;

	case SDIOCINSERTARQ:
		/* Store an arq struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push stored xb, pkt, un, and arq onto fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return buffer of log from Injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for the return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
}


/*
 * Function: sd_injection_log()
 *
 * Description: This routine adds buf to the already existing injection log
 *		for retrieval via sd_faultinjection_ioctl() for use in fault
 *		detection and recovery.
 *
 * Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add logged value to Injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t	offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}


/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
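 *		It is invoked from the command-completion path (sdintr())
 *		once fault injection has been armed via SDIOCRUN.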
 *
 * Arguments: pktp - packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp, xb and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	SD_INFO(SD_LOG_SDTEST, un,
	    "sd_faultinjection: is working for copying\n");

	/* take next set off fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* set variables accordingly */
	/* set pkt if it was on fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		if (fi_pkt->pkt_cdbp != 0xff)
			SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}
	/* set xb if it was on fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		if (fi_xb->xb_retry_count != 0)
			SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		/*
		 * if (fi_xb->xb_sense_data[0] != -1) {
		 *	bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
		 *	SENSE_LENGTH);
		 * }
		 */
		bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH);

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_code, "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_key, "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_add_code, "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_qual_code, "es_qual_code");
		struct scsi_extended_sense *esp;
		esp = (struct scsi_extended_sense *)xb->xb_sense_data;
		esp->es_class = CLASS_EXTENDED_SENSE;
	}

	/* set un if it was on fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in auto request sense if it was on fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free does not guarantee that the pointer is set to NULL.
	 * Since we use these pointers to determine whether we have values
	 * to set, make sure they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked in sd_unit_attach(). Before calling it, the
 * properties in the conf file should already have been processed, and the
 * "hotpluggable" property should have been processed as well.
 *
 * The sd driver distinguishes 3 different types of devices: removable media,
 * non-removable media, and hotpluggable. Below the differences are defined:
 *
 * 1. Device ID
 *
 *    The device ID of a device is used to identify this device. Refer to
 *    ddi_devid_register(9F).
 *
 *    For a non-removable media disk device which can provide the 0x80 or
 *    0x83 VPD page (refer to the INQUIRY command of the SCSI SPC
 *    specification), a unique device ID is created to identify this device.
 *    For other non-removable media devices, a default device ID is created
 *    only if this device has at least 2 alternate cylinders. Otherwise,
 *    this device has no devid.
 *
 *    -------------------------------------------------------
 *    removable media   hotpluggable  | Can Have Device ID
 *    -------------------------------------------------------
 *         false             false    |     Yes
 *         false             true     |     Yes
 *         true                x      |     No
 *    -------------------------------------------------------
 *
 *
 * 2. SCSI group 4 commands
 *
 *    In the SCSI specs, only some commands in the group 4 command set can
 *    use 8-byte addresses that can be used to access >2TB storage spaces.
 *    Other commands have no such capability. Without supporting group 4,
 *    it is impossible to make full use of the storage space of a disk with
 *    a capacity larger than 2TB.
 *
 *    -----------------------------------------------
 *    removable media   hotpluggable   LP64  |  Group
 *    -----------------------------------------------
 *          false          false      false  |   1
 *          false          false      true   |   4
 *          false          true       false  |   1
 *          false          true       true   |   4
 *          true             x          x    |   5
 *    -----------------------------------------------
 *
 *
 * 3. Check for VTOC Label
 * 3. Check for VTOC Label
 *
 * If a direct-access disk has no EFI label, sd will check whether it has
 * a valid VTOC label. Now, sd also does that check for removable media
 * and hotpluggable devices.
 *
 *	--------------------------------------------------------------
 *	Direct-Access  removable media  hotpluggable  |  Check Label
 *	--------------------------------------------------------------
 *	    false           false          false      |     No
 *	    false           false          true       |     No
 *	    false           true           false      |     Yes
 *	    false           true           true       |     Yes
 *	    true              x              x        |     Yes
 *	--------------------------------------------------------------
 *
 *
 * 4. Building default VTOC label
 *
 * As section 3 says, sd checks whether certain kinds of devices have a
 * VTOC label. If those devices have no valid VTOC label, sd(7d) will
 * attempt to create a default VTOC for them. Currently sd creates a
 * default VTOC label for all devices on the x86 platform (VTOC_16), but
 * only for removable media devices on SPARC (VTOC_8).
 *
 *	-----------------------------------------------------------
 *	removable media  hotpluggable  platform  |  Default Label
 *	-----------------------------------------------------------
 *	     false          false       sparc    |      No
 *	     false          false       x86      |      Yes
 *	     false          true        x86      |      Yes
 *	     false          true        sparc    |      Yes
 *	     true             x           x      |      Yes
 *	-----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 * sd supports a non-512-byte blocksize for removable media devices only.
 * For other devices, only a 512-byte blocksize is supported. This may
 * change in the near future because some RAID devices require a
 * non-512-byte blocksize.
 *
 *	-----------------------------------------------------------
 *	removable media  hotpluggable  |  non-512-byte blocksize
 *	-----------------------------------------------------------
 *	     false          false      |          No
 *	     false          true       |          No
 *	     true             x        |          Yes
 *	-----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 * The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used to
 * query whether a device is a removable media device. It returns 1 for
 * removable media devices and 0 for others (a usage sketch follows this
 * comment block).
 *
 * The automatic mounting subsystem should distinguish between the types
 * of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 * fdisk is the traditional partitioning method on the x86 platform. The
 * sd(7d) driver supports fdisk partitions only on x86; on SPARC, sd does
 * not support fdisk partitions at all. Note: pcfs(7fs) can recognize
 * fdisk partitions on both the x86 and SPARC platforms.
 *
 *	-----------------------------------------------------------
 *	platform  removable media  USB/1394  |  fdisk supported
 *	-----------------------------------------------------------
 *	   x86          X              X     |      true
 *	-----------------------------------------------------------
 *	  sparc         X              X     |      false
 *	-----------------------------------------------------------
 *
 *
 * 8. MBOOT/MBR
 *
 * Although sd(7d) does not support fdisk on the SPARC platform, it does
 * support read/write of the mboot for removable media devices on SPARC.
 *
 *	-----------------------------------------------------------
 *	platform  removable media  USB/1394  |  mboot supported
 *	-----------------------------------------------------------
 *	   x86          X              X     |      true
 *	-----------------------------------------------------------
 *	  sparc       false          false   |      false
 *	  sparc       false          true    |      true
 *	  sparc       true           false   |      true
 *	  sparc       true           true    |      true
 *	-----------------------------------------------------------
 *
 *
 * 9. Error handling during device open
 *
 * If opening a disk device fails, an errno is returned. For some kinds of
 * errors, the errno returned differs depending on whether the device is a
 * removable media device. This brings USB/1394 hard disks in line with
 * expected hard disk behavior. It is not expected that this breaks any
 * application.
 *
 *	------------------------------------------------------
 *	removable media  hotpluggable  |  errno
 *	------------------------------------------------------
 *	     false          false      |  EIO
 *	     false          true       |  EIO
 *	     true             x        |  ENXIO
 *	------------------------------------------------------
 *
 *
 * 10. ioctls: DKIOCEJECT, CDROMEJECT
 *
 * These ioctls are applicable only to removable media devices.
 *
 *	-----------------------------------------------------------
 *	removable media  hotpluggable  |  DKIOCEJECT, CDROMEJECT
 *	-----------------------------------------------------------
 *	     false          false      |           No
 *	     false          true       |           No
 *	     true             x        |           Yes
 *	-----------------------------------------------------------
 *
 *
 * 11. Kstats for partitions
 *
 * sd creates partition kstats for non-removable media devices. USB and
 * Firewire hard disks now have partition kstats.
 *
 *	------------------------------------------------------
 *	removable media  hotpluggable  |  kstat
 *	------------------------------------------------------
 *	     false          false      |   Yes
 *	     false          true       |   Yes
 *	     true             x        |   No
 *	------------------------------------------------------
 *
 *
 * 12. Removable media & hotpluggable properties
 *
 * The sd driver creates a "removable-media" property for removable media
 * devices. Parent nexus drivers create a "hotpluggable" property if they
 * support hotplugging.
 *
 *	---------------------------------------------------------------------
 *	removable media  hotpluggable  |  "removable-media"  "hotpluggable"
 *	---------------------------------------------------------------------
 *	     false          false      |        No                 No
 *	     false          true       |        No                 Yes
 *	     true           false      |        Yes                No
 *	     true           true       |        Yes                Yes
 *	---------------------------------------------------------------------
 *
 *
 * 13. Power Management
 *
 * sd only power-manages removable media devices or devices that support
 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
 *
 * A parent nexus that supports hotplugging can also set "pm-capable"
 * if the disk can be power managed.
 *
 *	------------------------------------------------------------
 *	removable media  hotpluggable  pm-capable  |  power manage
 *	------------------------------------------------------------
 *	     false          false        false     |      No
 *	     false          false        true      |      Yes
 *	     false          true         false     |      No
 *	     false          true         true      |      Yes
 *	     true             x            x       |      Yes
 *	------------------------------------------------------------
 *
 * USB and Firewire hard disks can now be power managed independently
 * of the framebuffer.
 *
 *
 * 14. Support for USB disks with capacity larger than 1TB
 *
 * Currently, sd does not permit a fixed disk device with a capacity
 * larger than 1TB to be used in a 32-bit operating system environment.
 * However, sd does not enforce this for removable media devices; instead,
 * it assumes that removable media devices cannot have a capacity larger
 * than 1TB. Therefore, using those devices on a 32-bit system is
 * partially supported, which can cause some unexpected results.
 *
 *	---------------------------------------------------------------------
 *	removable media  USB/1394  |  Capacity > 1TB  |  Used in 32-bit env
 *	---------------------------------------------------------------------
 *	     false         false   |       true       |         No
 *	     false         true    |       true       |         No
 *	     true          false   |       true       |         Yes
 *	     true          true    |       true       |         Yes
 *	---------------------------------------------------------------------
 *
 *
 * 15. Check write-protection at open time
 *
 * When a removable media device is opened for writing without the NDELAY
 * flag, sd checks whether the device is writable; an attempt to open a
 * write-protected device without the NDELAY flag fails.
 *
 *	------------------------------------------------------------
 *	removable media  USB/1394  |  WP Check
 *	------------------------------------------------------------
 *	     false         false   |     No
 *	     false         true    |     No
 *	     true          false   |     Yes
 *	     true          true    |     Yes
 *	------------------------------------------------------------
 *
 *
 * 16. syslog when a corrupted VTOC is encountered
 *
 * Currently, if an invalid VTOC is encountered, sd prints a syslog
 * message only for fixed SCSI disks.
 *
 *	------------------------------------------------------------
 *	removable media  USB/1394  |  print syslog
 *	------------------------------------------------------------
 *	     false         false   |     Yes
 *	     false         true    |     No
 *	     true          false   |     No
 *	     true          true    |     No
 *	------------------------------------------------------------
 */
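
/*
 * Illustrative sketch for item 6 above, not part of the driver: how a
 * userland consumer might use the DKIOCREMOVABLE ioctl from <sys/dkio.h>
 * to decide whether a disk holds removable media. The device path is
 * hypothetical.
 *
 *	#include <sys/dkio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int removable = 0;
 *	int fd = open("/dev/rdsk/c1t0d0s2", O_RDONLY | O_NDELAY);
 *
 *	if (fd >= 0 && ioctl(fd, DKIOCREMOVABLE, &removable) == 0 &&
 *	    removable != 0) {
 *		// apply the removable-media automounting policy
 *	}
 */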

static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int pm_capable_prop;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	/*
	 * Set the sync cache required flag to false. This ensures that
	 * no SYNC CACHE is sent when there have been no writes.
	 */
	un->un_f_sync_cache_required = FALSE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable, and for this kind
		 * of device it is possible to change the medium after the
		 * device has been opened. Thus we must support this
		 * operation.
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * Support the non-512-byte blocksize of removable media
		 * devices.
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK.
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * A removable media device may be opened with the NDELAY
		 * flag when there is no media in the drive, in which case
		 * we do not care whether the device is writable. Without
		 * the NDELAY flag, we need to check whether the media is
		 * write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * Need to start a SCSI watch thread to monitor the media
		 * state; when media is inserted or ejected, notify
		 * syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices do not support the START_STOP_UNIT command.
		 * Therefore, we had better check whether a device supports
		 * it before sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * Support the eject media ioctls:
		 * FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable-media devices do not support
		 * LOG_SENSE, we cannot use that command to check whether
		 * a removable media device supports power management.
		 * We assume that they support power management via the
		 * START_STOP_UNIT command and can be spun up and down
		 * without limitations.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Need to create a zero-length (boolean) property
		 * "removable-media" for removable media devices.
		 * Note that the return value of the property is not being
		 * checked: if the property cannot be created, we do not
		 * want the attach to fail altogether. This is consistent
		 * with other property creation in attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * Create a device ID for the device.
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable media devices once they are
		 * attached.
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to the SCSI specification, sense data comes in
		 * two formats: fixed format and descriptor format. At
		 * present, descriptor-format sense data is not supported
		 * for removable media, so it is enabled only for
		 * direct-access devices here.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set this in sd.conf to 0 in order to disable kstats.
		 * The default is 1, so they are enabled by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));

		/*
		 * Check if the HBA has set the "pm-capable" property.
		 * If "pm-capable" exists and is non-zero, we can power
		 * manage the device without checking the start/stop
		 * cycle count log sense page.
		 *
		 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0),
		 * we should not power manage the device.
		 *
		 * If "pm-capable" does not exist, pm_capable_prop will be
		 * set to SD_PM_CAPABLE_UNDEFINED (-1).
		 * In this case, sd will check the start/stop cycle count
		 * log sense page and power manage the device if the cycle
		 * count limit has not been exceeded.
		 */
		pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
		if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
			un->un_f_log_sense_supported = TRUE;
		} else {
			/*
			 * The pm-capable property exists.
			 *
			 * Convert "TRUE" values of pm_capable_prop to
			 * SD_PM_CAPABLE_TRUE (1) to make later checks
			 * easier. "TRUE" values are any values except
			 * SD_PM_CAPABLE_FALSE (0) and
			 * SD_PM_CAPABLE_UNDEFINED (-1).
			 */
			if (pm_capable_prop == SD_PM_CAPABLE_FALSE) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				un->un_f_pm_supported = TRUE;
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un,
			    un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {

		/*
		 * Have to watch hotpluggable devices as well, since that
		 * is the only way for userland applications to detect
		 * hot removal while the device is busy/mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;
	}
}
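
/*
 * Illustrative sketch, not part of the driver: a parent nexus that knows
 * its disks can be power managed could export the "pm-capable" property
 * consumed by sd_set_unit_attributes() above from its own attach path
 * (see PSARC/2002/250). child_dip and the value 1 are assumptions made
 * for illustration only.
 *
 *	if (ddi_prop_update_int(DDI_DEV_T_NONE, child_dip,
 *	    "pm-capable", 1) != DDI_PROP_SUCCESS) {
 *		// property creation failed; sd falls back to checking
 *		// the START/STOP cycle count log sense page
 *	}
 */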

/*
 * sd_tg_rdwr:
 *	Provides read/write access for cmlb via sd_tgops. The start_block
 *	is in system block size, reqlength in bytes.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int path_flag = (int)(uintptr_t)tg_cookie;
	char *dkl = NULL;
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t buffer_size = reqlength;
	int rval = 0;
	diskaddr_t cap;
	uint32_t lbasize;
	sd_ssc_t *ssc;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	ssc = sd_ssc_init(un);
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			goto done1;
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
			mutex_exit(SD_MUTEX(un));
			rval = EIO;
			goto done;
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize: re-adjust blkno and save
		 * the index to the beginning of dk_label.
		 */
		first_byte = SD_SYSBLOCKS2BYTES(un, start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to a multiple of target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un,
		    "sd_tg_rdwr: label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be inaccurate by a
	 * bounded amount (in the interest of response latency). As a
	 * result, failed READs are commonplace (due to the reading of
	 * metadata rather than data). Depending on the per-vendor/drive
	 * sense data, the failed READ can cause many (unnecessary) retries.
	 */
	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(ssc, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			rval = sd_send_scsi_READ(ssc, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				goto done1;
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(ssc, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

done1:
	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
done:
	sd_ssc_fini(ssc);
	return (rval);
}
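
/*
 * Worked example of the conversion done in sd_tg_rdwr() above, using
 * illustrative numbers: with a 512-byte system block size, a 2048-byte
 * target block size, start_block = 5 and reqlength = 1024:
 *
 *	first_byte  = 5 * 512                     = 2560
 *	real_addr   = 2560 / 2048                 = 1 (target block)
 *	end_block   = (2560 + 1024 + 2047) / 2048 = 2
 *	buffer_size = (2 - 1) * 2048              = 2048
 *
 * Since first_byte % 2048 != 0, the request is unaligned, so a bounce
 * buffer (dkl) is allocated and the caller's data is copied at the byte
 * offset of the system block within the target block, which
 * SD_TGTBYTEOFFSET() is assumed to compute: 2560 - 1 * 2048 = 512.
 */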

static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct sd_lun *un;
	diskaddr_t cap;
	uint32_t lbasize;
	int path_flag = (int)(uintptr_t)tg_cookie;
	int ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			sd_ssc_t *ssc;
			mutex_exit(SD_MUTEX(un));
			ssc = sd_ssc_init(un);
			ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0) {
				if (ret == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc,
					    SD_FMT_IGNORE);
				sd_ssc_fini(ssc);
				return (ret);
			}
			sd_ssc_fini(ssc);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);
	}
}
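
/*
 * Illustrative note, an assumption rather than anything stated here:
 * cmlb reaches sd_tg_rdwr() and sd_tg_getinfo() through the cmlb_tg_ops
 * vector (sd_tgops) that sd hands to cmlb_attach() during
 * sd_unit_attach(). Assuming the field names in cmlb.h, a capacity query
 * from cmlb reduces to something like:
 *
 *	diskaddr_t cap;
 *	(void) (*tgopsp->tg_getinfo)(devi, TG_GETCAPACITY, &cap, tg_cookie);
 */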

/*
 * Function: sd_ssc_ereport_post
 *
 * Description: Called when the sd driver needs to post an ereport.
 *
 * Context: Kernel thread or interrupt context.
 */
static void
sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess)
{
	int uscsi_path_instance = 0;
	uchar_t uscsi_pkt_reason;
	uint32_t uscsi_pkt_state;
	uint32_t uscsi_pkt_statistics;
	uint64_t uscsi_ena;
	uchar_t op_code;
	uint8_t *sensep;
	union scsi_cdb *cdbp;
	uint_t cdblen = 0;
	uint_t senlen = 0;
	struct sd_lun *un;
	dev_info_t *dip;
	char *devid;
	int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON |
	    SSC_FLAGS_INVALID_STATUS |
	    SSC_FLAGS_INVALID_SENSE |
	    SSC_FLAGS_INVALID_DATA;
	char assessment[16];

	ASSERT(ssc != NULL);
	ASSERT(ssc->ssc_uscsi_cmd != NULL);
	ASSERT(ssc->ssc_uscsi_info != NULL);

	un = ssc->ssc_un;
	ASSERT(un != NULL);

	dip = un->un_sd->sd_dev;

	/*
	 * Get the devid; it will only be passed to non-transport
	 * error reports.
	 */
	devid = DEVI(dip)->devi_devid_str;

	/*
	 * If we are syncing or dumping, the command will not be executed,
	 * so we bypass this situation.
	 */
	if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_DUMPING))
		return;

	uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason;
	uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance;
	uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state;
	uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics;
	uscsi_ena = ssc->ssc_uscsi_info->ui_ena;

	sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
	cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb;

	/* In rare cases, e.g. DOORLOCK, the cdb could be NULL. */
	if (cdbp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_ssc_ereport_post met empty cdb\n");
		return;
	}

	op_code = cdbp->scc_cmd;

	cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen;
	senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
	    ssc->ssc_uscsi_cmd->uscsi_rqresid);

	if (senlen > 0)
		ASSERT(sensep != NULL);

	/*
	 * Initialize the assessment string from drv_assess.
	 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending
	 * on the sense key returned.
	 */
	switch (drv_assess) {
	case SD_FM_DRV_RECOVERY:
		(void) sprintf(assessment, "%s", "recovered");
		break;
	case SD_FM_DRV_RETRY:
		(void) sprintf(assessment, "%s", "retry");
		break;
	case SD_FM_DRV_NOTICE:
		(void) sprintf(assessment, "%s", "info");
		break;
	case SD_FM_DRV_FATAL:
	default:
		(void) sprintf(assessment, "%s", "unknown");
	}
	/*
	 * If drv_assess == SD_FM_DRV_RECOVERY, this is a recovered
	 * command, and we post ereport.io.scsi.cmd.disk.recovered.
	 * driver-assessment is always "recovered" here.
	 */
	if (drv_assess == SD_FM_DRV_RECOVERY) {
		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
		    "cmd.disk.recovered", uscsi_ena, devid, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    "driver-assessment", DATA_TYPE_STRING, assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
		return;
	}

	/*
	 * If there is unexpected/undecodable data, we post
	 * ereport.io.scsi.cmd.disk.dev.uderr.
	 * driver-assessment is set based on the drv_assess parameter.
	 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
	 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
	 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
	 * SSC_FLAGS_INVALID_DATA - invalid data sent back.
	 */
	if (ssc->ssc_flags & ssc_invalid_flags) {
		if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    "cmd.disk.dev.uderr", uscsi_ena, devid,
			    DDI_NOSLEEP, FM_VERSION,
			    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb", DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats", DATA_TYPE_UINT32,
			    uscsi_pkt_statistics,
			    "stat-code", DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    "un-decode-info", DATA_TYPE_STRING,
			    ssc->ssc_info,
			    "un-decode-value", DATA_TYPE_UINT8_ARRAY,
			    senlen, sensep,
			    NULL);
		} else {
			/*
			 * For other types of invalid data, the
			 * un-decode-value field is empty because the
			 * undecodable content can be seen from the upper
			 * level payload or inside un-decode-info.
			 */
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    "cmd.disk.dev.uderr", uscsi_ena, devid,
			    DDI_NOSLEEP, FM_VERSION,
			    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb", DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats", DATA_TYPE_UINT32,
			    uscsi_pkt_statistics,
			    "stat-code", DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    "un-decode-info", DATA_TYPE_STRING,
			    ssc->ssc_info,
			    "un-decode-value", DATA_TYPE_UINT8_ARRAY,
			    0, NULL,
			    NULL);
		}
		ssc->ssc_flags &= ~ssc_invalid_flags;
		return;
	}

	if (uscsi_pkt_reason != CMD_CMPLT ||
	    (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) {
		/*
		 * pkt-reason != CMD_CMPLT, or SSC_FLAGS_TRAN_ABORT was set
		 * inside sd_start_cmds due to errors (bad packet or fatal
		 * transport error). We treat it as a transport error and
		 * post ereport.io.scsi.cmd.disk.tran.
		 * driver-assessment is set based on drv_assess.
		 * We set devid to NULL because it is a transport error.
		 */
		if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
			ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;

		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
		    "cmd.disk.tran", uscsi_ena, NULL, DDI_NOSLEEP, FM_VERSION,
		    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    "driver-assessment", DATA_TYPE_STRING,
		    drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
	} else {
		/*
		 * If we got here, we have a completed command and need to
		 * investigate the sense data further to see what kind of
		 * ereport to post.
		 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr
		 * if sense-key == 0x3.
		 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise.
		 * driver-assessment is set based on the drv_assess
		 * parameter.
		 */
		if (senlen > 0) {
			/*
			 * Here we have sense data available.
			 */
			uint8_t sense_key;
			sense_key = scsi_sense_key(sensep);
			if (sense_key == 0x3) {
				/*
				 * sense-key == 0x3 (medium error);
				 * driver-assessment should be "fatal" if
				 * drv_assess is SD_FM_DRV_FATAL.
				 */
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance,
				    "cmd.disk.dev.rqs.merr",
				    uscsi_ena, devid, DDI_NOSLEEP, FM_VERSION,
				    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    "fatal" : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT8, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    "lba",
				    DATA_TYPE_UINT64,
				    ssc->ssc_uscsi_info->ui_lba,
				    NULL);
			} else {
				/*
				 * If sense-key == 0x4 (hardware error),
				 * driver-assessment should be "fatal" when
				 * drv_assess is SD_FM_DRV_FATAL; for any
				 * other sense key it is "fail".
				 */
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance,
				    "cmd.disk.dev.rqs.derr",
				    uscsi_ena, devid, DDI_NOSLEEP,
				    FM_VERSION,
				    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    (sense_key == 0x4 ?
				    "fatal" : "fail") : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT8, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    NULL);
			}
		} else {
			/*
			 * Post ereport.io.scsi.cmd.disk.dev.serr if we got
			 * the stat-code but sense data is unavailable.
			 * driver-assessment is set based on the drv_assess
			 * parameter.
			 */
			scsi_fm_ereport_post(un->un_sd,
			    uscsi_path_instance, "cmd.disk.dev.serr",
			    uscsi_ena, devid, DDI_NOSLEEP, FM_VERSION,
			    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb",
			    DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason",
			    DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state",
			    DATA_TYPE_UINT8, uscsi_pkt_state,
			    "pkt-stats",
			    DATA_TYPE_UINT32, uscsi_pkt_statistics,
			    "stat-code",
			    DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    NULL);
		}
	}
}

/*
 * Function: sd_ssc_extract_info
 *
 * Description: Extract information available to help generate an ereport.
 *
 * Context: Kernel thread or interrupt context.
 */
static void
sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, struct sd_xbuf *xp)
{
	ASSERT(un != NULL);
	ASSERT(pktp != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(ssc != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	size_t senlen = 0;
	union scsi_cdb *cdbp;
	int path_instance;
	struct uscsi_cmd *uscmd;

	/*
	 * Need the scsi_cdb_size array to determine the cdb length.
	 */
	extern uchar_t scsi_cdb_size[];

	/*
	 * Transfer the cdb buffer pointer here.
	 */
	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;

	ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
	ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;

	/*
	 * Transfer the sense data buffer pointer if sense data is
	 * available; calculate the sense data length first.
	 */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	/*
	 * For the non-arq case, we enter this branch.
	 * The algorithm is excerpted from sd_handle_request_sense.
	 */
	if (!(xp->xb_sense_state & STATE_XARQ_DONE) &&
	    !(xp->xb_sense_state & STATE_ARQ_DONE)) {
		if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
		    xp->xb_sense_state == STATE_XFERRED_DATA) {
			if ((xp->xb_pkt_flags & SD_XB_USCSICMD) &&
			    uscmd->uscsi_rqlen > SENSE_LENGTH)
				senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
			else
				senlen = SENSE_LENGTH - xp->xb_sense_resid;
		}
	} else {
		/*
		 * For the arq case, we enter here.
		 * The algorithm is excerpted from
		 * sd_handle_auto_request_sense.
		 */
		if (xp->xb_sense_state & STATE_XARQ_DONE) {
			senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		} else {
			if (xp->xb_pkt_flags & SD_XB_USCSICMD)
				senlen = SENSE_LENGTH;
			else {
				if (xp->xb_sense_resid > SENSE_LENGTH) {
					senlen = SENSE_LENGTH;
				} else {
					senlen = SENSE_LENGTH -
					    xp->xb_sense_resid;
				}
			}
		}
	}

	ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
	ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
	ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;

	ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);

	/*
	 * Only transfer path_instance when the scsi_pkt was properly
	 * allocated.
	 */
	path_instance = pktp->pkt_path_instance;
	if (scsi_pkt_allocated_correctly(pktp) && path_instance)
		ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
	else
		ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;

	/*
	 * Copy in the other fields we may need when posting an ereport.
	 */
	ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
	ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
	ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
	ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

	/*
	 * To associate the ereports of a single command execution flow,
	 * we need a shared ena for the command.
	 */
	if (xp->xb_ena == 0)
		xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
	ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
}
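
/*
 * Summary of the sense-length cases computed in sd_ssc_extract_info()
 * above (symbolic values; the actual SENSE_LENGTH/MAX_SENSE_LENGTH
 * constants come from the headers), with R = xb_sense_resid:
 *
 *	no ARQ, STATUS_CHECK, data transferred:
 *		USCSI with uscsi_rqlen > SENSE_LENGTH: MAX_SENSE_LENGTH - R
 *		otherwise:                             SENSE_LENGTH - R
 *	ARQ with extra sense (STATE_XARQ_DONE):        MAX_SENSE_LENGTH - R
 *	ARQ, USCSI command:                            SENSE_LENGTH
 *	ARQ, normal command:
 *		R > SENSE_LENGTH:                      SENSE_LENGTH
 *		otherwise:                             SENSE_LENGTH - R
 *
 * Only the low byte (senlen & 0xff) is stored into uscsi_rqlen.
 */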