/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define _LP64
#define __amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include <sys/fm/protocol.h>

/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver"
char _depends_on[] = "misc/scsi misc/cmlb drv/fcp";
#else /* !__fibre */
#define SD_MODULE_NAME	"SCSI Disk Driver"
char _depends_on[] = "misc/scsi misc/cmlb";
#endif /* !__fibre */

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides, things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
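
/*
 * Illustrative sketch only (not part of the driver): the attach path
 * could map the HBA's "interconnect-type" property onto the behaviors
 * described above roughly as follows, where "devi" and "un" are assumed
 * to be the dev_info node and soft state available at attach time:
 *
 *	int itype = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0,
 *	    "interconnect-type", -1);
 *	switch (itype) {
 *	case INTERCONNECT_FIBRE:
 *	case INTERCONNECT_SSA:
 *	case INTERCONNECT_FABRIC:
 *		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
 *		break;
 *	default:	(absent or unsupported, e.g. 1394 or USB)
 *		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
 *		break;
 *	}
 */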

/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define sd_max_xfer_size		ssd_max_xfer_size
#define sd_config_list			ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define sd_state			ssd_state
#define sd_io_time			ssd_io_time
#define sd_failfast_enable		ssd_failfast_enable
#define sd_ua_retry_count		ssd_ua_retry_count
#define sd_report_pfa			ssd_report_pfa
#define sd_max_throttle			ssd_max_throttle
#define sd_min_throttle			ssd_min_throttle
#define sd_rot_delay			ssd_rot_delay

#define sd_retry_on_reservation_conflict \
					ssd_retry_on_reservation_conflict
#define sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define sd_resv_conflict_name		ssd_resv_conflict_name

#define sd_component_mask		ssd_component_mask
#define sd_level_mask			ssd_level_mask
#define sd_debug_un			ssd_debug_un
#define sd_error_level			ssd_error_level

#define sd_xbuf_active_limit		ssd_xbuf_active_limit
#define sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define sd_tr				ssd_tr
#define sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define sd_check_media_time		ssd_check_media_time
#define sd_wait_cmds_complete		ssd_wait_cmds_complete
#define sd_label_mutex			ssd_label_mutex
#define sd_detach_mutex			ssd_detach_mutex
#define sd_log_buf			ssd_log_buf
#define sd_log_mutex			ssd_log_mutex

#define sd_disk_table			ssd_disk_table
#define sd_disk_table_size		ssd_disk_table_size
#define sd_sense_mutex			ssd_sense_mutex
#define sd_cdbtab			ssd_cdbtab

#define sd_cb_ops			ssd_cb_ops
#define sd_ops				ssd_ops
#define sd_additional_codes		ssd_additional_codes
#define sd_tgops			ssd_tgops

#define sd_minor_data			ssd_minor_data
#define sd_minor_data_efi		ssd_minor_data_efi

#define sd_tq				ssd_tq
#define sd_wmr_tq			ssd_wmr_tq
#define sd_taskq_name			ssd_taskq_name
#define sd_wmr_taskq_name		ssd_wmr_taskq_name
#define sd_taskq_minalloc		ssd_taskq_minalloc
#define sd_taskq_maxalloc		ssd_taskq_maxalloc

#define sd_dump_format_string		ssd_dump_format_string

#define sd_iostart_chain		ssd_iostart_chain
#define sd_iodone_chain			ssd_iodone_chain

#define sd_pm_idletime			ssd_pm_idletime

#define sd_force_pm_supported		ssd_force_pm_supported

#define sd_dtype_optical_bind		ssd_dtype_optical_bind

#define sd_ssc_init			ssd_ssc_init
#define sd_ssc_send			ssd_ssc_send
#define sd_ssc_fini			ssd_ssc_fini
#define sd_ssc_assessment		ssd_ssc_assessment
#define sd_ssc_post			ssd_ssc_post
#define sd_ssc_print			ssd_ssc_print
#define sd_ssc_ereport_post		ssd_ssc_ereport_post
#define sd_ssc_set_info			ssd_ssc_set_info
#define sd_ssc_extract_info		ssd_ssc_extract_info

#endif

#ifdef SDDEBUG
int sd_force_pm_supported = 0;
#endif /* SDDEBUG */

void *sd_state = NULL;
int sd_io_time = SD_IO_TIME;
int sd_failfast_enable = 1;
int sd_ua_retry_count = SD_UA_RETRY_COUNT;
int sd_report_pfa = 1;
int sd_max_throttle = SD_MAX_THROTTLE;
int sd_min_throttle = SD_MIN_THROTTLE;
int sd_rot_delay = 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable = TRUE;

int sd_retry_on_reservation_conflict = 1;
int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind = -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t sd_component_mask = 0x0;
uint_t sd_level_mask = 0x0;
struct sd_lun *sd_debug_un = NULL;
uint_t sd_error_level = SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t sd_xbuf_active_limit = 512;
static uint32_t sd_xbuf_reserve_limit = 16;

static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time = 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
    sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char sd_log_buf[1024];
static kmutex_t sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi device only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define SD_SCSI_LUN_ATTACH	0
#define SD_SCSI_LUN_DETACH	1

static kmutex_t sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct sd_scsi_probe_cache	*next;
	dev_info_t			*pdip;
	int				cache[NTARGETS_WIDE];
};

static kmutex_t sd_scsi_probe_cache_mutex;
static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;
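
/*
 * Illustrative sketch only (not part of the driver): each cache node is
 * keyed by the parent HBA's dev_info node, with one cached scsi_probe()
 * result per target.  A lookup under sd_scsi_probe_cache_mutex might look
 * roughly like this, where "pdip" and "tgt" are assumed inputs:
 *
 *	struct sd_scsi_probe_cache *cp;
 *
 *	mutex_enter(&sd_scsi_probe_cache_mutex);
 *	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
 *		if (cp->pdip == pdip)
 *			break;		(node for this HBA found)
 *	}
 *	if (cp != NULL && cp->cache[tgt] == SCSIPROBE_NORESP) {
 *		... reuse the cached result instead of re-probing ...
 *	}
 *	mutex_exit(&sd_scsi_probe_cache_mutex);
 */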

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))

/*
 * Power attribute table
 */
static sd_power_attr_ss sd_pwr_ss = {
	{ "NAME=spindle-motor", "0=off", "1=on", NULL },
	{0, 100},
	{30, 0},
	{20000, 0}
};

static sd_power_attr_pc sd_pwr_pc = {
	{ "NAME=spindle-motor", "0=stopped", "1=standby", "2=idle",
	    "3=active", NULL },
	{0, 0, 0, 100},
	{90, 90, 20, 0},
	{15000, 15000, 1000, 0}
};

/*
 * Power level to power condition
 */
static int sd_pl2pc[] = {
	SD_TARGET_START_VALID,
	SD_TARGET_STANDBY,
	SD_TARGET_IDLE,
	SD_TARGET_ACTIVE
};

/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};
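
/*
 * Illustrative note (an assumption about usage, not a definition): the
 * sd_tunables values above are positional, and the SD_CONF_BSET_* flags
 * in the matching sd_disk_table entry below select which positions are
 * actually applied.  For example, an entry of the form
 *
 *	{ "SUN     T3", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_RSV_REL_TIME,
 *	    &purple_properties },
 *
 * would apply only the throttle and reserve/release-time values from
 * purple_properties, leaving the remaining members unused.
 */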

#if (defined(SD_PROP_TST))

#define SD_TST_CTYPE_VAL	CTYPE_CDROM
#define SD_TST_THROTTLE_VAL	16
#define SD_TST_NOTREADY_VAL	12
#define SD_TST_BUSY_VAL		60
#define SD_TST_RST_RETRY_VAL	36
#define SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))
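
/*
 * Example (illustrative only): SD_TOUPPER('x') evaluates to 'X', while
 * non-lowercase input such as SD_TOUPPER('3') is passed through unchanged.
 * Unlike toupper(3C), the macro argument is evaluated more than once, so
 * it must be free of side effects (e.g. SD_TOUPPER(*p++) is unsafe).
 */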
DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties }, 637 { "IBM DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties }, 638 { "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties }, 639 { "IBM IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties }, 640 { "IBM 1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 641 { "IBM 1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 642 { "IBM 1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 643 { "IBM 1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 644 { "IBM 1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 645 { "IBM 1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 646 { "IBM 3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 647 { "IBM 3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 648 { "IBM 3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 649 { "IBM 1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 650 { "IBM 1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 651 { "IBM 1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 652 { "IBM FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 653 { "IBM 1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 654 { "IBM 1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 655 { "IBM 1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 656 { "DELL MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 657 { "DELL MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 658 { "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 659 { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 660 { "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 661 { "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 662 { "*CSM100_*", SD_CONF_BSET_NRR_COUNT | 663 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties }, 664 { "*CSM200_*", SD_CONF_BSET_NRR_COUNT | 665 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties }, 666 { "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties }, 667 { "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties }, 668 { "SUN T3", SD_CONF_BSET_THROTTLE | 669 SD_CONF_BSET_BSY_RETRY_COUNT| 670 SD_CONF_BSET_RST_RETRIES| 671 SD_CONF_BSET_RSV_REL_TIME, 672 &purple_properties }, 673 { "SUN SESS01", SD_CONF_BSET_THROTTLE | 674 SD_CONF_BSET_BSY_RETRY_COUNT| 675 SD_CONF_BSET_RST_RETRIES| 676 SD_CONF_BSET_RSV_REL_TIME| 677 SD_CONF_BSET_MIN_THROTTLE| 678 SD_CONF_BSET_DISKSORT_DISABLED, 679 &sve_properties }, 680 { "SUN T4", SD_CONF_BSET_THROTTLE | 681 SD_CONF_BSET_BSY_RETRY_COUNT| 682 SD_CONF_BSET_RST_RETRIES| 683 SD_CONF_BSET_RSV_REL_TIME, 684 &purple_properties }, 685 { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED | 686 SD_CONF_BSET_LUN_RESET_ENABLED, 687 &maserati_properties }, 688 { "SUN SE6920", SD_CONF_BSET_THROTTLE | 689 SD_CONF_BSET_NRR_COUNT| 690 SD_CONF_BSET_BSY_RETRY_COUNT| 691 SD_CONF_BSET_RST_RETRIES| 692 SD_CONF_BSET_MIN_THROTTLE| 693 SD_CONF_BSET_DISKSORT_DISABLED| 694 SD_CONF_BSET_LUN_RESET_ENABLED, 695 &pirus_properties }, 696 { "SUN SE6940", SD_CONF_BSET_THROTTLE | 697 SD_CONF_BSET_NRR_COUNT| 698 SD_CONF_BSET_BSY_RETRY_COUNT| 699 SD_CONF_BSET_RST_RETRIES| 700 SD_CONF_BSET_MIN_THROTTLE| 701 SD_CONF_BSET_DISKSORT_DISABLED| 702 SD_CONF_BSET_LUN_RESET_ENABLED, 703 &pirus_properties }, 704 { "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE | 705 SD_CONF_BSET_NRR_COUNT| 706 SD_CONF_BSET_BSY_RETRY_COUNT| 707 SD_CONF_BSET_RST_RETRIES| 708 SD_CONF_BSET_MIN_THROTTLE| 709 SD_CONF_BSET_DISKSORT_DISABLED| 710 SD_CONF_BSET_LUN_RESET_ENABLED, 711 &pirus_properties }, 712 { "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE | 
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SUN     SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540", SD_CONF_BSET_NOCACHE, NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
		| SD_CONF_BSET_READSUB_BCD
		| SD_CONF_BSET_READ_TOC_ADDR_BCD
		| SD_CONF_BSET_NO_READ_HEADER
		| SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
		| SD_CONF_BSET_READSUB_BCD
		| SD_CONF_BSET_READ_TOC_ADDR_BCD
		| SD_CONF_BSET_NO_READ_HEADER
		| SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
		| SD_CONF_BSET_CTYPE
		| SD_CONF_BSET_NRR_COUNT
		| SD_CONF_BSET_FAB_DEVID
		| SD_CONF_BSET_NOCACHE
		| SD_CONF_BSET_BSY_RETRY_COUNT
		| SD_CONF_BSET_PLAYMSF_BCD
		| SD_CONF_BSET_READSUB_BCD
		| SD_CONF_BSET_READ_TOC_TRK_BCD
		| SD_CONF_BSET_READ_TOC_ADDR_BCD
		| SD_CONF_BSET_NO_READ_HEADER
		| SD_CONF_BSET_READ_CD_XD4
		| SD_CONF_BSET_RST_RETRIES
		| SD_CONF_BSET_RSV_REL_TIME
		| SD_CONF_BSET_TUR_CHECK), &tst_properties },
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);


#define SD_INTERCONNECT_PARALLEL	0
#define SD_INTERCONNECT_FABRIC		1
#define SD_INTERCONNECT_FIBRE		2
#define SD_INTERCONNECT_SSA		3
#define SD_INTERCONNECT_SATA		4
#define SD_INTERCONNECT_SAS		5

#define SD_IS_PARALLEL_SCSI(un)	\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define SD_IS_SERIAL(un) \
	(((un)->un_interconnect_type == SD_INTERCONNECT_SATA) || \
	((un)->un_interconnect_type == SD_INTERCONNECT_SAS))

/*
 * Definitions used by device id registration routines
 */
#define VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define VPD_PAGE_LENGTH		3	/* offset for page length data */
#define VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define New_state(un, s) \
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define Restore_state(un) \
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
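
/*
 * Illustrative sketch only: New_state() records the current state in
 * un_last_state before switching, and Restore_state() swaps back, e.g.
 *
 *	New_state(un, SD_STATE_OFFLINE);	(un_last_state <- un_state;
 *						 un_state <- SD_STATE_OFFLINE)
 *	...
 *	Restore_state(un);			(un_state and un_last_state
 *						 are exchanged again)
 *
 * SD_STATE_OFFLINE here is one of the un_state values from sddef.h.
 */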

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF,  },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};

/*
 * Specifies the number of seconds that must have elapsed since the last
 * cmd. has completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define sd_log_trace			ssd_log_trace
#define sd_log_info			ssd_log_info
#define sd_log_err			ssd_log_err
#define sdprobe				ssdprobe
#define sdinfo				ssdinfo
#define sd_prop_op			ssd_prop_op
#define sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define sd_spin_up_unit			ssd_spin_up_unit
#define sd_enable_descr_sense		ssd_enable_descr_sense
#define sd_reenable_dsense_task		ssd_reenable_dsense_task
#define sd_set_mmc_caps			ssd_set_mmc_caps
#define sd_read_unit_properties		ssd_read_unit_properties
#define sd_process_sdconf_file		ssd_process_sdconf_file
#define sd_process_sdconf_table		ssd_process_sdconf_table
#define sd_sdconf_id_match		ssd_sdconf_id_match
#define sd_blank_cmp			ssd_blank_cmp
#define sd_chk_vers1_data		ssd_chk_vers1_data
#define sd_set_vers1_properties		ssd_set_vers1_properties

#define sd_get_physical_geometry	ssd_get_physical_geometry
#define sd_get_virtual_geometry		ssd_get_virtual_geometry
#define sd_update_block_info		ssd_update_block_info
#define sd_register_devid		ssd_register_devid
#define sd_get_devid			ssd_get_devid
#define sd_create_devid			ssd_create_devid
#define sd_write_deviceid		ssd_write_deviceid
#define sd_check_vpd_page_support	ssd_check_vpd_page_support
#define sd_setup_pm			ssd_setup_pm
#define sd_create_pm_components		ssd_create_pm_components
#define sd_ddi_suspend			ssd_ddi_suspend
#define sd_ddi_resume			ssd_ddi_resume
#define sd_pm_state_change		ssd_pm_state_change
#define sdpower				ssdpower
#define sdattach			ssdattach
#define sddetach			ssddetach
#define sd_unit_attach			ssd_unit_attach
#define sd_unit_detach			ssd_unit_detach
#define sd_set_unit_attributes		ssd_set_unit_attributes
#define sd_create_errstats		ssd_create_errstats
#define sd_set_errstats			ssd_set_errstats
#define sd_set_pstats			ssd_set_pstats
#define sddump				ssddump
#define sd_scsi_poll			ssd_scsi_poll
#define sd_send_polled_RQS		ssd_send_polled_RQS
#define sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define sd_init_event_callbacks		ssd_init_event_callbacks
#define sd_event_callback		ssd_event_callback
#define sd_cache_control		ssd_cache_control
#define sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define sd_get_nv_sup			ssd_get_nv_sup
#define sd_make_device			ssd_make_device
#define sdopen				ssdopen
#define sdclose				ssdclose
#define sd_ready_and_valid		ssd_ready_and_valid
#define sdmin				ssdmin
#define sdread				ssdread
#define sdwrite				ssdwrite
#define sdaread				ssdaread
#define sdawrite			ssdawrite
#define sdstrategy			ssdstrategy
#define sdioctl				ssdioctl
#define sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define sd_checksum_iostart		ssd_checksum_iostart
#define sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define sd_pm_iostart			ssd_pm_iostart
#define sd_core_iostart			ssd_core_iostart
#define sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define sd_checksum_iodone		ssd_checksum_iodone
#define sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define sd_pm_iodone			ssd_pm_iodone
#define sd_initpkt_for_buf		ssd_initpkt_for_buf
#define sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define sd_setup_rw_pkt			ssd_setup_rw_pkt
#define sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define sd_buf_iodone			ssd_buf_iodone
#define sd_uscsi_strategy		ssd_uscsi_strategy
#define sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define sd_uscsi_iodone			ssd_uscsi_iodone
#define sd_xbuf_strategy		ssd_xbuf_strategy
#define sd_xbuf_init			ssd_xbuf_init
#define sd_pm_entry			ssd_pm_entry
#define sd_pm_exit			ssd_pm_exit

#define sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define sd_pm_timeout_handler		ssd_pm_timeout_handler

#define sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define sdintr				ssdintr
#define sd_start_cmds			ssd_start_cmds
#define sd_send_scsi_cmd		ssd_send_scsi_cmd
#define sd_bioclone_alloc		ssd_bioclone_alloc
#define sd_bioclone_free		ssd_bioclone_free
#define sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define sd_shadow_buf_free		ssd_shadow_buf_free
#define sd_print_transport_rejected_message \
					ssd_print_transport_rejected_message
#define sd_retry_command		ssd_retry_command
#define sd_set_retry_bp			ssd_set_retry_bp
#define sd_send_request_sense_command	ssd_send_request_sense_command
#define sd_start_retry_command		ssd_start_retry_command
#define sd_start_direct_priority_command \
					ssd_start_direct_priority_command
#define sd_return_failed_command	ssd_return_failed_command
#define sd_return_failed_command_no_restart \
					ssd_return_failed_command_no_restart
#define sd_return_command		ssd_return_command
#define sd_sync_with_callback		ssd_sync_with_callback
#define sdrunout			ssdrunout
#define sd_mark_rqs_busy		ssd_mark_rqs_busy
#define sd_mark_rqs_idle		ssd_mark_rqs_idle
#define sd_reduce_throttle		ssd_reduce_throttle
#define sd_restore_throttle		ssd_restore_throttle
#define sd_print_incomplete_msg		ssd_print_incomplete_msg
#define sd_init_cdb_limits		ssd_init_cdb_limits
#define sd_pkt_status_good		ssd_pkt_status_good
#define sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define sd_pkt_status_busy		ssd_pkt_status_busy
#define sd_pkt_status_reservation_conflict \
					ssd_pkt_status_reservation_conflict
#define sd_pkt_status_qfull		ssd_pkt_status_qfull
#define sd_handle_request_sense		ssd_handle_request_sense
#define sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define sd_validate_sense_data		ssd_validate_sense_data
#define sd_decode_sense			ssd_decode_sense
#define sd_print_sense_msg		ssd_print_sense_msg
#define sd_sense_key_no_sense		ssd_sense_key_no_sense
#define sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define sd_sense_key_not_ready		ssd_sense_key_not_ready
#define sd_sense_key_medium_or_hardware_error \
					ssd_sense_key_medium_or_hardware_error
#define sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define sd_sense_key_fail_command	ssd_sense_key_fail_command
#define sd_sense_key_blank_check	ssd_sense_key_blank_check
#define sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define sd_sense_key_default		ssd_sense_key_default
#define sd_print_retry_msg		ssd_print_retry_msg
#define sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define sd_pkt_reason_default		ssd_pkt_reason_default
#define sd_reset_target			ssd_reset_target
#define sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define sd_start_stop_unit_task		ssd_start_stop_unit_task
#define sd_taskq_create			ssd_taskq_create
#define sd_taskq_delete			ssd_taskq_delete
#define sd_target_change_task		ssd_target_change_task
#define sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define sd_media_change_task		ssd_media_change_task
#define sd_handle_mchange		ssd_handle_mchange
#define sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define sd_send_scsi_feature_GET_CONFIGURATION \
					ssd_send_scsi_feature_GET_CONFIGURATION
#define sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define sd_send_scsi_PERSISTENT_RESERVE_IN \
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define sd_send_scsi_PERSISTENT_RESERVE_OUT \
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define sd_send_scsi_SYNCHRONIZE_CACHE_biodone \
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define sd_alloc_rqs			ssd_alloc_rqs
#define sd_free_rqs			ssd_free_rqs
#define sd_dump_memory			ssd_dump_memory
#define sd_get_media_info		ssd_get_media_info
#define sd_get_media_info_ext		ssd_get_media_info_ext
#define sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define sd_nvpair_str_decode		ssd_nvpair_str_decode
#define sd_strtok_r			ssd_strtok_r
#define sd_set_properties		ssd_set_properties
#define sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define sd_setup_next_xfer		ssd_setup_next_xfer
#define sd_dkio_get_temp		ssd_dkio_get_temp
#define sd_check_mhd			ssd_check_mhd
#define sd_mhd_watch_cb			ssd_mhd_watch_cb
#define sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define sd_sname			ssd_sname
#define sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define sd_take_ownership		ssd_take_ownership
#define sd_reserve_release		ssd_reserve_release
#define sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define sd_persistent_reservation_in_read_keys \
					ssd_persistent_reservation_in_read_keys
#define sd_persistent_reservation_in_read_resv \
					ssd_persistent_reservation_in_read_resv
#define sd_mhdioc_takeown		ssd_mhdioc_takeown
#define sd_mhdioc_failfast		ssd_mhdioc_failfast
#define sd_mhdioc_release		ssd_mhdioc_release
#define sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define sd_mhdioc_inresv		ssd_mhdioc_inresv
#define sr_change_blkmode		ssr_change_blkmode
#define sr_change_speed			ssr_change_speed
#define sr_atapi_change_speed		ssr_atapi_change_speed
#define sr_pause_resume			ssr_pause_resume
#define sr_play_msf			ssr_play_msf
#define sr_play_trkind			ssr_play_trkind
#define sr_read_all_subcodes		ssr_read_all_subcodes
#define sr_read_subchannel		ssr_read_subchannel
#define sr_read_tocentry		ssr_read_tocentry
#define sr_read_tochdr			ssr_read_tochdr
#define sr_read_cdda			ssr_read_cdda
#define sr_read_cdxa			ssr_read_cdxa
#define sr_read_mode1			ssr_read_mode1
#define sr_read_mode2			ssr_read_mode2
#define sr_read_cd_mode2		ssr_read_cd_mode2
#define sr_sector_mode			ssr_sector_mode
#define sr_eject			ssr_eject
#define sr_ejected			ssr_ejected
#define sr_check_wp			ssr_check_wp
#define sd_check_media			ssd_check_media
#define sd_media_watch_cb		ssd_media_watch_cb
#define sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define sr_volume_ctrl			ssr_volume_ctrl
#define sr_read_sony_session_offset	ssr_read_sony_session_offset
#define sd_log_page_supported		ssd_log_page_supported
#define sd_check_for_writable_cd	ssd_check_for_writable_cd
#define sd_wm_cache_constructor		ssd_wm_cache_constructor
#define sd_wm_cache_destructor		ssd_wm_cache_destructor
#define sd_range_lock			ssd_range_lock
#define sd_get_range			ssd_get_range
#define sd_free_inlist_wmap		ssd_free_inlist_wmap
#define sd_range_unlock			ssd_range_unlock
#define sd_read_modify_write_task	ssd_read_modify_write_task
#define sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define sd_iostart_chain		ssd_iostart_chain
#define sd_iodone_chain			ssd_iodone_chain
#define sd_initpkt_map			ssd_initpkt_map
#define sd_destroypkt_map		ssd_destroypkt_map
#define sd_chain_type_map		ssd_chain_type_map
#define sd_chain_index_map		ssd_chain_index_map

#define sd_failfast_flushctl		ssd_failfast_flushctl
#define sd_failfast_flushq		ssd_failfast_flushq
#define sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define sd_is_lsi			ssd_is_lsi
#define sd_tg_rdwr			ssd_tg_rdwr
#define sd_tg_getinfo			ssd_tg_getinfo
#define sd_rmw_msg_print_handler	ssd_rmw_msg_print_handler

#endif /* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);
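
/*
 * Illustrative sketch only: the sd_log_* routines take a component bit,
 * the unit, and printf-style arguments, e.g.
 *
 *	sd_log_err(SD_LOG_IO_CORE, un,
 *	    "sdintr: bad pkt_reason 0x%x\n", pktp->pkt_reason);
 *
 * Output appears only when the matching bits are set in sd_component_mask
 * and sd_level_mask (see sddef.h).
 */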
static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(sd_ssc_t *ssc);

/*
 * sd_ssc_init establishes the sd_ssc_t struct.
 * sd_ssc_send sends an internal uscsi command.
 * sd_ssc_fini frees the sd_ssc_t struct.
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);
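
/*
 * Illustrative sketch only (assumed caller context): internal commands
 * follow an init/send/fini lifecycle, with an assessment posted before
 * teardown, e.g.
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *	rval = sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE,
 *	    SD_PATH_STANDARD);
 *	sd_ssc_assessment(ssc, SD_FMT_STANDARD);
 *	sd_ssc_fini(ssc);
 */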

/*
 * sd_ssc_assessment sets the correct type of assessment.
 * sd_ssc_post posts an ereport and writes to the system log;
 * it calls sd_ssc_print to print the system log entry and
 * sd_ssc_ereport_post to post the ereport.
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);

/*
 * sd_ssc_set_info marks an un-decodable-data error.
 * sd_ssc_extract_info transfers information from internal
 * data structures to sd_ssc_t.
 */
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int sd_write_deviceid(sd_ssc_t *ssc);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(sd_ssc_t *ssc);

static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_pm_state_change(struct sd_lun *un, int level, int flag);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define SD_CACHE_ENABLE		1
#define SD_CACHE_DISABLE	0
#define SD_CACHE_NOCHANGE	-1

static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
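
/*
 * Illustrative sketch only: rcd_flag controls the read cache and wce_flag
 * the write cache, with SD_CACHE_NOCHANGE leaving a cache as-is.  For
 * example, to enable the write cache while leaving the read cache alone:
 *
 *	rval = sd_cache_control(ssc, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
 */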
static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
static void sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(sd_ssc_t *ssc, int part);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp,
    uint32_t *lbap, int path_flag);
*capp, 1494 uint32_t *lbap, uint32_t *psp, int path_flag); 1495 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, 1496 int flag, int path_flag); 1497 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1498 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1499 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1500 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1501 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1502 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1503 uchar_t usr_cmd, uchar_t *usr_bufp); 1504 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1505 struct dk_callback *dkc); 1506 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1507 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1508 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1509 uchar_t *bufaddr, uint_t buflen, int path_flag); 1510 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1511 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1512 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1513 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1514 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1515 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1516 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1517 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1518 size_t buflen, daddr_t start_block, int path_flag); 1519 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1520 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1521 path_flag) 1522 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1523 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1524 path_flag) 1525 1526 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1527 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1528 uint16_t param_ptr, int path_flag); 1529 1530 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1531 static void sd_free_rqs(struct sd_lun *un); 1532 1533 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1534 uchar_t *data, int len, int fmt); 1535 static void sd_panic_for_res_conflict(struct sd_lun *un); 1536 1537 /* 1538 * Disk Ioctl Function Prototypes 1539 */ 1540 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1541 static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag); 1542 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1543 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1544 1545 /* 1546 * Multi-host Ioctl Prototypes 1547 */ 1548 static int sd_check_mhd(dev_t dev, int interval); 1549 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1550 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1551 static char *sd_sname(uchar_t status); 1552 static void sd_mhd_resvd_recover(void *arg); 1553 static void sd_resv_reclaim_thread(); 1554 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1555 static int sd_reserve_release(dev_t dev, int cmd); 1556 static void sd_rmv_resv_reclaim_req(dev_t dev); 1557 static void sd_mhd_reset_notify_cb(caddr_t arg); 1558 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1559 mhioc_inkeys_t *usrp, int flag); 1560 static int 
sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1561 mhioc_inresvs_t *usrp, int flag); 1562 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1563 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1564 static int sd_mhdioc_release(dev_t dev); 1565 static int sd_mhdioc_register_devid(dev_t dev); 1566 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1567 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1568 1569 /* 1570 * SCSI removable prototypes 1571 */ 1572 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1573 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1574 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1575 static int sr_pause_resume(dev_t dev, int mode); 1576 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1577 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1578 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1579 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1580 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1581 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1582 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1583 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1584 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1585 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1586 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1587 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1588 static int sr_eject(dev_t dev); 1589 static void sr_ejected(register struct sd_lun *un); 1590 static int sr_check_wp(dev_t dev); 1591 static int sd_check_media(dev_t dev, enum dkio_state state); 1592 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1593 static void sd_delayed_cv_broadcast(void *arg); 1594 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1595 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1596 1597 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page); 1598 1599 /* 1600 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1601 */ 1602 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag); 1603 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1604 static void sd_wm_cache_destructor(void *wm, void *un); 1605 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1606 daddr_t endb, ushort_t typ); 1607 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1608 daddr_t endb); 1609 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1610 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1611 static void sd_read_modify_write_task(void * arg); 1612 static int 1613 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1614 struct buf **bpp); 1615 1616 1617 /* 1618 * Function prototypes for failfast support. 
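 *
 * (Overview, for illustration: sd_failfast_flushq() performs the flush
 * of queued bufs when an instance transitions into the failfast state,
 * and sd_failfast_flushq_callback() is the per-buf predicate applied
 * during that flush. The scope of the flush is governed by the
 * sd_failfast_flushctl variable defined further below; e.g., a
 * hypothetical debug build could widen the flush to all bufs and all
 * queues with:
 *
 *	sd_failfast_flushctl =
 *	    SD_FAILFAST_FLUSH_ALL_BUFS | SD_FAILFAST_FLUSH_ALL_QUEUES;
 *
 * See the failfast constants further below for the meaning of each bit.)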
1619 */ 1620 static void sd_failfast_flushq(struct sd_lun *un); 1621 static int sd_failfast_flushq_callback(struct buf *bp); 1622 1623 /* 1624 * Function prototypes to check for lsi devices 1625 */ 1626 static void sd_is_lsi(struct sd_lun *un); 1627 1628 /* 1629 * Function prototypes for partial DMA support 1630 */ 1631 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1632 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1633 1634 1635 /* Function prototypes for cmlb */ 1636 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1637 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1638 1639 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1640 1641 /* 1642 * For printing RMW warning message timely 1643 */ 1644 static void sd_rmw_msg_print_handler(void *arg); 1645 1646 /* 1647 * Constants for failfast support: 1648 * 1649 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1650 * failfast processing being performed. 1651 * 1652 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1653 * failfast processing on all bufs with B_FAILFAST set. 1654 */ 1655 1656 #define SD_FAILFAST_INACTIVE 0 1657 #define SD_FAILFAST_ACTIVE 1 1658 1659 /* 1660 * Bitmask to control behavior of buf(9S) flushes when a transition to 1661 * the failfast state occurs. Optional bits include: 1662 * 1663 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1664 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1665 * be flushed. 1666 * 1667 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1668 * driver, in addition to the regular wait queue. This includes the xbuf 1669 * queues. When clear, only the driver's wait queue will be flushed. 1670 */ 1671 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1672 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1673 1674 /* 1675 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1676 * to flush all queues within the driver. 1677 */ 1678 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1679 1680 1681 /* 1682 * SD Testing Fault Injection 1683 */ 1684 #ifdef SD_FAULT_INJECTION 1685 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1686 static void sd_faultinjection(struct scsi_pkt *pktp); 1687 static void sd_injection_log(char *buf, struct sd_lun *un); 1688 #endif 1689 1690 /* 1691 * Device driver ops vector 1692 */ 1693 static struct cb_ops sd_cb_ops = { 1694 sdopen, /* open */ 1695 sdclose, /* close */ 1696 sdstrategy, /* strategy */ 1697 nodev, /* print */ 1698 sddump, /* dump */ 1699 sdread, /* read */ 1700 sdwrite, /* write */ 1701 sdioctl, /* ioctl */ 1702 nodev, /* devmap */ 1703 nodev, /* mmap */ 1704 nodev, /* segmap */ 1705 nochpoll, /* poll */ 1706 sd_prop_op, /* cb_prop_op */ 1707 0, /* streamtab */ 1708 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1709 CB_REV, /* cb_rev */ 1710 sdaread, /* async I/O read entry point */ 1711 sdawrite /* async I/O write entry point */ 1712 }; 1713 1714 struct dev_ops sd_ops = { 1715 DEVO_REV, /* devo_rev, */ 1716 0, /* refcnt */ 1717 sdinfo, /* info */ 1718 nulldev, /* identify */ 1719 sdprobe, /* probe */ 1720 sdattach, /* attach */ 1721 sddetach, /* detach */ 1722 nodev, /* reset */ 1723 &sd_cb_ops, /* driver operations */ 1724 NULL, /* bus operations */ 1725 sdpower, /* power */ 1726 ddi_quiesce_not_needed, /* quiesce */ 1727 }; 1728 1729 /* 1730 * This is the loadable module wrapper. 
 */
1732 #include <sys/modctl.h>
1733
1734 #ifndef XPV_HVM_DRIVER
1735 static struct modldrv modldrv = {
1736 	&mod_driverops,		/* Type of module. This one is a driver */
1737 	SD_MODULE_NAME,		/* Module name. */
1738 	&sd_ops			/* driver ops */
1739 };
1740
1741 static struct modlinkage modlinkage = {
1742 	MODREV_1, &modldrv, NULL
1743 };
1744
1745 #else /* XPV_HVM_DRIVER */
1746 static struct modlmisc modlmisc = {
1747 	&mod_miscops,		/* Type of module. This one is a misc */
1748 	"HVM " SD_MODULE_NAME,	/* Module name. */
1749 };
1750
1751 static struct modlinkage modlinkage = {
1752 	MODREV_1, &modlmisc, NULL
1753 };
1754
1755 #endif /* XPV_HVM_DRIVER */
1756
1757 static cmlb_tg_ops_t sd_tgops = {
1758 	TG_DK_OPS_VERSION_1,
1759 	sd_tg_rdwr,
1760 	sd_tg_getinfo
1761 };
1762
1763 static struct scsi_asq_key_strings sd_additional_codes[] = {
1764 	0x81, 0, "Logical Unit is Reserved",
1765 	0x85, 0, "Audio Address Not Valid",
1766 	0xb6, 0, "Media Load Mechanism Failed",
1767 	0xB9, 0, "Audio Play Operation Aborted",
1768 	0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1769 	0x53, 2, "Medium removal prevented",
1770 	0x6f, 0, "Authentication failed during key exchange",
1771 	0x6f, 1, "Key not present",
1772 	0x6f, 2, "Key not established",
1773 	0x6f, 3, "Read without proper authentication",
1774 	0x6f, 4, "Mismatched region to this logical unit",
1775 	0x6f, 5, "Region reset count error",
1776 	0xffff, 0x0, NULL
1777 };
1778
1779
1780 /*
1781  * Struct for passing printing information for sense data messages
1782  */
1783 struct sd_sense_info {
1784 	int	ssi_severity;
1785 	int	ssi_pfa_flag;
1786 };
1787
1788 /*
1789  * Table of function pointers for iostart-side routines. Separate "chains"
1790  * of layered function calls are formed by placing the function pointers
1791  * sequentially in the desired order. Functions are called according to an
1792  * incrementing table index ordering. The last function in each chain must
1793  * be sd_core_iostart(). The corresponding iodone-side routines are expected
1794  * in the sd_iodone_chain[] array.
1795  *
1796  * Note: It may seem more natural to organize both the iostart and iodone
1797  * functions together, into an array of structures (or some similar
1798  * organization) with a common index, rather than two separate arrays which
1799  * must be maintained in synchronization. The purpose of this division is
1800  * to achieve improved performance: individual arrays allow for more
1801  * effective cache line utilization on certain platforms.
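 *
 * As an illustrative sketch of the calling convention (the routine below
 * is hypothetical, not part of this driver), a layering function in an
 * iostart chain performs its own processing and then passes the buf to
 * the next function in its chain via the SD_NEXT_IOSTART() macro defined
 * further below:
 *
 *	static void
 *	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		... per-layer processing of bp ...
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}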
1802 */ 1803 1804 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1805 1806 1807 static sd_chain_t sd_iostart_chain[] = { 1808 1809 /* Chain for buf IO for disk drive targets (PM enabled) */ 1810 sd_mapblockaddr_iostart, /* Index: 0 */ 1811 sd_pm_iostart, /* Index: 1 */ 1812 sd_core_iostart, /* Index: 2 */ 1813 1814 /* Chain for buf IO for disk drive targets (PM disabled) */ 1815 sd_mapblockaddr_iostart, /* Index: 3 */ 1816 sd_core_iostart, /* Index: 4 */ 1817 1818 /* 1819 * Chain for buf IO for removable-media or large sector size 1820 * disk drive targets with RMW needed (PM enabled) 1821 */ 1822 sd_mapblockaddr_iostart, /* Index: 5 */ 1823 sd_mapblocksize_iostart, /* Index: 6 */ 1824 sd_pm_iostart, /* Index: 7 */ 1825 sd_core_iostart, /* Index: 8 */ 1826 1827 /* 1828 * Chain for buf IO for removable-media or large sector size 1829 * disk drive targets with RMW needed (PM disabled) 1830 */ 1831 sd_mapblockaddr_iostart, /* Index: 9 */ 1832 sd_mapblocksize_iostart, /* Index: 10 */ 1833 sd_core_iostart, /* Index: 11 */ 1834 1835 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1836 sd_mapblockaddr_iostart, /* Index: 12 */ 1837 sd_checksum_iostart, /* Index: 13 */ 1838 sd_pm_iostart, /* Index: 14 */ 1839 sd_core_iostart, /* Index: 15 */ 1840 1841 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1842 sd_mapblockaddr_iostart, /* Index: 16 */ 1843 sd_checksum_iostart, /* Index: 17 */ 1844 sd_core_iostart, /* Index: 18 */ 1845 1846 /* Chain for USCSI commands (all targets) */ 1847 sd_pm_iostart, /* Index: 19 */ 1848 sd_core_iostart, /* Index: 20 */ 1849 1850 /* Chain for checksumming USCSI commands (all targets) */ 1851 sd_checksum_uscsi_iostart, /* Index: 21 */ 1852 sd_pm_iostart, /* Index: 22 */ 1853 sd_core_iostart, /* Index: 23 */ 1854 1855 /* Chain for "direct" USCSI commands (all targets) */ 1856 sd_core_iostart, /* Index: 24 */ 1857 1858 /* Chain for "direct priority" USCSI commands (all targets) */ 1859 sd_core_iostart, /* Index: 25 */ 1860 1861 /* 1862 * Chain for buf IO for large sector size disk drive targets 1863 * with RMW needed with checksumming (PM enabled) 1864 */ 1865 sd_mapblockaddr_iostart, /* Index: 26 */ 1866 sd_mapblocksize_iostart, /* Index: 27 */ 1867 sd_checksum_iostart, /* Index: 28 */ 1868 sd_pm_iostart, /* Index: 29 */ 1869 sd_core_iostart, /* Index: 30 */ 1870 1871 /* 1872 * Chain for buf IO for large sector size disk drive targets 1873 * with RMW needed with checksumming (PM disabled) 1874 */ 1875 sd_mapblockaddr_iostart, /* Index: 31 */ 1876 sd_mapblocksize_iostart, /* Index: 32 */ 1877 sd_checksum_iostart, /* Index: 33 */ 1878 sd_core_iostart, /* Index: 34 */ 1879 1880 }; 1881 1882 /* 1883 * Macros to locate the first function of each iostart chain in the 1884 * sd_iostart_chain[] array. These are located by the index in the array. 
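 *
 * For example (illustrative usage only), a buf routed down the standard
 * disk chain would be started with:
 *
 *	SD_BEGIN_IOSTART(SD_CHAIN_DISK_IOSTART, un, bp);
 *
 * using the SD_BEGIN_IOSTART() macro defined further below.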
 */
1886 #define	SD_CHAIN_DISK_IOSTART			0
1887 #define	SD_CHAIN_DISK_IOSTART_NO_PM		3
1888 #define	SD_CHAIN_MSS_DISK_IOSTART		5
1889 #define	SD_CHAIN_RMMEDIA_IOSTART		5
1890 #define	SD_CHAIN_MSS_DISK_IOSTART_NO_PM		9
1891 #define	SD_CHAIN_RMMEDIA_IOSTART_NO_PM		9
1892 #define	SD_CHAIN_CHKSUM_IOSTART			12
1893 #define	SD_CHAIN_CHKSUM_IOSTART_NO_PM		16
1894 #define	SD_CHAIN_USCSI_CMD_IOSTART		19
1895 #define	SD_CHAIN_USCSI_CHKSUM_IOSTART		21
1896 #define	SD_CHAIN_DIRECT_CMD_IOSTART		24
1897 #define	SD_CHAIN_PRIORITY_CMD_IOSTART		25
1898 #define	SD_CHAIN_MSS_CHKSUM_IOSTART		26
1899 #define	SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM	31
1900
1901
1902 /*
1903  * Table of function pointers for the iodone-side routines for the driver-
1904  * internal layering mechanism. The calling sequence for iodone routines
1905  * uses a decrementing table index, so the last routine called in a chain
1906  * must be at the lowest array index location for that chain. The last
1907  * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
1908  * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
1909  * of the functions in an iodone side chain must correspond to the ordering
1910  * of the iostart routines for that chain. Note that there is no iodone
1911  * side routine that corresponds to sd_core_iostart(), so there is no
1912  * entry in the table for this.
1913  */
1914
1915 static sd_chain_t sd_iodone_chain[] = {
1916
1917 	/* Chain for buf IO for disk drive targets (PM enabled) */
1918 	sd_buf_iodone,			/* Index: 0 */
1919 	sd_mapblockaddr_iodone,		/* Index: 1 */
1920 	sd_pm_iodone,			/* Index: 2 */
1921
1922 	/* Chain for buf IO for disk drive targets (PM disabled) */
1923 	sd_buf_iodone,			/* Index: 3 */
1924 	sd_mapblockaddr_iodone,		/* Index: 4 */
1925
1926 	/*
1927 	 * Chain for buf IO for removable-media or large sector size
1928 	 * disk drive targets with RMW needed (PM enabled)
1929 	 */
1930 	sd_buf_iodone,			/* Index: 5 */
1931 	sd_mapblockaddr_iodone,		/* Index: 6 */
1932 	sd_mapblocksize_iodone,		/* Index: 7 */
1933 	sd_pm_iodone,			/* Index: 8 */
1934
1935 	/*
1936 	 * Chain for buf IO for removable-media or large sector size
1937 	 * disk drive targets with RMW needed (PM disabled)
1938 	 */
1939 	sd_buf_iodone,			/* Index: 9 */
1940 	sd_mapblockaddr_iodone,		/* Index: 10 */
1941 	sd_mapblocksize_iodone,		/* Index: 11 */
1942
1943 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1944 	sd_buf_iodone,			/* Index: 12 */
1945 	sd_mapblockaddr_iodone,		/* Index: 13 */
1946 	sd_checksum_iodone,		/* Index: 14 */
1947 	sd_pm_iodone,			/* Index: 15 */
1948
1949 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1950 	sd_buf_iodone,			/* Index: 16 */
1951 	sd_mapblockaddr_iodone,		/* Index: 17 */
1952 	sd_checksum_iodone,		/* Index: 18 */
1953
1954 	/* Chain for USCSI commands (non-checksum targets) */
1955 	sd_uscsi_iodone,		/* Index: 19 */
1956 	sd_pm_iodone,			/* Index: 20 */
1957
1958 	/* Chain for USCSI commands (checksum targets) */
1959 	sd_uscsi_iodone,		/* Index: 21 */
1960 	sd_checksum_uscsi_iodone,	/* Index: 22 */
1961 	sd_pm_iodone,			/* Index: 23 */
1962
1963 	/* Chain for "direct" USCSI commands (all targets) */
1964 	sd_uscsi_iodone,		/* Index: 24 */
1965
1966 	/* Chain for "direct priority" USCSI commands (all targets) */
1967 	sd_uscsi_iodone,		/* Index: 25 */
1968
1969 	/*
1970 	 * Chain for buf IO for large sector size disk drive targets
1971 	 * with checksumming (PM enabled)
1972 	 */
1973 	sd_buf_iodone,			/* Index: 26 */
1974 	sd_mapblockaddr_iodone,		/* Index: 27 */
1975 	sd_mapblocksize_iodone,		/* Index: 28 */
1976 	sd_checksum_iodone,		/* Index: 29 */
1977 	sd_pm_iodone,			/* Index: 30 */
1978
1979 	/*
1980 	 * Chain for buf IO for large sector size disk drive targets
1981 	 * with checksumming (PM disabled)
1982 	 */
1983 	sd_buf_iodone,			/* Index: 31 */
1984 	sd_mapblockaddr_iodone,		/* Index: 32 */
1985 	sd_mapblocksize_iodone,		/* Index: 33 */
1986 	sd_checksum_iodone,		/* Index: 34 */
1987 };
1988
1989
1990 /*
1991  * Macros to locate the "first" function in the sd_iodone_chain[] array for
1992  * each iodone-side chain. These are located by the array index, but as the
1993  * iodone side functions are called in a decrementing-index order, the
1994  * highest index number in each chain must be specified (as these correspond
1995  * to the first function in the iodone chain that will be called by the core
1996  * at IO completion time).
1997  */
1998
1999 #define	SD_CHAIN_DISK_IODONE			2
2000 #define	SD_CHAIN_DISK_IODONE_NO_PM		4
2001 #define	SD_CHAIN_RMMEDIA_IODONE			8
2002 #define	SD_CHAIN_MSS_DISK_IODONE		8
2003 #define	SD_CHAIN_RMMEDIA_IODONE_NO_PM		11
2004 #define	SD_CHAIN_MSS_DISK_IODONE_NO_PM		11
2005 #define	SD_CHAIN_CHKSUM_IODONE			15
2006 #define	SD_CHAIN_CHKSUM_IODONE_NO_PM		18
2007 #define	SD_CHAIN_USCSI_CMD_IODONE		20
2008 #define	SD_CHAIN_USCSI_CHKSUM_IODONE		22
2009 #define	SD_CHAIN_DIRECT_CMD_IODONE		24
2010 #define	SD_CHAIN_PRIORITY_CMD_IODONE		25
2011 #define	SD_CHAIN_MSS_CHKSUM_IODONE		30
2012 #define	SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM	34
2013
2014
2015
2016 /*
2017  * Array to map a layering chain index to the appropriate initpkt routine.
2018  * The redundant entries are present so that the index used for accessing
2019  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2020  * with this table as well.
2021  */
2022 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
2023
2024 static sd_initpkt_t	sd_initpkt_map[] = {
2025
2026 	/* Chain for buf IO for disk drive targets (PM enabled) */
2027 	sd_initpkt_for_buf,		/* Index: 0 */
2028 	sd_initpkt_for_buf,		/* Index: 1 */
2029 	sd_initpkt_for_buf,		/* Index: 2 */
2030
2031 	/* Chain for buf IO for disk drive targets (PM disabled) */
2032 	sd_initpkt_for_buf,		/* Index: 3 */
2033 	sd_initpkt_for_buf,		/* Index: 4 */
2034
2035 	/*
2036 	 * Chain for buf IO for removable-media or large sector size
2037 	 * disk drive targets (PM enabled)
2038 	 */
2039 	sd_initpkt_for_buf,		/* Index: 5 */
2040 	sd_initpkt_for_buf,		/* Index: 6 */
2041 	sd_initpkt_for_buf,		/* Index: 7 */
2042 	sd_initpkt_for_buf,		/* Index: 8 */
2043
2044 	/*
2045 	 * Chain for buf IO for removable-media or large sector size
2046 	 * disk drive targets (PM disabled)
2047 	 */
2048 	sd_initpkt_for_buf,		/* Index: 9 */
2049 	sd_initpkt_for_buf,		/* Index: 10 */
2050 	sd_initpkt_for_buf,		/* Index: 11 */
2051
2052 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
2053 	sd_initpkt_for_buf,		/* Index: 12 */
2054 	sd_initpkt_for_buf,		/* Index: 13 */
2055 	sd_initpkt_for_buf,		/* Index: 14 */
2056 	sd_initpkt_for_buf,		/* Index: 15 */
2057
2058 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
2059 	sd_initpkt_for_buf,		/* Index: 16 */
2060 	sd_initpkt_for_buf,		/* Index: 17 */
2061 	sd_initpkt_for_buf,		/* Index: 18 */
2062
2063 	/* Chain for USCSI commands (non-checksum targets) */
2064 	sd_initpkt_for_uscsi,		/* Index: 19 */
2065 	sd_initpkt_for_uscsi,		/* Index: 20 */
2066
2067 	/* Chain for USCSI commands (checksum targets) */
2068 	sd_initpkt_for_uscsi,		/* Index: 21 */
2069 	sd_initpkt_for_uscsi,		/* Index: 22 */
2070 	sd_initpkt_for_uscsi,		/* Index: 23 */
2071
2072 	/* Chain for "direct" USCSI
commands (all targets) */
2073 	sd_initpkt_for_uscsi,		/* Index: 24 */
2074
2075 	/* Chain for "direct priority" USCSI commands (all targets) */
2076 	sd_initpkt_for_uscsi,		/* Index: 25 */
2077
2078 	/*
2079 	 * Chain for buf IO for large sector size disk drive targets
2080 	 * with checksumming (PM enabled)
2081 	 */
2082 	sd_initpkt_for_buf,		/* Index: 26 */
2083 	sd_initpkt_for_buf,		/* Index: 27 */
2084 	sd_initpkt_for_buf,		/* Index: 28 */
2085 	sd_initpkt_for_buf,		/* Index: 29 */
2086 	sd_initpkt_for_buf,		/* Index: 30 */
2087
2088 	/*
2089 	 * Chain for buf IO for large sector size disk drive targets
2090 	 * with checksumming (PM disabled)
2091 	 */
2092 	sd_initpkt_for_buf,		/* Index: 31 */
2093 	sd_initpkt_for_buf,		/* Index: 32 */
2094 	sd_initpkt_for_buf,		/* Index: 33 */
2095 	sd_initpkt_for_buf,		/* Index: 34 */
2096 };
2097
2098
2099 /*
2100  * Array to map a layering chain index to the appropriate destroypkt routine.
2101  * The redundant entries are present so that the index used for accessing
2102  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2103  * with this table as well.
2104  */
2105 typedef void (*sd_destroypkt_t)(struct buf *);
2106
2107 static sd_destroypkt_t	sd_destroypkt_map[] = {
2108
2109 	/* Chain for buf IO for disk drive targets (PM enabled) */
2110 	sd_destroypkt_for_buf,		/* Index: 0 */
2111 	sd_destroypkt_for_buf,		/* Index: 1 */
2112 	sd_destroypkt_for_buf,		/* Index: 2 */
2113
2114 	/* Chain for buf IO for disk drive targets (PM disabled) */
2115 	sd_destroypkt_for_buf,		/* Index: 3 */
2116 	sd_destroypkt_for_buf,		/* Index: 4 */
2117
2118 	/*
2119 	 * Chain for buf IO for removable-media or large sector size
2120 	 * disk drive targets (PM enabled)
2121 	 */
2122 	sd_destroypkt_for_buf,		/* Index: 5 */
2123 	sd_destroypkt_for_buf,		/* Index: 6 */
2124 	sd_destroypkt_for_buf,		/* Index: 7 */
2125 	sd_destroypkt_for_buf,		/* Index: 8 */
2126
2127 	/*
2128 	 * Chain for buf IO for removable-media or large sector size
2129 	 * disk drive targets (PM disabled)
2130 	 */
2131 	sd_destroypkt_for_buf,		/* Index: 9 */
2132 	sd_destroypkt_for_buf,		/* Index: 10 */
2133 	sd_destroypkt_for_buf,		/* Index: 11 */
2134
2135 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
2136 	sd_destroypkt_for_buf,		/* Index: 12 */
2137 	sd_destroypkt_for_buf,		/* Index: 13 */
2138 	sd_destroypkt_for_buf,		/* Index: 14 */
2139 	sd_destroypkt_for_buf,		/* Index: 15 */
2140
2141 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
2142 	sd_destroypkt_for_buf,		/* Index: 16 */
2143 	sd_destroypkt_for_buf,		/* Index: 17 */
2144 	sd_destroypkt_for_buf,		/* Index: 18 */
2145
2146 	/* Chain for USCSI commands (non-checksum targets) */
2147 	sd_destroypkt_for_uscsi,	/* Index: 19 */
2148 	sd_destroypkt_for_uscsi,	/* Index: 20 */
2149
2150 	/* Chain for USCSI commands (checksum targets) */
2151 	sd_destroypkt_for_uscsi,	/* Index: 21 */
2152 	sd_destroypkt_for_uscsi,	/* Index: 22 */
2153 	sd_destroypkt_for_uscsi,	/* Index: 23 */
2154
2155 	/* Chain for "direct" USCSI commands (all targets) */
2156 	sd_destroypkt_for_uscsi,	/* Index: 24 */
2157
2158 	/* Chain for "direct priority" USCSI commands (all targets) */
2159 	sd_destroypkt_for_uscsi,	/* Index: 25 */
2160
2161 	/*
2162 	 * Chain for buf IO for large sector size disk drive targets
2163 	 * with checksumming (PM enabled)
2164 	 */
2165 	sd_destroypkt_for_buf,		/* Index: 26 */
2166 	sd_destroypkt_for_buf,		/* Index: 27 */
2167 	sd_destroypkt_for_buf,		/* Index: 28 */
2168 	sd_destroypkt_for_buf,		/* Index: 29 */
2169 	sd_destroypkt_for_buf,		/* Index: 30 */
2170
2171 	/*
2172 	 * Chain for buf IO for large sector size disk drive targets
2173 	 * with checksumming (PM disabled)
2174 	 */
2175 	sd_destroypkt_for_buf,		/* Index: 31 */
2176 	sd_destroypkt_for_buf,		/* Index: 32 */
2177 	sd_destroypkt_for_buf,		/* Index: 33 */
2178 	sd_destroypkt_for_buf,		/* Index: 34 */
2179 };
2180
2181
2182
2183 /*
2184  * Array to map a layering chain index to the appropriate chain "type".
2185  * The chain type indicates a specific property/usage of the chain.
2186  * The redundant entries are present so that the index used for accessing
2187  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2188  * with this table as well.
2189  */
2190
2191 #define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
2192 #define	SD_CHAIN_BUFIO			1	/* regular buf IO */
2193 #define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
2194 #define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
2195 #define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
2196 						/* (for error recovery) */
2197
2198 static int sd_chain_type_map[] = {
2199
2200 	/* Chain for buf IO for disk drive targets (PM enabled) */
2201 	SD_CHAIN_BUFIO,			/* Index: 0 */
2202 	SD_CHAIN_BUFIO,			/* Index: 1 */
2203 	SD_CHAIN_BUFIO,			/* Index: 2 */
2204
2205 	/* Chain for buf IO for disk drive targets (PM disabled) */
2206 	SD_CHAIN_BUFIO,			/* Index: 3 */
2207 	SD_CHAIN_BUFIO,			/* Index: 4 */
2208
2209 	/*
2210 	 * Chain for buf IO for removable-media or large sector size
2211 	 * disk drive targets (PM enabled)
2212 	 */
2213 	SD_CHAIN_BUFIO,			/* Index: 5 */
2214 	SD_CHAIN_BUFIO,			/* Index: 6 */
2215 	SD_CHAIN_BUFIO,			/* Index: 7 */
2216 	SD_CHAIN_BUFIO,			/* Index: 8 */
2217
2218 	/*
2219 	 * Chain for buf IO for removable-media or large sector size
2220 	 * disk drive targets (PM disabled)
2221 	 */
2222 	SD_CHAIN_BUFIO,			/* Index: 9 */
2223 	SD_CHAIN_BUFIO,			/* Index: 10 */
2224 	SD_CHAIN_BUFIO,			/* Index: 11 */
2225
2226 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
2227 	SD_CHAIN_BUFIO,			/* Index: 12 */
2228 	SD_CHAIN_BUFIO,			/* Index: 13 */
2229 	SD_CHAIN_BUFIO,			/* Index: 14 */
2230 	SD_CHAIN_BUFIO,			/* Index: 15 */
2231
2232 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
2233 	SD_CHAIN_BUFIO,			/* Index: 16 */
2234 	SD_CHAIN_BUFIO,			/* Index: 17 */
2235 	SD_CHAIN_BUFIO,			/* Index: 18 */
2236
2237 	/* Chain for USCSI commands (non-checksum targets) */
2238 	SD_CHAIN_USCSI,			/* Index: 19 */
2239 	SD_CHAIN_USCSI,			/* Index: 20 */
2240
2241 	/* Chain for USCSI commands (checksum targets) */
2242 	SD_CHAIN_USCSI,			/* Index: 21 */
2243 	SD_CHAIN_USCSI,			/* Index: 22 */
2244 	SD_CHAIN_USCSI,			/* Index: 23 */
2245
2246 	/* Chain for "direct" USCSI commands (all targets) */
2247 	SD_CHAIN_DIRECT,		/* Index: 24 */
2248
2249 	/* Chain for "direct priority" USCSI commands (all targets) */
2250 	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */
2251
2252 	/*
2253 	 * Chain for buf IO for large sector size disk drive targets
2254 	 * with checksumming (PM enabled)
2255 	 */
2256 	SD_CHAIN_BUFIO,			/* Index: 26 */
2257 	SD_CHAIN_BUFIO,			/* Index: 27 */
2258 	SD_CHAIN_BUFIO,			/* Index: 28 */
2259 	SD_CHAIN_BUFIO,			/* Index: 29 */
2260 	SD_CHAIN_BUFIO,			/* Index: 30 */
2261
2262 	/*
2263 	 * Chain for buf IO for large sector size disk drive targets
2264 	 * with checksumming (PM disabled)
2265 	 */
2266 	SD_CHAIN_BUFIO,			/* Index: 31 */
2267 	SD_CHAIN_BUFIO,			/* Index: 32 */
2268 	SD_CHAIN_BUFIO,			/* Index: 33 */
2269 	SD_CHAIN_BUFIO,			/* Index: 34 */
2270 };
2271
2272
2273 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain.
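 * (Illustrative usage: a completion routine may distinguish buf(9S) IO
 * from uscsi IO with a test such as "if (SD_IS_BUFIO(xp)) { ... }".)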
 */
2274 #define	SD_IS_BUFIO(xp)	\
2275 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2276
2277 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2278 #define	SD_IS_DIRECT_PRIORITY(xp)	\
2279 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2280
2281
2282
2283 /*
2284  * Struct, array, and macros to map a specific chain to the appropriate
2285  * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2286  *
2287  * The sd_chain_index_map[] array is used at attach time to set the various
2288  * un_xxx_chain type members of the sd_lun softstate to the specific layering
2289  * chain to be used with the instance. This allows different instances to use
2290  * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2291  * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2292  * values at sd_xbuf init time, this allows (1) layering chains to be changed
2293  * dynamically and without locking; and (2) a layer to update the
2294  * xb_chain_io[start|done] member in a given xbuf with its current index value,
2295  * to allow for deferred processing of an IO within the same chain from a
2296  * different execution context.
2297  */
2298
2299 struct sd_chain_index {
2300 	int	sci_iostart_index;
2301 	int	sci_iodone_index;
2302 };
2303
2304 static struct sd_chain_index	sd_chain_index_map[] = {
2305 	{ SD_CHAIN_DISK_IOSTART,		SD_CHAIN_DISK_IODONE },
2306 	{ SD_CHAIN_DISK_IOSTART_NO_PM,		SD_CHAIN_DISK_IODONE_NO_PM },
2307 	{ SD_CHAIN_RMMEDIA_IOSTART,		SD_CHAIN_RMMEDIA_IODONE },
2308 	{ SD_CHAIN_RMMEDIA_IOSTART_NO_PM,	SD_CHAIN_RMMEDIA_IODONE_NO_PM },
2309 	{ SD_CHAIN_CHKSUM_IOSTART,		SD_CHAIN_CHKSUM_IODONE },
2310 	{ SD_CHAIN_CHKSUM_IOSTART_NO_PM,	SD_CHAIN_CHKSUM_IODONE_NO_PM },
2311 	{ SD_CHAIN_USCSI_CMD_IOSTART,		SD_CHAIN_USCSI_CMD_IODONE },
2312 	{ SD_CHAIN_USCSI_CHKSUM_IOSTART,	SD_CHAIN_USCSI_CHKSUM_IODONE },
2313 	{ SD_CHAIN_DIRECT_CMD_IOSTART,		SD_CHAIN_DIRECT_CMD_IODONE },
2314 	{ SD_CHAIN_PRIORITY_CMD_IOSTART,	SD_CHAIN_PRIORITY_CMD_IODONE },
2315 	{ SD_CHAIN_MSS_CHKSUM_IOSTART,		SD_CHAIN_MSS_CHKSUM_IODONE },
2316 	{ SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM,	SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM },
2317
2318 };
2319
2320
2321 /*
2322  * The following are indexes into the sd_chain_index_map[] array.
2323  */
2324
2325 /* un->un_buf_chain_type must be set to one of these */
2326 #define	SD_CHAIN_INFO_DISK		0
2327 #define	SD_CHAIN_INFO_DISK_NO_PM	1
2328 #define	SD_CHAIN_INFO_RMMEDIA		2
2329 #define	SD_CHAIN_INFO_MSS_DISK		2
2330 #define	SD_CHAIN_INFO_RMMEDIA_NO_PM	3
2331 #define	SD_CHAIN_INFO_MSS_DSK_NO_PM	3
2332 #define	SD_CHAIN_INFO_CHKSUM		4
2333 #define	SD_CHAIN_INFO_CHKSUM_NO_PM	5
2334 #define	SD_CHAIN_INFO_MSS_DISK_CHKSUM	10
2335 #define	SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM	11
2336
2337 /* un->un_uscsi_chain_type must be set to one of these */
2338 #define	SD_CHAIN_INFO_USCSI_CMD		6
2339 /* USCSI with PM disabled is the same as DIRECT */
2340 #define	SD_CHAIN_INFO_USCSI_CMD_NO_PM	8
2341 #define	SD_CHAIN_INFO_USCSI_CHKSUM	7
2342
2343 /* un->un_direct_chain_type must be set to one of these */
2344 #define	SD_CHAIN_INFO_DIRECT_CMD	8
2345
2346 /* un->un_priority_chain_type must be set to one of these */
2347 #define	SD_CHAIN_INFO_PRIORITY_CMD	9
2348
2349 /* size for devid inquiries */
2350 #define	MAX_INQUIRY_SIZE	0xF0
2351
2352 /*
2353  * Macros used by functions to pass a given buf(9S) struct along to the
2354  * next function in the layering chain for further processing.
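 *
 * (Sketch of the iodone direction, for illustration: a layer at table
 * index "index" hands a completed buf to the next iodone routine with
 * SD_NEXT_IODONE(index, un, bp), which decrements the index, while the
 * core begins completion processing via SD_BEGIN_IODONE() using the
 * xb_chain_iodone index value saved in the xbuf.)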
2355 * 2356 * In the following macros, passing more than three arguments to the called 2357 * routines causes the optimizer for the SPARC compiler to stop doing tail 2358 * call elimination which results in significant performance degradation. 2359 */ 2360 #define SD_BEGIN_IOSTART(index, un, bp) \ 2361 ((*(sd_iostart_chain[index]))(index, un, bp)) 2362 2363 #define SD_BEGIN_IODONE(index, un, bp) \ 2364 ((*(sd_iodone_chain[index]))(index, un, bp)) 2365 2366 #define SD_NEXT_IOSTART(index, un, bp) \ 2367 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2368 2369 #define SD_NEXT_IODONE(index, un, bp) \ 2370 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2371 2372 /* 2373 * Function: _init 2374 * 2375 * Description: This is the driver _init(9E) entry point. 2376 * 2377 * Return Code: Returns the value from mod_install(9F) or 2378 * ddi_soft_state_init(9F) as appropriate. 2379 * 2380 * Context: Called when driver module loaded. 2381 */ 2382 2383 int 2384 _init(void) 2385 { 2386 int err; 2387 2388 /* establish driver name from module name */ 2389 sd_label = (char *)mod_modname(&modlinkage); 2390 2391 #ifndef XPV_HVM_DRIVER 2392 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2393 SD_MAXUNIT); 2394 if (err != 0) { 2395 return (err); 2396 } 2397 2398 #else /* XPV_HVM_DRIVER */ 2399 /* Remove the leading "hvm_" from the module name */ 2400 ASSERT(strncmp(sd_label, "hvm_", strlen("hvm_")) == 0); 2401 sd_label += strlen("hvm_"); 2402 2403 #endif /* XPV_HVM_DRIVER */ 2404 2405 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2406 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2407 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2408 2409 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2410 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2411 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2412 2413 /* 2414 * it's ok to init here even for fibre device 2415 */ 2416 sd_scsi_probe_cache_init(); 2417 2418 sd_scsi_target_lun_init(); 2419 2420 /* 2421 * Creating taskq before mod_install ensures that all callers (threads) 2422 * that enter the module after a successful mod_install encounter 2423 * a valid taskq. 2424 */ 2425 sd_taskq_create(); 2426 2427 err = mod_install(&modlinkage); 2428 if (err != 0) { 2429 /* delete taskq if install fails */ 2430 sd_taskq_delete(); 2431 2432 mutex_destroy(&sd_detach_mutex); 2433 mutex_destroy(&sd_log_mutex); 2434 mutex_destroy(&sd_label_mutex); 2435 2436 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2437 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2438 cv_destroy(&sd_tr.srq_inprocess_cv); 2439 2440 sd_scsi_probe_cache_fini(); 2441 2442 sd_scsi_target_lun_fini(); 2443 2444 #ifndef XPV_HVM_DRIVER 2445 ddi_soft_state_fini(&sd_state); 2446 #endif /* !XPV_HVM_DRIVER */ 2447 return (err); 2448 } 2449 2450 return (err); 2451 } 2452 2453 2454 /* 2455 * Function: _fini 2456 * 2457 * Description: This is the driver _fini(9E) entry point. 2458 * 2459 * Return Code: Returns the value from mod_remove(9F) 2460 * 2461 * Context: Called when driver module is unloaded. 
2462  */
2463
2464 int
2465 _fini(void)
2466 {
2467 	int err;
2468
2469 	if ((err = mod_remove(&modlinkage)) != 0) {
2470 		return (err);
2471 	}
2472
2473 	sd_taskq_delete();
2474
2475 	mutex_destroy(&sd_detach_mutex);
2476 	mutex_destroy(&sd_log_mutex);
2477 	mutex_destroy(&sd_label_mutex);
2478 	mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2479
2480 	sd_scsi_probe_cache_fini();
2481
2482 	sd_scsi_target_lun_fini();
2483
2484 	cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2485 	cv_destroy(&sd_tr.srq_inprocess_cv);
2486
2487 #ifndef XPV_HVM_DRIVER
2488 	ddi_soft_state_fini(&sd_state);
2489 #endif /* !XPV_HVM_DRIVER */
2490
2491 	return (err);
2492 }
2493
2494
2495 /*
2496  * Function: _info
2497  *
2498  * Description: This is the driver _info(9E) entry point.
2499  *
2500  * Arguments: modinfop - pointer to the driver modinfo structure
2501  *
2502  * Return Code: Returns the value from mod_info(9F).
2503  *
2504  * Context: Kernel thread context
2505  */
2506
2507 int
2508 _info(struct modinfo *modinfop)
2509 {
2510 	return (mod_info(&modlinkage, modinfop));
2511 }
2512
2513
2514 /*
2515  * The following routines implement the driver message logging facility.
2516  * They provide component- and level-based debug output filtering.
2517  * Output may also be restricted to messages for a single instance by
2518  * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2519  * to NULL, then messages for all instances are printed.
2520  *
2521  * These routines have been cloned from each other due to the language
2522  * constraints of macros and variable argument list processing.
2523  */
2524
2525
2526 /*
2527  * Function: sd_log_err
2528  *
2529  * Description: This routine is called by the SD_ERROR macro for debug
2530  *              logging of error conditions.
2531  *
2532  * Arguments: comp - driver component being logged
2533  *            un - pointer to driver soft state (unit) structure
2534  *            fmt - error string and format to be logged
2535  */
2536
2537 static void
2538 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2539 {
2540 	va_list ap;
2541 	dev_info_t *dev;
2542
2543 	ASSERT(un != NULL);
2544 	dev = SD_DEVINFO(un);
2545 	ASSERT(dev != NULL);
2546
2547 	/*
2548 	 * Filter messages based on the global component and level masks.
2549 	 * Also print if un matches the value of sd_debug_un, or if
2550 	 * sd_debug_un is set to NULL.
2551 	 */
2552 	if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2553 	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2554 		mutex_enter(&sd_log_mutex);
2555 		va_start(ap, fmt);
2556 		(void) vsprintf(sd_log_buf, fmt, ap);
2557 		va_end(ap);
2558 		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2559 		mutex_exit(&sd_log_mutex);
2560 	}
2561 #ifdef SD_FAULT_INJECTION
2562 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2563 	if (un->sd_injection_mask & comp) {
2564 		mutex_enter(&sd_log_mutex);
2565 		va_start(ap, fmt);
2566 		(void) vsprintf(sd_log_buf, fmt, ap);
2567 		va_end(ap);
2568 		sd_injection_log(sd_log_buf, un);
2569 		mutex_exit(&sd_log_mutex);
2570 	}
2571 #endif
2572 }
2573
2574
2575 /*
2576  * Function: sd_log_info
2577  *
2578  * Description: This routine is called by the SD_INFO macro for debug
2579  *              logging of general purpose informational conditions.
2580  *
2581  * Arguments: comp - driver component being logged
2582  *            un - pointer to driver soft state (unit) structure
2583  *            fmt - info string and format to be logged
2584  */
2585
2586 static void
2587 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2588 {
2589 	va_list ap;
2590 	dev_info_t *dev;
2591
2592 	ASSERT(un != NULL);
2593 	dev = SD_DEVINFO(un);
2594 	ASSERT(dev != NULL);
2595
2596 	/*
2597 	 * Filter messages based on the global component and level masks.
2598 	 * Also print if un matches the value of sd_debug_un, or if
2599 	 * sd_debug_un is set to NULL.
2600 	 */
2601 	if ((sd_component_mask & component) &&
2602 	    (sd_level_mask & SD_LOGMASK_INFO) &&
2603 	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2604 		mutex_enter(&sd_log_mutex);
2605 		va_start(ap, fmt);
2606 		(void) vsprintf(sd_log_buf, fmt, ap);
2607 		va_end(ap);
2608 		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2609 		mutex_exit(&sd_log_mutex);
2610 	}
2611 #ifdef SD_FAULT_INJECTION
2612 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2613 	if (un->sd_injection_mask & component) {
2614 		mutex_enter(&sd_log_mutex);
2615 		va_start(ap, fmt);
2616 		(void) vsprintf(sd_log_buf, fmt, ap);
2617 		va_end(ap);
2618 		sd_injection_log(sd_log_buf, un);
2619 		mutex_exit(&sd_log_mutex);
2620 	}
2621 #endif
2622 }
2623
2624
2625 /*
2626  * Function: sd_log_trace
2627  *
2628  * Description: This routine is called by the SD_TRACE macro for debug
2629  *              logging of trace conditions (i.e. function entry/exit).
2630  *
2631  * Arguments: comp - driver component being logged
2632  *            un - pointer to driver soft state (unit) structure
2633  *            fmt - trace string and format to be logged
2634  */
2635
2636 static void
2637 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2638 {
2639 	va_list ap;
2640 	dev_info_t *dev;
2641
2642 	ASSERT(un != NULL);
2643 	dev = SD_DEVINFO(un);
2644 	ASSERT(dev != NULL);
2645
2646 	/*
2647 	 * Filter messages based on the global component and level masks.
2648 	 * Also print if un matches the value of sd_debug_un, or if
2649 	 * sd_debug_un is set to NULL.
2650 	 */
2651 	if ((sd_component_mask & component) &&
2652 	    (sd_level_mask & SD_LOGMASK_TRACE) &&
2653 	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2654 		mutex_enter(&sd_log_mutex);
2655 		va_start(ap, fmt);
2656 		(void) vsprintf(sd_log_buf, fmt, ap);
2657 		va_end(ap);
2658 		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2659 		mutex_exit(&sd_log_mutex);
2660 	}
2661 #ifdef SD_FAULT_INJECTION
2662 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2663 	if (un->sd_injection_mask & component) {
2664 		mutex_enter(&sd_log_mutex);
2665 		va_start(ap, fmt);
2666 		(void) vsprintf(sd_log_buf, fmt, ap);
2667 		va_end(ap);
2668 		sd_injection_log(sd_log_buf, un);
2669 		mutex_exit(&sd_log_mutex);
2670 	}
2671 #endif
2672 }
2673
2674
2675 /*
2676  * Function: sdprobe
2677  *
2678  * Description: This is the driver probe(9e) entry point function.
2679  *
2680  * Arguments: devi - opaque device info handle
2681  *
2682  * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
2683  *              DDI_PROBE_FAILURE: If the probe failed.
2684  *              DDI_PROBE_PARTIAL: If the instance is not present now,
2685  *                                 but may be present in the future.
2686  */
2687
2688 static int
2689 sdprobe(dev_info_t *devi)
2690 {
2691 	struct scsi_device *devp;
2692 	int rval;
2693 #ifndef XPV_HVM_DRIVER
2694 	int instance = ddi_get_instance(devi);
2695 #endif /* !XPV_HVM_DRIVER */
2696
2697 	/*
2698 	 * if it wasn't for pln, sdprobe could actually be nulldev
2699 	 * in the "__fibre" case.
2700 	 */
2701 	if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
2702 		return (DDI_PROBE_DONTCARE);
2703 	}
2704
2705 	devp = ddi_get_driver_private(devi);
2706
2707 	if (devp == NULL) {
2708 		/* Ooops... nexus driver is mis-configured...
*/ 2709 return (DDI_PROBE_FAILURE); 2710 } 2711 2712 #ifndef XPV_HVM_DRIVER 2713 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2714 return (DDI_PROBE_PARTIAL); 2715 } 2716 #endif /* !XPV_HVM_DRIVER */ 2717 2718 /* 2719 * Call the SCSA utility probe routine to see if we actually 2720 * have a target at this SCSI nexus. 2721 */ 2722 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2723 case SCSIPROBE_EXISTS: 2724 switch (devp->sd_inq->inq_dtype) { 2725 case DTYPE_DIRECT: 2726 rval = DDI_PROBE_SUCCESS; 2727 break; 2728 case DTYPE_RODIRECT: 2729 /* CDs etc. Can be removable media */ 2730 rval = DDI_PROBE_SUCCESS; 2731 break; 2732 case DTYPE_OPTICAL: 2733 /* 2734 * Rewritable optical driver HP115AA 2735 * Can also be removable media 2736 */ 2737 2738 /* 2739 * Do not attempt to bind to DTYPE_OPTICAL if 2740 * pre solaris 9 sparc sd behavior is required 2741 * 2742 * If first time through and sd_dtype_optical_bind 2743 * has not been set in /etc/system check properties 2744 */ 2745 2746 if (sd_dtype_optical_bind < 0) { 2747 sd_dtype_optical_bind = ddi_prop_get_int 2748 (DDI_DEV_T_ANY, devi, 0, 2749 "optical-device-bind", 1); 2750 } 2751 2752 if (sd_dtype_optical_bind == 0) { 2753 rval = DDI_PROBE_FAILURE; 2754 } else { 2755 rval = DDI_PROBE_SUCCESS; 2756 } 2757 break; 2758 2759 case DTYPE_NOTPRESENT: 2760 default: 2761 rval = DDI_PROBE_FAILURE; 2762 break; 2763 } 2764 break; 2765 default: 2766 rval = DDI_PROBE_PARTIAL; 2767 break; 2768 } 2769 2770 /* 2771 * This routine checks for resource allocation prior to freeing, 2772 * so it will take care of the "smart probing" case where a 2773 * scsi_probe() may or may not have been issued and will *not* 2774 * free previously-freed resources. 2775 */ 2776 scsi_unprobe(devp); 2777 return (rval); 2778 } 2779 2780 2781 /* 2782 * Function: sdinfo 2783 * 2784 * Description: This is the driver getinfo(9e) entry point function. 2785 * Given the device number, return the devinfo pointer from 2786 * the scsi_device structure or the instance number 2787 * associated with the dev_t. 2788 * 2789 * Arguments: dip - pointer to device info structure 2790 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2791 * DDI_INFO_DEVT2INSTANCE) 2792 * arg - driver dev_t 2793 * resultp - user buffer for request response 2794 * 2795 * Return Code: DDI_SUCCESS 2796 * DDI_FAILURE 2797 */ 2798 /* ARGSUSED */ 2799 static int 2800 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2801 { 2802 struct sd_lun *un; 2803 dev_t dev; 2804 int instance; 2805 int error; 2806 2807 switch (infocmd) { 2808 case DDI_INFO_DEVT2DEVINFO: 2809 dev = (dev_t)arg; 2810 instance = SDUNIT(dev); 2811 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2812 return (DDI_FAILURE); 2813 } 2814 *result = (void *) SD_DEVINFO(un); 2815 error = DDI_SUCCESS; 2816 break; 2817 case DDI_INFO_DEVT2INSTANCE: 2818 dev = (dev_t)arg; 2819 instance = SDUNIT(dev); 2820 *result = (void *)(uintptr_t)instance; 2821 error = DDI_SUCCESS; 2822 break; 2823 default: 2824 error = DDI_FAILURE; 2825 } 2826 return (error); 2827 } 2828 2829 /* 2830 * Function: sd_prop_op 2831 * 2832 * Description: This is the driver prop_op(9e) entry point function. 2833 * Return the number of blocks for the partition in question 2834 * or forward the request to the property facilities. 
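 *
 * (Illustration: a prop_op(9E) request such as an "Nblocks" query for a
 * partition is satisfied through cmlb_prop_op() using the partition
 * information, while requests for instances with no attached soft state
 * fall through to ddi_prop_op(); see the function body below.)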
2835 * 2836 * Arguments: dev - device number 2837 * dip - pointer to device info structure 2838 * prop_op - property operator 2839 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2840 * name - pointer to property name 2841 * valuep - pointer or address of the user buffer 2842 * lengthp - property length 2843 * 2844 * Return Code: DDI_PROP_SUCCESS 2845 * DDI_PROP_NOT_FOUND 2846 * DDI_PROP_UNDEFINED 2847 * DDI_PROP_NO_MEMORY 2848 * DDI_PROP_BUF_TOO_SMALL 2849 */ 2850 2851 static int 2852 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2853 char *name, caddr_t valuep, int *lengthp) 2854 { 2855 struct sd_lun *un; 2856 2857 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2858 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2859 name, valuep, lengthp)); 2860 2861 return (cmlb_prop_op(un->un_cmlbhandle, 2862 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2863 SDPART(dev), (void *)SD_PATH_DIRECT)); 2864 } 2865 2866 /* 2867 * The following functions are for smart probing: 2868 * sd_scsi_probe_cache_init() 2869 * sd_scsi_probe_cache_fini() 2870 * sd_scsi_clear_probe_cache() 2871 * sd_scsi_probe_with_cache() 2872 */ 2873 2874 /* 2875 * Function: sd_scsi_probe_cache_init 2876 * 2877 * Description: Initializes the probe response cache mutex and head pointer. 2878 * 2879 * Context: Kernel thread context 2880 */ 2881 2882 static void 2883 sd_scsi_probe_cache_init(void) 2884 { 2885 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2886 sd_scsi_probe_cache_head = NULL; 2887 } 2888 2889 2890 /* 2891 * Function: sd_scsi_probe_cache_fini 2892 * 2893 * Description: Frees all resources associated with the probe response cache. 2894 * 2895 * Context: Kernel thread context 2896 */ 2897 2898 static void 2899 sd_scsi_probe_cache_fini(void) 2900 { 2901 struct sd_scsi_probe_cache *cp; 2902 struct sd_scsi_probe_cache *ncp; 2903 2904 /* Clean up our smart probing linked list */ 2905 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2906 ncp = cp->next; 2907 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2908 } 2909 sd_scsi_probe_cache_head = NULL; 2910 mutex_destroy(&sd_scsi_probe_cache_mutex); 2911 } 2912 2913 2914 /* 2915 * Function: sd_scsi_clear_probe_cache 2916 * 2917 * Description: This routine clears the probe response cache. This is 2918 * done when open() returns ENXIO so that when deferred 2919 * attach is attempted (possibly after a device has been 2920 * turned on) we will retry the probe. Since we don't know 2921 * which target we failed to open, we just clear the 2922 * entire cache. 2923 * 2924 * Context: Kernel thread context 2925 */ 2926 2927 static void 2928 sd_scsi_clear_probe_cache(void) 2929 { 2930 struct sd_scsi_probe_cache *cp; 2931 int i; 2932 2933 mutex_enter(&sd_scsi_probe_cache_mutex); 2934 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2935 /* 2936 * Reset all entries to SCSIPROBE_EXISTS. This will 2937 * force probing to be performed the next time 2938 * sd_scsi_probe_with_cache is called. 2939 */ 2940 for (i = 0; i < NTARGETS_WIDE; i++) { 2941 cp->cache[i] = SCSIPROBE_EXISTS; 2942 } 2943 } 2944 mutex_exit(&sd_scsi_probe_cache_mutex); 2945 } 2946 2947 2948 /* 2949 * Function: sd_scsi_probe_with_cache 2950 * 2951 * Description: This routine implements support for a scsi device probe 2952 * with cache. The driver maintains a cache of the target 2953 * responses to scsi probes. 
If we get no response from a
2954  *              target during a probe inquiry, we remember that, and we
2955  *              avoid additional calls to scsi_probe on non-zero LUNs
2956  *              on the same target until the cache is cleared. By doing
2957  *              so we avoid the 1/4 sec selection timeout for nonzero
2958  *              LUNs. lun0 of a target is always probed.
2959  *
2960  * Arguments: devp - Pointer to a scsi_device(9S) structure
2961  *            waitfn - indicates what the allocator routines should
2962  *                     do when resources are not available. This value
2963  *                     is passed on to scsi_probe() when that routine
2964  *                     is called.
2965  *
2966  * Return Code: SCSIPROBE_NORESP if the cache holds a NORESP for this target;
2967  *              otherwise the value returned by scsi_probe(9F).
2968  *
2969  * Context: Kernel thread context
2970  */
2971
2972 static int
2973 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
2974 {
2975 	struct sd_scsi_probe_cache *cp;
2976 	dev_info_t *pdip = ddi_get_parent(devp->sd_dev);
2977 	int lun, tgt;
2978
2979 	lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2980 	    SCSI_ADDR_PROP_LUN, 0);
2981 	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2982 	    SCSI_ADDR_PROP_TARGET, -1);
2983
2984 	/* Make sure the target is within the cacheable (parallel SCSI) range */
2985 	if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
2986 		/* do it the old way (no cache) */
2987 		return (scsi_probe(devp, waitfn));
2988 	}
2989
2990 	mutex_enter(&sd_scsi_probe_cache_mutex);
2991
2992 	/* Find the cache for this scsi bus instance */
2993 	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2994 		if (cp->pdip == pdip) {
2995 			break;
2996 		}
2997 	}
2998
2999 	/* If we can't find a cache for this pdip, create one */
3000 	if (cp == NULL) {
3001 		int i;
3002
3003 		cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
3004 		    KM_SLEEP);
3005 		cp->pdip = pdip;
3006 		cp->next = sd_scsi_probe_cache_head;
3007 		sd_scsi_probe_cache_head = cp;
3008 		for (i = 0; i < NTARGETS_WIDE; i++) {
3009 			cp->cache[i] = SCSIPROBE_EXISTS;
3010 		}
3011 	}
3012
3013 	mutex_exit(&sd_scsi_probe_cache_mutex);
3014
3015 	/* Recompute the cache for this target if LUN zero */
3016 	if (lun == 0) {
3017 		cp->cache[tgt] = SCSIPROBE_EXISTS;
3018 	}
3019
3020 	/* Don't probe if cache remembers a NORESP from a previous LUN. */
3021 	if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
3022 		return (SCSIPROBE_NORESP);
3023 	}
3024
3025 	/* Do the actual probe; save & return the result */
3026 	return (cp->cache[tgt] = scsi_probe(devp, waitfn));
3027 }
3028
3029
3030 /*
3031  * Function: sd_scsi_target_lun_init
3032  *
3033  * Description: Initializes the attached lun chain mutex and head pointer.
3034  *
3035  * Context: Kernel thread context
3036  */
3037
3038 static void
3039 sd_scsi_target_lun_init(void)
3040 {
3041 	mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
3042 	sd_scsi_target_lun_head = NULL;
3043 }
3044
3045
3046 /*
3047  * Function: sd_scsi_target_lun_fini
3048  *
3049  * Description: Frees all resources associated with the attached lun
3050  *              chain.
3051  *
3052  * Context: Kernel thread context
3053  */
3054
3055 static void
3056 sd_scsi_target_lun_fini(void)
3057 {
3058 	struct sd_scsi_hba_tgt_lun *cp;
3059 	struct sd_scsi_hba_tgt_lun *ncp;
3060
3061 	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
3062 		ncp = cp->next;
3063 		kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
3064 	}
3065 	sd_scsi_target_lun_head = NULL;
3066 	mutex_destroy(&sd_scsi_target_lun_mutex);
3067 }
3068
3069
3070 /*
3071  * Function: sd_scsi_get_target_lun_count
3072  *
3073  * Description: This routine checks the attached lun chain to see how
3074  *              many luns are attached on the required SCSI controller
3075  *              and target. Currently, some capabilities, such as tagged
3076  *              queueing, are supported by the HBA on a per-target basis,
3077  *              so all luns on a target have the same capabilities. Based
3078  *              on this assumption, sd should only set these capabilities
3079  *              once per target. This function is called when sd needs to
3080  *              decide how many luns are already attached on a target.
3081  *
3082  * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3083  *                  controller device.
3084  *            target - The target ID on the controller's SCSI bus.
3085  *
3086  * Return Code: The number of luns attached on the required target and
3087  *              controller.
3088  *              -1 if target ID is not in parallel SCSI scope or the given
3089  *              dip is not in the chain.
3090  *
3091  * Context: Kernel thread context
3092  */
3093
3094 static int
3095 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
3096 {
3097 	struct sd_scsi_hba_tgt_lun *cp;
3098
3099 	if ((target < 0) || (target >= NTARGETS_WIDE)) {
3100 		return (-1);
3101 	}
3102
3103 	mutex_enter(&sd_scsi_target_lun_mutex);
3104
3105 	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
3106 		if (cp->pdip == dip) {
3107 			break;
3108 		}
3109 	}
3110
3111 	mutex_exit(&sd_scsi_target_lun_mutex);
3112
3113 	if (cp == NULL) {
3114 		return (-1);
3115 	}
3116
3117 	return (cp->nlun[target]);
3118 }
3119
3120
3121 /*
3122  * Function: sd_scsi_update_lun_on_target
3123  *
3124  * Description: This routine is used to update the attached lun chain when a
3125  *              lun is attached or detached on a target.
3126  *
3127  * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3128  *                  controller device.
3129  *            target - The target ID on the controller's SCSI bus.
3130  *            flag - Indicates whether the lun is being attached or detached.
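 *                   (For example, SD_SCSI_LUN_ATTACH, as tested in the
 *                   function body below; any other value is treated as
 *                   a detach and decrements the lun count.)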
3131  *
3132  * Context: Kernel thread context
3133  */
3134
3135 static void
3136 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
3137 {
3138 	struct sd_scsi_hba_tgt_lun *cp;
3139
3140 	mutex_enter(&sd_scsi_target_lun_mutex);
3141
3142 	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
3143 		if (cp->pdip == dip) {
3144 			break;
3145 		}
3146 	}
3147
3148 	if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
3149 		cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
3150 		    KM_SLEEP);
3151 		cp->pdip = dip;
3152 		cp->next = sd_scsi_target_lun_head;
3153 		sd_scsi_target_lun_head = cp;
3154 	}
3155
3156 	mutex_exit(&sd_scsi_target_lun_mutex);
3157
3158 	if (cp != NULL) {
3159 		if (flag == SD_SCSI_LUN_ATTACH) {
3160 			cp->nlun[target]++;
3161 		} else {
3162 			cp->nlun[target]--;
3163 		}
3164 	}
3165 }
3166
3167
3168 /*
3169  * Function: sd_spin_up_unit
3170  *
3171  * Description: Issues the following commands to spin up the device:
3172  *              START STOP UNIT and INQUIRY.
3173  *
3174  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3175  *                  structure for this target.
3176  *
3177  * Return Code: 0 - success
3178  *              EIO - failure
3179  *              EACCES - reservation conflict
3180  *
3181  * Context: Kernel thread context
3182  */
3183
3184 static int
3185 sd_spin_up_unit(sd_ssc_t *ssc)
3186 {
3187 	size_t resid = 0;
3188 	int has_conflict = FALSE;
3189 	uchar_t *bufaddr;
3190 	int status;
3191 	struct sd_lun *un;
3192
3193 	ASSERT(ssc != NULL);
3194 	un = ssc->ssc_un;
3195 	ASSERT(un != NULL);
3196
3197 	/*
3198 	 * Send a throwaway START UNIT command.
3199 	 *
3200 	 * If we fail on this, we don't care presently what precisely
3201 	 * is wrong. EMC's arrays will also fail this with a check
3202 	 * condition (0x2/0x4/0x3) if the device is "inactive," but
3203 	 * we don't want to fail the attach because it may become
3204 	 * "active" later.
3205 	 * We don't know whether power condition is supported at this
3206 	 * stage, so use the START STOP bit.
3207 	 */
3208 	status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
3209 	    SD_TARGET_START, SD_PATH_DIRECT);
3210
3211 	if (status != 0) {
3212 		if (status == EACCES)
3213 			has_conflict = TRUE;
3214 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3215 	}
3216
3217 	/*
3218 	 * Send another INQUIRY command to the target. This is necessary for
3219 	 * non-removable media direct access devices because their INQUIRY data
3220 	 * may not be fully qualified until they are spun up (perhaps via the
3221 	 * START command above). (Note: This seems to be needed for some
3222 	 * legacy devices only.) The INQUIRY command should succeed even if a
3223 	 * Reservation Conflict is present.
3224 	 */
3225 	bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
3226
3227 	if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid)
3228 	    != 0) {
3229 		kmem_free(bufaddr, SUN_INQSIZE);
3230 		sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
3231 		return (EIO);
3232 	}
3233
3234 	/*
3235 	 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
3236 	 * Note that this routine does not return a failure here even if the
3237 	 * INQUIRY command did not return any data. This is a legacy behavior.
3238 	 */
3239 	if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
3240 		bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
3241 	}
3242
3243 	kmem_free(bufaddr, SUN_INQSIZE);
3244
3245 	/* If we hit a reservation conflict above, tell the caller.
*/ 3246 if (has_conflict == TRUE) { 3247 return (EACCES); 3248 } 3249 3250 return (0); 3251 } 3252 3253 #ifdef _LP64 3254 /* 3255 * Function: sd_enable_descr_sense 3256 * 3257 * Description: This routine attempts to select descriptor sense format 3258 * using the Control mode page. Devices that support 64 bit 3259 * LBAs (for >2TB luns) should also implement descriptor 3260 * sense data so we will call this function whenever we see 3261 * a lun larger than 2TB. If for some reason the device 3262 * supports 64 bit LBAs but doesn't support descriptor sense 3263 * presumably the mode select will fail. Everything will 3264 * continue to work normally except that we will not get 3265 * complete sense data for commands that fail with an LBA 3266 * larger than 32 bits. 3267 * 3268 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3269 * structure for this target. 3270 * 3271 * Context: Kernel thread context only 3272 */ 3273 3274 static void 3275 sd_enable_descr_sense(sd_ssc_t *ssc) 3276 { 3277 uchar_t *header; 3278 struct mode_control_scsi3 *ctrl_bufp; 3279 size_t buflen; 3280 size_t bd_len; 3281 int status; 3282 struct sd_lun *un; 3283 3284 ASSERT(ssc != NULL); 3285 un = ssc->ssc_un; 3286 ASSERT(un != NULL); 3287 3288 /* 3289 * Read MODE SENSE page 0xA, Control Mode Page 3290 */ 3291 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3292 sizeof (struct mode_control_scsi3); 3293 header = kmem_zalloc(buflen, KM_SLEEP); 3294 3295 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 3296 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT); 3297 3298 if (status != 0) { 3299 SD_ERROR(SD_LOG_COMMON, un, 3300 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3301 goto eds_exit; 3302 } 3303 3304 /* 3305 * Determine size of Block Descriptors in order to locate 3306 * the mode page data. ATAPI devices return 0, SCSI devices 3307 * should return MODE_BLK_DESC_LENGTH. 3308 */ 3309 bd_len = ((struct mode_header *)header)->bdesc_length; 3310 3311 /* Clear the mode data length field for MODE SELECT */ 3312 ((struct mode_header *)header)->length = 0; 3313 3314 ctrl_bufp = (struct mode_control_scsi3 *) 3315 (header + MODE_HEADER_LENGTH + bd_len); 3316 3317 /* 3318 * If the page length is smaller than the expected value, 3319 * the target device doesn't support D_SENSE. Bail out here. 3320 */ 3321 if (ctrl_bufp->mode_page.length < 3322 sizeof (struct mode_control_scsi3) - 2) { 3323 SD_ERROR(SD_LOG_COMMON, un, 3324 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3325 goto eds_exit; 3326 } 3327 3328 /* 3329 * Clear PS bit for MODE SELECT 3330 */ 3331 ctrl_bufp->mode_page.ps = 0; 3332 3333 /* 3334 * Set D_SENSE to enable descriptor sense format. 
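 *
 * Illustrative sketch (not part of the driver): once D_SENSE is
 * committed via MODE SELECT below, a failed command with an LBA wider
 * than 32 bits can return descriptor-format sense data, which is
 * recognizable by its response code, e.g.
 *
 *	if ((sensep[0] & 0x7f) == 0x72)
 *		the sense data uses the descriptor format
 *
 * (0x70/0x71 indicate the older fixed format; sensep is a hypothetical
 * pointer to the returned sense buffer.)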
3335 */ 3336 ctrl_bufp->d_sense = 1; 3337 3338 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3339 3340 /* 3341 * Use MODE SELECT to commit the change to the D_SENSE bit 3342 */ 3343 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 3344 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT); 3345 3346 if (status != 0) { 3347 SD_INFO(SD_LOG_COMMON, un, 3348 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3349 } else { 3350 kmem_free(header, buflen); 3351 return; 3352 } 3353 3354 eds_exit: 3355 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3356 kmem_free(header, buflen); 3357 } 3358 3359 /* 3360 * Function: sd_reenable_dsense_task 3361 * 3362 * Description: Re-enable descriptor sense after device or bus reset 3363 * 3364 * Context: Executes in a taskq() thread context 3365 */ 3366 static void 3367 sd_reenable_dsense_task(void *arg) 3368 { 3369 struct sd_lun *un = arg; 3370 sd_ssc_t *ssc; 3371 3372 ASSERT(un != NULL); 3373 3374 ssc = sd_ssc_init(un); 3375 sd_enable_descr_sense(ssc); 3376 sd_ssc_fini(ssc); 3377 } 3378 #endif /* _LP64 */ 3379 3380 /* 3381 * Function: sd_set_mmc_caps 3382 * 3383 * Description: This routine determines if the device is MMC compliant and if 3384 * the device supports CDDA via a mode sense of the CDVD 3385 * capabilities mode page. Also checks if the device is a 3386 * dvdram writable device. 3387 * 3388 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3389 * structure for this target. 3390 * 3391 * Context: Kernel thread context only 3392 */ 3393 3394 static void 3395 sd_set_mmc_caps(sd_ssc_t *ssc) 3396 { 3397 struct mode_header_grp2 *sense_mhp; 3398 uchar_t *sense_page; 3399 caddr_t buf; 3400 int bd_len; 3401 int status; 3402 struct uscsi_cmd com; 3403 int rtn; 3404 uchar_t *out_data_rw, *out_data_hd; 3405 uchar_t *rqbuf_rw, *rqbuf_hd; 3406 struct sd_lun *un; 3407 3408 ASSERT(ssc != NULL); 3409 un = ssc->ssc_un; 3410 ASSERT(un != NULL); 3411 3412 /* 3413 * The flags which will be set in this function are - mmc compliant, 3414 * dvdram writable device, cdda support. Initialize them to FALSE 3415 * and if a capability is detected - it will be set to TRUE. 3416 */ 3417 un->un_f_mmc_cap = FALSE; 3418 un->un_f_dvdram_writable_device = FALSE; 3419 un->un_f_cfg_cdda = FALSE; 3420 3421 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3422 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3423 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3424 3425 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3426 3427 if (status != 0) { 3428 /* command failed; just return */ 3429 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3430 return; 3431 } 3432 /* 3433 * If the mode sense request for the CDROM CAPABILITIES 3434 * page (0x2A) succeeds the device is assumed to be MMC. 3435 */ 3436 un->un_f_mmc_cap = TRUE; 3437 3438 /* Get to the page data */ 3439 sense_mhp = (struct mode_header_grp2 *)buf; 3440 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3441 sense_mhp->bdesc_length_lo; 3442 if (bd_len > MODE_BLK_DESC_LENGTH) { 3443 /* 3444 * We did not get back the expected block descriptor 3445 * length so we cannot determine if the device supports 3446 * CDDA. However, we still indicate the device is MMC 3447 * according to the successful response to the page 3448 * 0x2A mode sense request. 
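 *
 * (Illustrative arithmetic, not part of the driver: a well-formed
 * group 2 mode header with bdesc_length_hi == 0 and
 * bdesc_length_lo == 8 yields bd_len = (0 << 8) | 8 = 8, which is
 * MODE_BLK_DESC_LENGTH, so such a response would not take this
 * error path.)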
3449 */ 3450 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3451 "sd_set_mmc_caps: Mode Sense returned " 3452 "invalid block descriptor length\n"); 3453 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3454 return; 3455 } 3456 3457 /* See if read CDDA is supported */ 3458 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3459 bd_len); 3460 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3461 3462 /* See if writing DVD RAM is supported. */ 3463 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3464 if (un->un_f_dvdram_writable_device == TRUE) { 3465 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3466 return; 3467 } 3468 3469 /* 3470 * If the device presents DVD or CD capabilities in the mode 3471 * page, we can return here since an RRD will not have 3472 * these capabilities. 3473 */ 3474 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3475 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3476 return; 3477 } 3478 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3479 3480 /* 3481 * If un->un_f_dvdram_writable_device is still FALSE, 3482 * check for a Removable Rigid Disk (RRD). An RRD 3483 * device is identified by the features RANDOM_WRITABLE and 3484 * HARDWARE_DEFECT_MANAGEMENT. 3485 */ 3486 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3487 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3488 3489 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3490 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3491 RANDOM_WRITABLE, SD_PATH_STANDARD); 3492 3493 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3494 3495 if (rtn != 0) { 3496 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3497 kmem_free(rqbuf_rw, SENSE_LENGTH); 3498 return; 3499 } 3500 3501 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3502 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3503 3504 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3505 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3506 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3507 3508 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3509 3510 if (rtn == 0) { 3511 /* 3512 * We have good information, check for random writable 3513 * and hardware defect features. 3514 */ 3515 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3516 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3517 un->un_f_dvdram_writable_device = TRUE; 3518 } 3519 } 3520 3521 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3522 kmem_free(rqbuf_rw, SENSE_LENGTH); 3523 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3524 kmem_free(rqbuf_hd, SENSE_LENGTH); 3525 } 3526 3527 /* 3528 * Function: sd_check_for_writable_cd 3529 * 3530 * Description: This routine determines whether the media in the device 3531 * is writable. It uses the GET CONFIGURATION command (0x46) 3532 * to make that determination. 3533 * 3534 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3535 * structure for this target. 3536 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3537 * chain and the normal command waitq, or 3538 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3539 * "direct" chain and bypass the normal command 3540 * waitq. 3541 * Context: Never called at interrupt context.
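 *
 * Illustrative note (not part of the driver): the writable test below
 * keys off the current-profile bytes in the GET CONFIGURATION feature
 * header; for example, out_data[6] == 0x00 and out_data[7] == 0x12
 * (profile 0x0012, DVD-RAM) is treated as writable media.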
3542 */ 3543 3544 static void 3545 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag) 3546 { 3547 struct uscsi_cmd com; 3548 uchar_t *out_data; 3549 uchar_t *rqbuf; 3550 int rtn; 3551 uchar_t *out_data_rw, *out_data_hd; 3552 uchar_t *rqbuf_rw, *rqbuf_hd; 3553 struct mode_header_grp2 *sense_mhp; 3554 uchar_t *sense_page; 3555 caddr_t buf; 3556 int bd_len; 3557 int status; 3558 struct sd_lun *un; 3559 3560 ASSERT(ssc != NULL); 3561 un = ssc->ssc_un; 3562 ASSERT(un != NULL); 3563 ASSERT(mutex_owned(SD_MUTEX(un))); 3564 3565 /* 3566 * Initialize writable media to FALSE; we set it to TRUE only if 3567 * the configuration info tells us the media is writable. 3568 */ 3569 un->un_f_mmc_writable_media = FALSE; 3570 mutex_exit(SD_MUTEX(un)); 3571 3572 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3573 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3574 3575 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH, 3576 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3577 3578 if (rtn != 0) 3579 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3580 3581 mutex_enter(SD_MUTEX(un)); 3582 if (rtn == 0) { 3583 /* 3584 * We have good information, check for writable DVD. 3585 */ 3586 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3587 un->un_f_mmc_writable_media = TRUE; 3588 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3589 kmem_free(rqbuf, SENSE_LENGTH); 3590 return; 3591 } 3592 } 3593 3594 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3595 kmem_free(rqbuf, SENSE_LENGTH); 3596 3597 /* 3598 * Determine if this is an RRD type device. 3599 */ 3600 mutex_exit(SD_MUTEX(un)); 3601 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3602 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3603 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3604 3605 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3606 3607 mutex_enter(SD_MUTEX(un)); 3608 if (status != 0) { 3609 /* command failed; just return */ 3610 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3611 return; 3612 } 3613 3614 /* Get to the page data */ 3615 sense_mhp = (struct mode_header_grp2 *)buf; 3616 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3617 if (bd_len > MODE_BLK_DESC_LENGTH) { 3618 /* 3619 * We did not get back the expected block descriptor length so 3620 * we cannot check the mode page. 3621 */ 3622 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3623 "sd_check_for_writable_cd: Mode Sense returned " 3624 "invalid block descriptor length\n"); 3625 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3626 return; 3627 } 3628 3629 /* 3630 * If the device presents DVD or CD capabilities in the mode 3631 * page, we can return here since an RRD device will not have 3632 * these capabilities. 3633 */ 3634 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3635 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3636 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3637 return; 3638 } 3639 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3640 3641 /* 3642 * If un->un_f_mmc_writable_media is still FALSE, 3643 * check for RRD type media. An RRD device is identified 3644 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
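 *
 * Illustrative note (not part of the driver): the checks that follow
 * treat media as RRD only when both features are reported current,
 * i.e. (out_data_rw[9] & RANDOM_WRITABLE) together with its current
 * bit (out_data_rw[10] & 0x1), and likewise HARDWARE_DEFECT_MANAGEMENT
 * with (out_data_hd[10] & 0x1).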
3645 */ 3646 mutex_exit(SD_MUTEX(un)); 3647 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3648 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3649 3650 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3651 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3652 RANDOM_WRITABLE, path_flag); 3653 3654 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3655 if (rtn != 0) { 3656 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3657 kmem_free(rqbuf_rw, SENSE_LENGTH); 3658 mutex_enter(SD_MUTEX(un)); 3659 return; 3660 } 3661 3662 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3663 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3664 3665 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3666 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3667 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3668 3669 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3670 mutex_enter(SD_MUTEX(un)); 3671 if (rtn == 0) { 3672 /* 3673 * We have good information, check for random writable 3674 * and hardware defect features as current. 3675 */ 3676 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3677 (out_data_rw[10] & 0x1) && 3678 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3679 (out_data_hd[10] & 0x1)) { 3680 un->un_f_mmc_writable_media = TRUE; 3681 } 3682 } 3683 3684 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3685 kmem_free(rqbuf_rw, SENSE_LENGTH); 3686 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3687 kmem_free(rqbuf_hd, SENSE_LENGTH); 3688 } 3689 3690 /* 3691 * Function: sd_read_unit_properties 3692 * 3693 * Description: The following implements a property lookup mechanism. 3694 * Properties for particular disks (keyed on vendor, model 3695 * and rev numbers) are sought in the sd.conf file via 3696 * sd_process_sdconf_file(), and if not found there, are 3697 * looked for in a list hardcoded in this driver via 3698 * sd_process_sdconf_table(). Once located, the properties 3699 * are used to update the driver unit structure. 3700 * 3701 * Arguments: un - driver soft state (unit) structure 3702 */ 3703 3704 static void 3705 sd_read_unit_properties(struct sd_lun *un) 3706 { 3707 /* 3708 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3709 * the "sd-config-list" property (from the sd.conf file) or if 3710 * there was not a match for the inquiry vid/pid. If this event 3711 * occurs the static driver configuration table is searched for 3712 * a match. 3713 */ 3714 ASSERT(un != NULL); 3715 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3716 sd_process_sdconf_table(un); 3717 } 3718 3719 /* check for LSI device */ 3720 sd_is_lsi(un); 3721 3722 3723 } 3724 3725 3726 /* 3727 * Function: sd_process_sdconf_file 3728 * 3729 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the 3730 * driver's config file (i.e., sd.conf) and update the driver 3731 * soft state structure accordingly. 3732 * 3733 * Arguments: un - driver soft state (unit) structure 3734 * 3735 * Return Code: SD_SUCCESS - The properties were successfully set according 3736 * to the driver configuration file. 3737 * SD_FAILURE - The driver config list was not obtained or 3738 * there was no vid/pid match. This indicates that 3739 * the static config table should be used. 3740 * 3741 * The config file has a property, "sd-config-list". Currently we support 3742 * two kinds of formats.
For both formats, the value of this property 3743 * is a list of duplets: 3744 * 3745 * sd-config-list= 3746 * <duplet>, 3747 * [,<duplet>]*; 3748 * 3749 * For the improved format, where 3750 * 3751 * <duplet>:= "<vid+pid>","<tunable-list>" 3752 * 3753 * and 3754 * 3755 * <tunable-list>:= <tunable> [, <tunable> ]*; 3756 * <tunable> = <name> : <value> 3757 * 3758 * The <vid+pid> is the string that is returned by the target device on a 3759 * SCSI inquiry command, the <tunable-list> contains one or more tunables 3760 * to apply to all target devices with the specified <vid+pid>. 3761 * 3762 * Each <tunable> is a "<name> : <value>" pair. 3763 * 3764 * For the old format, the structure of each duplet is as follows: 3765 * 3766 * <duplet>:= "<vid+pid>","<data-property-name_list>" 3767 * 3768 * The first entry of the duplet is the device ID string (the concatenated 3769 * vid & pid; not to be confused with a device_id). This is defined in 3770 * the same way as in the sd_disk_table. 3771 * 3772 * The second part of the duplet is a string that identifies a 3773 * data-property-name-list. The data-property-name-list is defined as 3774 * follows: 3775 * 3776 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3777 * 3778 * The syntax of <data-property-name> depends on the <version> field. 3779 * 3780 * If version = SD_CONF_VERSION_1 we have the following syntax: 3781 * 3782 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3783 * 3784 * where the prop0 value will be used to set prop0 if bit0 set in the 3785 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3786 * 3787 */ 3788 3789 static int 3790 sd_process_sdconf_file(struct sd_lun *un) 3791 { 3792 char **config_list = NULL; 3793 uint_t nelements; 3794 char *vidptr; 3795 int vidlen; 3796 char *dnlist_ptr; 3797 char *dataname_ptr; 3798 char *dataname_lasts; 3799 int *data_list = NULL; 3800 uint_t data_list_len; 3801 int rval = SD_FAILURE; 3802 int i; 3803 3804 ASSERT(un != NULL); 3805 3806 /* Obtain the configuration list associated with the .conf file */ 3807 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un), 3808 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list, 3809 &config_list, &nelements) != DDI_PROP_SUCCESS) { 3810 return (SD_FAILURE); 3811 } 3812 3813 /* 3814 * Compare vids in each duplet to the inquiry vid - if a match is 3815 * made, get the data value and update the soft state structure 3816 * accordingly. 3817 * 3818 * Each duplet should show as a pair of strings, return SD_FAILURE 3819 * otherwise. 3820 */ 3821 if (nelements & 1) { 3822 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3823 "sd-config-list should show as pairs of strings.\n"); 3824 if (config_list) 3825 ddi_prop_free(config_list); 3826 return (SD_FAILURE); 3827 } 3828 3829 for (i = 0; i < nelements; i += 2) { 3830 /* 3831 * Note: The assumption here is that each vid entry is on 3832 * a unique line from its associated duplet. 3833 */ 3834 vidptr = config_list[i]; 3835 vidlen = (int)strlen(vidptr); 3836 if ((vidlen == 0) || 3837 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3838 continue; 3839 } 3840 3841 /* 3842 * dnlist contains 1 or more blank separated 3843 * data-property-name entries 3844 */ 3845 dnlist_ptr = config_list[i + 1]; 3846 3847 if (strchr(dnlist_ptr, ':') != NULL) { 3848 /* 3849 * Decode the improved format sd-config-list. 
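 *
 * Illustrative entry only (hypothetical vid/pid, not from a shipped
 * sd.conf):
 *
 *	sd-config-list = "VENDOR  PRODUCT", "throttle-max:32, disksort:false";
 *
 * which this path hands to sd_nvpair_str_decode() to apply the
 * tunables throttle-max = 32 and disksort = false.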
3850 */ 3851 sd_nvpair_str_decode(un, dnlist_ptr); 3852 } else { 3853 /* 3854 * This is the old format sd-config-list: loop through 3855 * all data-property-name entries in the 3856 * data-property-name-list, 3857 * setting the properties for each. 3858 */ 3859 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3860 &dataname_lasts); dataname_ptr != NULL; 3861 dataname_ptr = sd_strtok_r(NULL, " \t", 3862 &dataname_lasts)) { 3863 int version; 3864 3865 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3866 "sd_process_sdconf_file: disk:%s, " 3867 "data:%s\n", vidptr, dataname_ptr); 3868 3869 /* Get the data list */ 3870 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3871 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3872 &data_list_len) != DDI_PROP_SUCCESS) { 3873 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3874 "sd_process_sdconf_file: data " 3875 "property (%s) has no value\n", 3876 dataname_ptr); 3877 continue; 3878 } 3879 3880 version = data_list[0]; 3881 3882 if (version == SD_CONF_VERSION_1) { 3883 sd_tunables values; 3884 3885 /* Set the properties */ 3886 if (sd_chk_vers1_data(un, data_list[1], 3887 &data_list[2], data_list_len, 3888 dataname_ptr) == SD_SUCCESS) { 3889 sd_get_tunables_from_conf(un, 3890 data_list[1], &data_list[2], 3891 &values); 3892 sd_set_vers1_properties(un, 3893 data_list[1], &values); 3894 rval = SD_SUCCESS; 3895 } else { 3896 rval = SD_FAILURE; 3897 } 3898 } else { 3899 scsi_log(SD_DEVINFO(un), sd_label, 3900 CE_WARN, "data property %s version " 3901 "0x%x is invalid.", 3902 dataname_ptr, version); 3903 rval = SD_FAILURE; 3904 } 3905 if (data_list) 3906 ddi_prop_free(data_list); 3907 } 3908 } 3909 } 3910 3911 /* free up the memory allocated by ddi_prop_lookup_string_array(). */ 3912 if (config_list) { 3913 ddi_prop_free(config_list); 3914 } 3915 3916 return (rval); 3917 } 3918 3919 /* 3920 * Function: sd_nvpair_str_decode() 3921 * 3922 * Description: Parse the improved format sd-config-list to get 3923 * each tunable entry, which is a name-value pair. 3924 * Then call sd_set_properties() to set each property. 3925 * 3926 * Arguments: un - driver soft state (unit) structure 3927 * nvpair_str - the tunable list 3928 */ 3929 static void 3930 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3931 { 3932 char *nv, *name, *value, *token; 3933 char *nv_lasts, *v_lasts, *x_lasts; 3934 3935 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3936 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3937 token = sd_strtok_r(nv, ":", &v_lasts); 3938 name = sd_strtok_r(token, " \t", &x_lasts); 3939 token = sd_strtok_r(NULL, ":", &v_lasts); 3940 value = sd_strtok_r(token, " \t", &x_lasts); 3941 if (name == NULL || value == NULL) { 3942 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3943 "sd_nvpair_str_decode: " 3944 "name or value is not valid!\n"); 3945 } else { 3946 sd_set_properties(un, name, value); 3947 } 3948 } 3949 } 3950 3951 /* 3952 * Function: sd_strtok_r() 3953 * 3954 * Description: This function uses strpbrk and strspn to break a 3955 * string into tokens on sequential calls. It returns NULL 3956 * when no non-separator characters remain. The first 3957 * argument should be NULL for the second and subsequent calls.
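 *
 * Illustrative usage sketch (not part of the driver), tokenizing a
 * hypothetical buffer buf on commas:
 *
 *	char *tok, *lasts;
 *
 *	for (tok = sd_strtok_r(buf, ",", &lasts); tok != NULL;
 *	    tok = sd_strtok_r(NULL, ",", &lasts)) {
 *		consume(tok);	consume() is a hypothetical callee
 *	}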
3958 */ 3959 static char * 3960 sd_strtok_r(char *string, const char *sepset, char **lasts) 3961 { 3962 char *q, *r; 3963 3964 /* First or subsequent call */ 3965 if (string == NULL) 3966 string = *lasts; 3967 3968 if (string == NULL) 3969 return (NULL); 3970 3971 /* Skip leading separators */ 3972 q = string + strspn(string, sepset); 3973 3974 if (*q == '\0') 3975 return (NULL); 3976 3977 if ((r = strpbrk(q, sepset)) == NULL) 3978 *lasts = NULL; 3979 else { 3980 *r = '\0'; 3981 *lasts = r + 1; 3982 } 3983 return (q); 3984 } 3985 3986 /* 3987 * Function: sd_set_properties() 3988 * 3989 * Description: Set device properties based on the improved 3990 * format sd-config-list. 3991 * 3992 * Arguments: un - driver soft state (unit) structure 3993 * name - supported tunable name 3994 * value - tunable value 3995 */ 3996 static void 3997 sd_set_properties(struct sd_lun *un, char *name, char *value) 3998 { 3999 char *endptr = NULL; 4000 long val = 0; 4001 4002 if (strcasecmp(name, "cache-nonvolatile") == 0) { 4003 if (strcasecmp(value, "true") == 0) { 4004 un->un_f_suppress_cache_flush = TRUE; 4005 } else if (strcasecmp(value, "false") == 0) { 4006 un->un_f_suppress_cache_flush = FALSE; 4007 } else { 4008 goto value_invalid; 4009 } 4010 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4011 "suppress_cache_flush flag set to %d\n", 4012 un->un_f_suppress_cache_flush); 4013 return; 4014 } 4015 4016 if (strcasecmp(name, "controller-type") == 0) { 4017 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4018 un->un_ctype = val; 4019 } else { 4020 goto value_invalid; 4021 } 4022 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4023 "ctype set to %d\n", un->un_ctype); 4024 return; 4025 } 4026 4027 if (strcasecmp(name, "delay-busy") == 0) { 4028 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4029 un->un_busy_timeout = drv_usectohz(val / 1000); 4030 } else { 4031 goto value_invalid; 4032 } 4033 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4034 "busy_timeout set to %d\n", un->un_busy_timeout); 4035 return; 4036 } 4037 4038 if (strcasecmp(name, "disksort") == 0) { 4039 if (strcasecmp(value, "true") == 0) { 4040 un->un_f_disksort_disabled = FALSE; 4041 } else if (strcasecmp(value, "false") == 0) { 4042 un->un_f_disksort_disabled = TRUE; 4043 } else { 4044 goto value_invalid; 4045 } 4046 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4047 "disksort disabled flag set to %d\n", 4048 un->un_f_disksort_disabled); 4049 return; 4050 } 4051 4052 if (strcasecmp(name, "power-condition") == 0) { 4053 if (strcasecmp(value, "true") == 0) { 4054 un->un_f_power_condition_disabled = FALSE; 4055 } else if (strcasecmp(value, "false") == 0) { 4056 un->un_f_power_condition_disabled = TRUE; 4057 } else { 4058 goto value_invalid; 4059 } 4060 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4061 "power condition disabled flag set to %d\n", 4062 un->un_f_power_condition_disabled); 4063 return; 4064 } 4065 4066 if (strcasecmp(name, "timeout-releasereservation") == 0) { 4067 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4068 un->un_reserve_release_time = val; 4069 } else { 4070 goto value_invalid; 4071 } 4072 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4073 "reservation release timeout set to %d\n", 4074 un->un_reserve_release_time); 4075 return; 4076 } 4077 4078 if (strcasecmp(name, "reset-lun") == 0) { 4079 if (strcasecmp(value, "true") == 0) { 4080 un->un_f_lun_reset_enabled = TRUE; 4081 } else if (strcasecmp(value, "false") == 0) { 4082 un->un_f_lun_reset_enabled = FALSE; 4083 } 
else { 4084 goto value_invalid; 4085 } 4086 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4087 "lun reset enabled flag set to %d\n", 4088 un->un_f_lun_reset_enabled); 4089 return; 4090 } 4091 4092 if (strcasecmp(name, "retries-busy") == 0) { 4093 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4094 un->un_busy_retry_count = val; 4095 } else { 4096 goto value_invalid; 4097 } 4098 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4099 "busy retry count set to %d\n", un->un_busy_retry_count); 4100 return; 4101 } 4102 4103 if (strcasecmp(name, "retries-timeout") == 0) { 4104 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4105 un->un_retry_count = val; 4106 } else { 4107 goto value_invalid; 4108 } 4109 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4110 "timeout retry count set to %d\n", un->un_retry_count); 4111 return; 4112 } 4113 4114 if (strcasecmp(name, "retries-notready") == 0) { 4115 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4116 un->un_notready_retry_count = val; 4117 } else { 4118 goto value_invalid; 4119 } 4120 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4121 "notready retry count set to %d\n", 4122 un->un_notready_retry_count); 4123 return; 4124 } 4125 4126 if (strcasecmp(name, "retries-reset") == 0) { 4127 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4128 un->un_reset_retry_count = val; 4129 } else { 4130 goto value_invalid; 4131 } 4132 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4133 "reset retry count set to %d\n", 4134 un->un_reset_retry_count); 4135 return; 4136 } 4137 4138 if (strcasecmp(name, "throttle-max") == 0) { 4139 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4140 un->un_saved_throttle = un->un_throttle = val; 4141 } else { 4142 goto value_invalid; 4143 } 4144 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4145 "throttle set to %d\n", un->un_throttle); 4146 } 4147 4148 if (strcasecmp(name, "throttle-min") == 0) { 4149 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4150 un->un_min_throttle = val; 4151 } else { 4152 goto value_invalid; 4153 } 4154 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4155 "min throttle set to %d\n", un->un_min_throttle); 4156 } 4157 4158 if (strcasecmp(name, "rmw-type") == 0) { 4159 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4160 un->un_f_rmw_type = val; 4161 } else { 4162 goto value_invalid; 4163 } 4164 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4165 "RMW type set to %d\n", un->un_f_rmw_type); 4166 } 4167 4168 /* 4169 * Validate the throttle values. 4170 * If any of the numbers are invalid, set everything to defaults. 4171 */ 4172 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4173 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4174 (un->un_min_throttle > un->un_throttle)) { 4175 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4176 un->un_min_throttle = sd_min_throttle; 4177 } 4178 return; 4179 4180 value_invalid: 4181 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4182 "value of prop %s is invalid\n", name); 4183 } 4184 4185 /* 4186 * Function: sd_get_tunables_from_conf() 4187 * 4188 * 4189 * This function reads the data list from the sd.conf file and pulls 4190 * the values that can have numeric values as arguments and places 4191 * the values in the appropriate sd_tunables member. 
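 *
 * Illustrative sketch (not part of the driver), assuming for the sake
 * of the example that SD_CONF_BSET_THROTTLE is bit 0 and
 * SD_CONF_BSET_CTYPE is bit 1: a conf data list of "1,0x3,32,2"
 * carries version 1, flags 0x3, and two property values; this
 * function would then store 32 in sdt_throttle and 2 in sdt_ctype.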
4192 * Since the order of the data list members varies across platforms, 4193 * this function reads them from the data list in a platform-specific 4194 * order and places them into the correct sd_tunables member that is 4195 * consistent across all platforms. 4196 */ 4197 static void 4198 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 4199 sd_tunables *values) 4200 { 4201 int i; 4202 int mask; 4203 4204 bzero(values, sizeof (sd_tunables)); 4205 4206 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4207 4208 mask = 1 << i; 4209 if (mask > flags) { 4210 break; 4211 } 4212 4213 switch (mask & flags) { 4214 case 0: /* This mask bit not set in flags */ 4215 continue; 4216 case SD_CONF_BSET_THROTTLE: 4217 values->sdt_throttle = data_list[i]; 4218 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4219 "sd_get_tunables_from_conf: throttle = %d\n", 4220 values->sdt_throttle); 4221 break; 4222 case SD_CONF_BSET_CTYPE: 4223 values->sdt_ctype = data_list[i]; 4224 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4225 "sd_get_tunables_from_conf: ctype = %d\n", 4226 values->sdt_ctype); 4227 break; 4228 case SD_CONF_BSET_NRR_COUNT: 4229 values->sdt_not_rdy_retries = data_list[i]; 4230 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4231 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 4232 values->sdt_not_rdy_retries); 4233 break; 4234 case SD_CONF_BSET_BSY_RETRY_COUNT: 4235 values->sdt_busy_retries = data_list[i]; 4236 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4237 "sd_get_tunables_from_conf: busy_retries = %d\n", 4238 values->sdt_busy_retries); 4239 break; 4240 case SD_CONF_BSET_RST_RETRIES: 4241 values->sdt_reset_retries = data_list[i]; 4242 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4243 "sd_get_tunables_from_conf: reset_retries = %d\n", 4244 values->sdt_reset_retries); 4245 break; 4246 case SD_CONF_BSET_RSV_REL_TIME: 4247 values->sdt_reserv_rel_time = data_list[i]; 4248 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4249 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 4250 values->sdt_reserv_rel_time); 4251 break; 4252 case SD_CONF_BSET_MIN_THROTTLE: 4253 values->sdt_min_throttle = data_list[i]; 4254 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4255 "sd_get_tunables_from_conf: min_throttle = %d\n", 4256 values->sdt_min_throttle); 4257 break; 4258 case SD_CONF_BSET_DISKSORT_DISABLED: 4259 values->sdt_disk_sort_dis = data_list[i]; 4260 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4261 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 4262 values->sdt_disk_sort_dis); 4263 break; 4264 case SD_CONF_BSET_LUN_RESET_ENABLED: 4265 values->sdt_lun_reset_enable = data_list[i]; 4266 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4267 "sd_get_tunables_from_conf: lun_reset_enable = %d" 4268 "\n", values->sdt_lun_reset_enable); 4269 break; 4270 case SD_CONF_BSET_CACHE_IS_NV: 4271 values->sdt_suppress_cache_flush = data_list[i]; 4272 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4273 "sd_get_tunables_from_conf: " 4274 "suppress_cache_flush = %d" 4275 "\n", values->sdt_suppress_cache_flush); 4276 break; 4277 case SD_CONF_BSET_PC_DISABLED: 4278 values->sdt_power_condition_dis = data_list[i]; 4279 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4280 "sd_get_tunables_from_conf: power_condition_dis = " 4281 "%d\n", values->sdt_power_condition_dis); 4282 break; 4283 } 4284 } 4285 } 4286 4287 /* 4288 * Function: sd_process_sdconf_table 4289 * 4290 * Description: Search the static configuration table for a match on the 4291 * inquiry vid/pid and update the driver soft state structure 4292 * according to the table property values for the device.
4293 * 4294 * The form of a configuration table entry is: 4295 * <vid+pid>,<flags>,<property-data> 4296 * "SEAGATE ST42400N",1,0x40000, 4297 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 4298 * 4299 * Arguments: un - driver soft state (unit) structure 4300 */ 4301 4302 static void 4303 sd_process_sdconf_table(struct sd_lun *un) 4304 { 4305 char *id = NULL; 4306 int table_index; 4307 int idlen; 4308 4309 ASSERT(un != NULL); 4310 for (table_index = 0; table_index < sd_disk_table_size; 4311 table_index++) { 4312 id = sd_disk_table[table_index].device_id; 4313 idlen = strlen(id); 4314 if (idlen == 0) { 4315 continue; 4316 } 4317 4318 /* 4319 * The static configuration table currently does not 4320 * implement version 10 properties. Additionally, 4321 * multiple data-property-name entries are not 4322 * implemented in the static configuration table. 4323 */ 4324 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4325 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4326 "sd_process_sdconf_table: disk %s\n", id); 4327 sd_set_vers1_properties(un, 4328 sd_disk_table[table_index].flags, 4329 sd_disk_table[table_index].properties); 4330 break; 4331 } 4332 } 4333 } 4334 4335 4336 /* 4337 * Function: sd_sdconf_id_match 4338 * 4339 * Description: This local function implements a case sensitive vid/pid 4340 * comparison as well as the boundary cases of wild card and 4341 * multiple blanks. 4342 * 4343 * Note: An implicit assumption made here is that the scsi 4344 * inquiry structure will always keep the vid, pid and 4345 * revision strings in consecutive sequence, so they can be 4346 * read as a single string. If this assumption is not the 4347 * case, a separate string, to be used for the check, needs 4348 * to be built with these strings concatenated. 4349 * 4350 * Arguments: un - driver soft state (unit) structure 4351 * id - table or config file vid/pid 4352 * idlen - length of the vid/pid (bytes) 4353 * 4354 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4355 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4356 */ 4357 4358 static int 4359 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 4360 { 4361 struct scsi_inquiry *sd_inq; 4362 int rval = SD_SUCCESS; 4363 4364 ASSERT(un != NULL); 4365 sd_inq = un->un_sd->sd_inq; 4366 ASSERT(id != NULL); 4367 4368 /* 4369 * We use the inq_vid as a pointer to a buffer containing the 4370 * vid and pid and use the entire vid/pid length of the table 4371 * entry for the comparison. This works because the inq_pid 4372 * data member follows inq_vid in the scsi_inquiry structure. 4373 */ 4374 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4375 /* 4376 * The user id string is compared to the inquiry vid/pid 4377 * using a case insensitive comparison and ignoring 4378 * multiple spaces. 4379 */ 4380 rval = sd_blank_cmp(un, id, idlen); 4381 if (rval != SD_SUCCESS) { 4382 /* 4383 * User id strings that start and end with a "*" 4384 * are a special case. These do not have a 4385 * specific vendor, and the product string can 4386 * appear anywhere in the 16 byte PID portion of 4387 * the inquiry data. This is a simple strstr() 4388 * type search for the user id in the inquiry data. 
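 *
 * Illustrative example (hypothetical id): a table entry of
 * "*Express1000*" would match any inquiry whose 16-byte PID field
 * contains the substring "Express1000", regardless of vendor.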
4389 */ 4390 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4391 char *pidptr = &id[1]; 4392 int i; 4393 int j; 4394 int pidstrlen = idlen - 2; 4395 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4396 pidstrlen; 4397 4398 if (j < 0) { 4399 return (SD_FAILURE); 4400 } 4401 for (i = 0; i < j; i++) { 4402 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4403 pidptr, pidstrlen) == 0) { 4404 rval = SD_SUCCESS; 4405 break; 4406 } 4407 } 4408 } 4409 } 4410 } 4411 return (rval); 4412 } 4413 4414 4415 /* 4416 * Function: sd_blank_cmp 4417 * 4418 * Description: If the id string starts and ends with a space, treat 4419 * multiple consecutive spaces as equivalent to a single 4420 * space. For example, this causes a sd_disk_table entry 4421 * of " NEC CDROM " to match a device's id string of 4422 * "NEC CDROM". 4423 * 4424 * Note: The success exit condition for this routine is if 4425 * the pointer to the table entry is '\0' and the cnt of 4426 * the inquiry length is zero. This will happen if the inquiry 4427 * string returned by the device is padded with spaces to be 4428 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4429 * SCSI spec states that the inquiry string is to be padded with 4430 * spaces. 4431 * 4432 * Arguments: un - driver soft state (unit) structure 4433 * id - table or config file vid/pid 4434 * idlen - length of the vid/pid (bytes) 4435 * 4436 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4437 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4438 */ 4439 4440 static int 4441 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4442 { 4443 char *p1; 4444 char *p2; 4445 int cnt; 4446 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4447 sizeof (SD_INQUIRY(un)->inq_pid); 4448 4449 ASSERT(un != NULL); 4450 p2 = un->un_sd->sd_inq->inq_vid; 4451 ASSERT(id != NULL); 4452 p1 = id; 4453 4454 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4455 /* 4456 * Note: string p1 is terminated by a NUL but string p2 4457 * isn't. The end of p2 is determined by cnt. 4458 */ 4459 for (;;) { 4460 /* skip over any extra blanks in both strings */ 4461 while ((*p1 != '\0') && (*p1 == ' ')) { 4462 p1++; 4463 } 4464 while ((cnt != 0) && (*p2 == ' ')) { 4465 p2++; 4466 cnt--; 4467 } 4468 4469 /* compare the two strings */ 4470 if ((cnt == 0) || 4471 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4472 break; 4473 } 4474 while ((cnt > 0) && 4475 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4476 p1++; 4477 p2++; 4478 cnt--; 4479 } 4480 } 4481 } 4482 4483 /* return SD_SUCCESS if both strings match */ 4484 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 4485 } 4486 4487 4488 /* 4489 * Function: sd_chk_vers1_data 4490 * 4491 * Description: Verify the version 1 device properties provided by the 4492 * user via the configuration file 4493 * 4494 * Arguments: un - driver soft state (unit) structure 4495 * flags - integer mask indicating properties to be set 4496 * prop_list - integer list of property values 4497 * list_len - number of the elements 4498 * 4499 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 4500 * SD_FAILURE - Indicates the user provided data is invalid 4501 */ 4502 4503 static int 4504 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 4505 int list_len, char *dataname_ptr) 4506 { 4507 int i; 4508 int mask = 1; 4509 int index = 0; 4510 4511 ASSERT(un != NULL); 4512 4513 /* Check for a NULL property name and list */ 4514 if (dataname_ptr == NULL) { 4515 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4516 "sd_chk_vers1_data: NULL data property name."); 4517 return (SD_FAILURE); 4518 } 4519 if (prop_list == NULL) { 4520 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4521 "sd_chk_vers1_data: %s NULL data property list.", 4522 dataname_ptr); 4523 return (SD_FAILURE); 4524 } 4525 4526 /* Display a warning if undefined bits are set in the flags */ 4527 if (flags & ~SD_CONF_BIT_MASK) { 4528 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4529 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 4530 "Properties not set.", 4531 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 4532 return (SD_FAILURE); 4533 } 4534 4535 /* 4536 * Verify the length of the list by identifying the highest bit set 4537 * in the flags and validating that the property list has a length 4538 * up to the index of this bit. 4539 */ 4540 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4541 if (flags & mask) { 4542 index = i + 1; 4543 } 4544 mask <<= 1; 4545 } 4546 if (list_len < (index + 2)) { 4547 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4548 "sd_chk_vers1_data: " 4549 "Data property list %s size is incorrect. " 4550 "Properties not set.", dataname_ptr); 4551 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 4552 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 4553 return (SD_FAILURE); 4554 } 4555 return (SD_SUCCESS); 4556 } 4557 4558 4559 /* 4560 * Function: sd_set_vers1_properties 4561 * 4562 * Description: Set version 1 device properties based on a property list 4563 * retrieved from the driver configuration file or static 4564 * configuration table. Version 1 properties have the format: 4565 * 4566 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 4567 * 4568 * where the prop0 value will be used to set prop0 if bit0 4569 * is set in the flags 4570 * 4571 * Arguments: un - driver soft state (unit) structure 4572 * flags - integer mask indicating properties to be set 4573 * prop_list - integer list of property values 4574 */ 4575 4576 static void 4577 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4578 { 4579 ASSERT(un != NULL); 4580 4581 /* 4582 * Set the flag to indicate cache is to be disabled. An attempt 4583 * to disable the cache via sd_cache_control() will be made 4584 * later during attach once the basic initialization is complete.
4585 */ 4586 if (flags & SD_CONF_BSET_NOCACHE) { 4587 un->un_f_opt_disable_cache = TRUE; 4588 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4589 "sd_set_vers1_properties: caching disabled flag set\n"); 4590 } 4591 4592 /* CD-specific configuration parameters */ 4593 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4594 un->un_f_cfg_playmsf_bcd = TRUE; 4595 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4596 "sd_set_vers1_properties: playmsf_bcd set\n"); 4597 } 4598 if (flags & SD_CONF_BSET_READSUB_BCD) { 4599 un->un_f_cfg_readsub_bcd = TRUE; 4600 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4601 "sd_set_vers1_properties: readsub_bcd set\n"); 4602 } 4603 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4604 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4605 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4606 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4607 } 4608 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4609 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4610 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4611 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4612 } 4613 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4614 un->un_f_cfg_no_read_header = TRUE; 4615 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4616 "sd_set_vers1_properties: no_read_header set\n"); 4617 } 4618 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4619 un->un_f_cfg_read_cd_xd4 = TRUE; 4620 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4621 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4622 } 4623 4624 /* Support for devices which do not have valid/unique serial numbers */ 4625 if (flags & SD_CONF_BSET_FAB_DEVID) { 4626 un->un_f_opt_fab_devid = TRUE; 4627 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4628 "sd_set_vers1_properties: fab_devid bit set\n"); 4629 } 4630 4631 /* Support for user throttle configuration */ 4632 if (flags & SD_CONF_BSET_THROTTLE) { 4633 ASSERT(prop_list != NULL); 4634 un->un_saved_throttle = un->un_throttle = 4635 prop_list->sdt_throttle; 4636 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4637 "sd_set_vers1_properties: throttle set to %d\n", 4638 prop_list->sdt_throttle); 4639 } 4640 4641 /* Set the per disk retry count according to the conf file or table. 
*/ 4642 if (flags & SD_CONF_BSET_NRR_COUNT) { 4643 ASSERT(prop_list != NULL); 4644 if (prop_list->sdt_not_rdy_retries) { 4645 un->un_notready_retry_count = 4646 prop_list->sdt_not_rdy_retries; 4647 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4648 "sd_set_vers1_properties: not ready retry count" 4649 " set to %d\n", un->un_notready_retry_count); 4650 } 4651 } 4652 4653 /* The controller type is reported for generic disk driver ioctls */ 4654 if (flags & SD_CONF_BSET_CTYPE) { 4655 ASSERT(prop_list != NULL); 4656 switch (prop_list->sdt_ctype) { 4657 case CTYPE_CDROM: 4658 un->un_ctype = prop_list->sdt_ctype; 4659 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4660 "sd_set_vers1_properties: ctype set to " 4661 "CTYPE_CDROM\n"); 4662 break; 4663 case CTYPE_CCS: 4664 un->un_ctype = prop_list->sdt_ctype; 4665 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4666 "sd_set_vers1_properties: ctype set to " 4667 "CTYPE_CCS\n"); 4668 break; 4669 case CTYPE_ROD: /* RW optical */ 4670 un->un_ctype = prop_list->sdt_ctype; 4671 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4672 "sd_set_vers1_properties: ctype set to " 4673 "CTYPE_ROD\n"); 4674 break; 4675 default: 4676 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4677 "sd_set_vers1_properties: Could not set " 4678 "invalid ctype value (%d)", 4679 prop_list->sdt_ctype); 4680 } 4681 } 4682 4683 /* Purple failover timeout */ 4684 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4685 ASSERT(prop_list != NULL); 4686 un->un_busy_retry_count = 4687 prop_list->sdt_busy_retries; 4688 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4689 "sd_set_vers1_properties: " 4690 "busy retry count set to %d\n", 4691 un->un_busy_retry_count); 4692 } 4693 4694 /* Purple reset retry count */ 4695 if (flags & SD_CONF_BSET_RST_RETRIES) { 4696 ASSERT(prop_list != NULL); 4697 un->un_reset_retry_count = 4698 prop_list->sdt_reset_retries; 4699 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4700 "sd_set_vers1_properties: " 4701 "reset retry count set to %d\n", 4702 un->un_reset_retry_count); 4703 } 4704 4705 /* Purple reservation release timeout */ 4706 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4707 ASSERT(prop_list != NULL); 4708 un->un_reserve_release_time = 4709 prop_list->sdt_reserv_rel_time; 4710 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4711 "sd_set_vers1_properties: " 4712 "reservation release timeout set to %d\n", 4713 un->un_reserve_release_time); 4714 } 4715 4716 /* 4717 * Driver flag telling the driver to verify that no commands are pending 4718 * for a device before issuing a Test Unit Ready. This is a workaround 4719 * for a firmware bug in some Seagate eliteI drives. 4720 */ 4721 if (flags & SD_CONF_BSET_TUR_CHECK) { 4722 un->un_f_cfg_tur_check = TRUE; 4723 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4724 "sd_set_vers1_properties: tur queue check set\n"); 4725 } 4726 4727 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4728 un->un_min_throttle = prop_list->sdt_min_throttle; 4729 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4730 "sd_set_vers1_properties: min throttle set to %d\n", 4731 un->un_min_throttle); 4732 } 4733 4734 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4735 un->un_f_disksort_disabled = 4736 (prop_list->sdt_disk_sort_dis != 0) ? 4737 TRUE : FALSE; 4738 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4739 "sd_set_vers1_properties: disksort disabled " 4740 "flag set to %d\n", 4741 prop_list->sdt_disk_sort_dis); 4742 } 4743 4744 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4745 un->un_f_lun_reset_enabled = 4746 (prop_list->sdt_lun_reset_enable != 0) ? 
4747 TRUE : FALSE; 4748 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4749 "sd_set_vers1_properties: lun reset enabled " 4750 "flag set to %d\n", 4751 prop_list->sdt_lun_reset_enable); 4752 } 4753 4754 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4755 un->un_f_suppress_cache_flush = 4756 (prop_list->sdt_suppress_cache_flush != 0) ? 4757 TRUE : FALSE; 4758 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4759 "sd_set_vers1_properties: suppress_cache_flush " 4760 "flag set to %d\n", 4761 prop_list->sdt_suppress_cache_flush); 4762 } 4763 4764 if (flags & SD_CONF_BSET_PC_DISABLED) { 4765 un->un_f_power_condition_disabled = 4766 (prop_list->sdt_power_condition_dis != 0) ? 4767 TRUE : FALSE; 4768 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4769 "sd_set_vers1_properties: power_condition_disabled " 4770 "flag set to %d\n", 4771 prop_list->sdt_power_condition_dis); 4772 } 4773 4774 /* 4775 * Validate the throttle values. 4776 * If any of the numbers are invalid, set everything to defaults. 4777 */ 4778 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4779 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4780 (un->un_min_throttle > un->un_throttle)) { 4781 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4782 un->un_min_throttle = sd_min_throttle; 4783 } 4784 } 4785 4786 /* 4787 * Function: sd_is_lsi() 4788 * 4789 * Description: Check for lsi devices, step through the static device 4790 * table to match vid/pid. 4791 * 4792 * Args: un - ptr to sd_lun 4793 * 4794 * Notes: When creating new LSI property, need to add the new LSI property 4795 * to this function. 4796 */ 4797 static void 4798 sd_is_lsi(struct sd_lun *un) 4799 { 4800 char *id = NULL; 4801 int table_index; 4802 int idlen; 4803 void *prop; 4804 4805 ASSERT(un != NULL); 4806 for (table_index = 0; table_index < sd_disk_table_size; 4807 table_index++) { 4808 id = sd_disk_table[table_index].device_id; 4809 idlen = strlen(id); 4810 if (idlen == 0) { 4811 continue; 4812 } 4813 4814 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4815 prop = sd_disk_table[table_index].properties; 4816 if (prop == &lsi_properties || 4817 prop == &lsi_oem_properties || 4818 prop == &lsi_properties_scsi || 4819 prop == &symbios_properties) { 4820 un->un_f_cfg_is_lsi = TRUE; 4821 } 4822 break; 4823 } 4824 } 4825 } 4826 4827 /* 4828 * Function: sd_get_physical_geometry 4829 * 4830 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4831 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4832 * target, and use this information to initialize the physical 4833 * geometry cache specified by pgeom_p. 4834 * 4835 * MODE SENSE is an optional command, so failure in this case 4836 * does not necessarily denote an error. We want to use the 4837 * MODE SENSE commands to derive the physical geometry of the 4838 * device, but if either command fails, the logical geometry is 4839 * used as the fallback for disk label geometry in cmlb. 4840 * 4841 * This requires that un->un_blockcount and un->un_tgt_blocksize 4842 * have already been initialized for the current target and 4843 * that the current values be passed as args so that we don't 4844 * end up ever trying to use -1 as a valid value. This could 4845 * happen if either value is reset while we're not holding 4846 * the mutex. 4847 * 4848 * Arguments: un - driver soft state (unit) structure 4849 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4850 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4851 * to use the USCSI "direct" chain and bypass the normal 4852 * command waitq. 
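 *
 *		Illustrative arithmetic (not part of the driver): with
 *		nsect = 128 from page 3 and nhead = 16 from page 4,
 *		spc = 16 * 128 = 2048 sectors per cylinder; an ncyl of
 *		4096 then gives a MODE SENSE capacity of 2048 * 4096 =
 *		8388608 blocks, which is compared against the READ
 *		CAPACITY value below.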
4853 * 4854 * Context: Kernel thread only (can sleep). 4855 */ 4856 4857 static int 4858 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4859 diskaddr_t capacity, int lbasize, int path_flag) 4860 { 4861 struct mode_format *page3p; 4862 struct mode_geometry *page4p; 4863 struct mode_header *headerp; 4864 int sector_size; 4865 int nsect; 4866 int nhead; 4867 int ncyl; 4868 int intrlv; 4869 int spc; 4870 diskaddr_t modesense_capacity; 4871 int rpm; 4872 int bd_len; 4873 int mode_header_length; 4874 uchar_t *p3bufp; 4875 uchar_t *p4bufp; 4876 int cdbsize; 4877 int ret = EIO; 4878 sd_ssc_t *ssc; 4879 int status; 4880 4881 ASSERT(un != NULL); 4882 4883 if (lbasize == 0) { 4884 if (ISCD(un)) { 4885 lbasize = 2048; 4886 } else { 4887 lbasize = un->un_sys_blocksize; 4888 } 4889 } 4890 pgeom_p->g_secsize = (unsigned short)lbasize; 4891 4892 /* 4893 * If the unit is a cd/dvd drive MODE SENSE page three 4894 * and MODE SENSE page four are reserved (see SBC spec 4895 * and MMC spec). To prevent soft errors just return 4896 * using the default LBA size. 4897 */ 4898 if (ISCD(un)) 4899 return (ret); 4900 4901 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4902 4903 /* 4904 * Retrieve MODE SENSE page 3 - Format Device Page 4905 */ 4906 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4907 ssc = sd_ssc_init(un); 4908 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 4909 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 4910 if (status != 0) { 4911 SD_ERROR(SD_LOG_COMMON, un, 4912 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4913 goto page3_exit; 4914 } 4915 4916 /* 4917 * Determine size of Block Descriptors in order to locate the mode 4918 * page data. ATAPI devices return 0, SCSI devices should return 4919 * MODE_BLK_DESC_LENGTH. 4920 */ 4921 headerp = (struct mode_header *)p3bufp; 4922 if (un->un_f_cfg_is_atapi == TRUE) { 4923 struct mode_header_grp2 *mhp = 4924 (struct mode_header_grp2 *)headerp; 4925 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4926 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4927 } else { 4928 mode_header_length = MODE_HEADER_LENGTH; 4929 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4930 } 4931 4932 if (bd_len > MODE_BLK_DESC_LENGTH) { 4933 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4934 "sd_get_physical_geometry: received unexpected bd_len " 4935 "of %d, page3\n", bd_len); 4936 status = EIO; 4937 goto page3_exit; 4938 } 4939 4940 page3p = (struct mode_format *) 4941 ((caddr_t)headerp + mode_header_length + bd_len); 4942 4943 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4944 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4945 "sd_get_physical_geometry: mode sense pg3 code mismatch " 4946 "%d\n", page3p->mode_page.code); 4947 status = EIO; 4948 goto page3_exit; 4949 } 4950 4951 /* 4952 * Use this physical geometry data only if BOTH MODE SENSE commands 4953 * complete successfully; otherwise, revert to the logical geometry. 4954 * So, we need to save everything in temporary variables. 
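 *
 * (Forward note, illustrative only: the sector size read below is
 * rounded down to a multiple of the system block size, e.g. a
 * reported 516 bytes becomes 516 & ~(512 - 1) = 512 when
 * un_sys_blocksize is 512.)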
4955 */ 4956 sector_size = BE_16(page3p->data_bytes_sect); 4957 4958 /* 4959 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4960 */ 4961 if (sector_size == 0) { 4962 sector_size = un->un_sys_blocksize; 4963 } else { 4964 sector_size &= ~(un->un_sys_blocksize - 1); 4965 } 4966 4967 nsect = BE_16(page3p->sect_track); 4968 intrlv = BE_16(page3p->interleave); 4969 4970 SD_INFO(SD_LOG_COMMON, un, 4971 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4972 SD_INFO(SD_LOG_COMMON, un, 4973 " mode page: %d; nsect: %d; sector size: %d;\n", 4974 page3p->mode_page.code, nsect, sector_size); 4975 SD_INFO(SD_LOG_COMMON, un, 4976 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4977 BE_16(page3p->track_skew), 4978 BE_16(page3p->cylinder_skew)); 4979 4980 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4981 4982 /* 4983 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4984 */ 4985 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4986 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 4987 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 4988 if (status != 0) { 4989 SD_ERROR(SD_LOG_COMMON, un, 4990 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4991 goto page4_exit; 4992 } 4993 4994 /* 4995 * Determine size of Block Descriptors in order to locate the mode 4996 * page data. ATAPI devices return 0, SCSI devices should return 4997 * MODE_BLK_DESC_LENGTH. 4998 */ 4999 headerp = (struct mode_header *)p4bufp; 5000 if (un->un_f_cfg_is_atapi == TRUE) { 5001 struct mode_header_grp2 *mhp = 5002 (struct mode_header_grp2 *)headerp; 5003 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 5004 } else { 5005 bd_len = ((struct mode_header *)headerp)->bdesc_length; 5006 } 5007 5008 if (bd_len > MODE_BLK_DESC_LENGTH) { 5009 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5010 "sd_get_physical_geometry: received unexpected bd_len of " 5011 "%d, page4\n", bd_len); 5012 status = EIO; 5013 goto page4_exit; 5014 } 5015 5016 page4p = (struct mode_geometry *) 5017 ((caddr_t)headerp + mode_header_length + bd_len); 5018 5019 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 5020 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5021 "sd_get_physical_geometry: mode sense pg4 code mismatch " 5022 "%d\n", page4p->mode_page.code); 5023 status = EIO; 5024 goto page4_exit; 5025 } 5026 5027 /* 5028 * Stash the data now, after we know that both commands completed. 5029 */ 5030 5031 5032 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 5033 spc = nhead * nsect; 5034 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 5035 rpm = BE_16(page4p->rpm); 5036 5037 modesense_capacity = spc * ncyl; 5038 5039 SD_INFO(SD_LOG_COMMON, un, 5040 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 5041 SD_INFO(SD_LOG_COMMON, un, 5042 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 5043 SD_INFO(SD_LOG_COMMON, un, 5044 " computed capacity(h*s*c): %d;\n", modesense_capacity); 5045 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 5046 (void *)pgeom_p, capacity); 5047 5048 /* 5049 * Compensate if the drive's geometry is not rectangular, i.e., 5050 * the product of C * H * S returned by MODE SENSE >= that returned 5051 * by read capacity. This is an idiosyncrasy of the original x86 5052 * disk subsystem. 
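 *
 * Illustrative arithmetic (not part of the driver): if spc = 2048
 * and the MODE SENSE capacity exceeds READ CAPACITY by 4096 blocks,
 * the code below sets g_acyl = (4096 + 2048 - 1) / 2048 = 2 alternate
 * cylinders and reduces g_ncyl accordingly.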
5053 */ 5054 if (modesense_capacity >= capacity) { 5055 SD_INFO(SD_LOG_COMMON, un, 5056 "sd_get_physical_geometry: adjusting acyl; " 5057 "old: %d; new: %d\n", pgeom_p->g_acyl, 5058 (modesense_capacity - capacity + spc - 1) / spc); 5059 if (sector_size != 0) { 5060 /* 1243403: NEC D38x7 drives don't support sec size */ 5061 pgeom_p->g_secsize = (unsigned short)sector_size; 5062 } 5063 pgeom_p->g_nsect = (unsigned short)nsect; 5064 pgeom_p->g_nhead = (unsigned short)nhead; 5065 pgeom_p->g_capacity = capacity; 5066 pgeom_p->g_acyl = 5067 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 5068 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 5069 } 5070 5071 pgeom_p->g_rpm = (unsigned short)rpm; 5072 pgeom_p->g_intrlv = (unsigned short)intrlv; 5073 ret = 0; 5074 5075 SD_INFO(SD_LOG_COMMON, un, 5076 "sd_get_physical_geometry: mode sense geometry:\n"); 5077 SD_INFO(SD_LOG_COMMON, un, 5078 " nsect: %d; sector size: %d; interlv: %d\n", 5079 nsect, sector_size, intrlv); 5080 SD_INFO(SD_LOG_COMMON, un, 5081 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 5082 nhead, ncyl, rpm, modesense_capacity); 5083 SD_INFO(SD_LOG_COMMON, un, 5084 "sd_get_physical_geometry: (cached)\n"); 5085 SD_INFO(SD_LOG_COMMON, un, 5086 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5087 pgeom_p->g_ncyl, pgeom_p->g_acyl, 5088 pgeom_p->g_nhead, pgeom_p->g_nsect); 5089 SD_INFO(SD_LOG_COMMON, un, 5090 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 5091 pgeom_p->g_secsize, pgeom_p->g_capacity, 5092 pgeom_p->g_intrlv, pgeom_p->g_rpm); 5093 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 5094 5095 page4_exit: 5096 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 5097 5098 page3_exit: 5099 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 5100 5101 if (status != 0) { 5102 if (status == EIO) { 5103 /* 5104 * Some disks do not support mode sense(6), we 5105 * should ignore this kind of error(sense key is 5106 * 0x5 - illegal request). 5107 */ 5108 uint8_t *sensep; 5109 int senlen; 5110 5111 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 5112 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 5113 ssc->ssc_uscsi_cmd->uscsi_rqresid); 5114 5115 if (senlen > 0 && 5116 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 5117 sd_ssc_assessment(ssc, 5118 SD_FMT_IGNORE_COMPROMISE); 5119 } else { 5120 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 5121 } 5122 } else { 5123 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5124 } 5125 } 5126 sd_ssc_fini(ssc); 5127 return (ret); 5128 } 5129 5130 /* 5131 * Function: sd_get_virtual_geometry 5132 * 5133 * Description: Ask the controller to tell us about the target device. 5134 * 5135 * Arguments: un - pointer to softstate 5136 * capacity - disk capacity in #blocks 5137 * lbasize - disk block size in bytes 5138 * 5139 * Context: Kernel thread only 5140 */ 5141 5142 static int 5143 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 5144 diskaddr_t capacity, int lbasize) 5145 { 5146 uint_t geombuf; 5147 int spc; 5148 5149 ASSERT(un != NULL); 5150 5151 /* Set sector size, and total number of sectors */ 5152 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 5153 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 5154 5155 /* Let the HBA tell us its geometry */ 5156 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 5157 5158 /* A value of -1 indicates an undefined "geometry" property */ 5159 if (geombuf == (-1)) { 5160 return (EINVAL); 5161 } 5162 5163 /* Initialize the logical geometry cache. 
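 *
 * The "geometry" capability word packs nhead into its upper 16 bits
 * and nsect into its lower 16 bits; for example (hypothetical value),
 * geombuf == 0x00400020 decodes as:
 *
 *	nhead = (0x00400020 >> 16) & 0xffff;	-> 0x0040 = 64 heads
 *	nsect =  0x00400020        & 0xffff;	-> 0x0020 = 32 sectors/track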
 */
5164 	lgeom_p->g_nhead = (geombuf >> 16) & 0xffff;
5165 	lgeom_p->g_nsect = geombuf & 0xffff;
5166 	lgeom_p->g_secsize = un->un_sys_blocksize;
5167 
5168 	spc = lgeom_p->g_nhead * lgeom_p->g_nsect;
5169 
5170 	/*
5171 	 * Note: The driver originally converted the capacity value from
5172 	 * target blocks to system blocks. However, the capacity value passed
5173 	 * to this routine is already in terms of system blocks (this scaling
5174 	 * is done when the READ CAPACITY command is issued and processed).
5175 	 * This 'error' may have gone undetected because the usage of g_ncyl
5176 	 * (which is based upon g_capacity) is very limited within the driver.
5177 	 */
5178 	lgeom_p->g_capacity = capacity;
5179 
5180 	/*
5181 	 * Set ncyl to zero if the HBA returned a zero nhead or nsect value;
5182 	 * the HBA may return zero values if the device has been removed.
5183 	 */
5184 	if (spc == 0) {
5185 		lgeom_p->g_ncyl = 0;
5186 	} else {
5187 		lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
5188 	}
5189 	lgeom_p->g_acyl = 0;
5190 
5191 	SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
5192 	return (0);
5193 
5194 }
5195 /*
5196  * Function: sd_update_block_info
5197  *
5198  * Description: Record a new target sector size and capacity in the soft
5199  *		state and mark them valid.
5200  *
5201  * Arguments: un: unit struct.
5202  *		lbasize: new target sector size
5203  *		capacity: new target capacity, i.e. block count
5204  *
5205  * Context: Kernel thread context
5206  */
5207 
5208 static void
5209 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
5210 {
5211 	if (lbasize != 0) {
5212 		un->un_tgt_blocksize = lbasize;
5213 		un->un_f_tgt_blocksize_is_valid = TRUE;
5214 		if (!un->un_f_has_removable_media) {
5215 			un->un_sys_blocksize = lbasize;
5216 		}
5217 	}
5218 
5219 	if (capacity != 0) {
5220 		un->un_blockcount = capacity;
5221 		un->un_f_blockcount_is_valid = TRUE;
5222 	}
5223 }
5224 
5225 
5226 /*
5227  * Function: sd_register_devid
5228  *
5229  * Description: This routine will obtain the device id information from the
5230  *		target, obtain the serial number, and register the device
5231  *		id with the ddi framework.
5232  *
5233  * Arguments: ssc - access handle; ssc->ssc_un is the unit's soft state
5234  *		devi - the system's dev_info_t for the device.
5235  *		reservation_flag - indicates if a reservation conflict
5236  *		occurred during attach
5237  *
5238  * Context: Kernel Thread
5239  */
5240 static void
5241 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag)
5242 {
5243 	int		rval = 0;
5244 	uchar_t		*inq80 = NULL;
5245 	size_t		inq80_len = MAX_INQUIRY_SIZE;
5246 	size_t		inq80_resid = 0;
5247 	uchar_t		*inq83 = NULL;
5248 	size_t		inq83_len = MAX_INQUIRY_SIZE;
5249 	size_t		inq83_resid = 0;
5250 	int		dlen, len;
5251 	char		*sn;
5252 	struct sd_lun	*un;
5253 
5254 	ASSERT(ssc != NULL);
5255 	un = ssc->ssc_un;
5256 	ASSERT(un != NULL);
5257 	ASSERT(mutex_owned(SD_MUTEX(un)));
5258 	ASSERT((SD_DEVINFO(un)) == devi);
5259 
5260 
5261 	/*
5262 	 * We check the availability of the World Wide Name (0x83) and Unit
5263 	 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
5264 	 * un_vpd_page_mask from them, we decide which way to get the WWN. If
5265 	 * 0x83 is available, that is the best choice. Our next choice is
5266 	 * 0x80. If neither is available, we munge the devid from the device
5267 	 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
5268 	 * to fabricate a devid for non-Sun qualified disks.
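 *
 * In outline, the flow implemented below is:
 *
 *	1. gather VPD page 0x80/0x83 data where supported;
 *	2. if the transport already registered a devid, use that;
 *	3. else if un_f_opt_fab_devid is set, read a fabricated devid
 *	   from disk (sd_get_devid) or create one (sd_create_devid);
 *	4. else encode a devid from INQUIRY plus the 0x80/0x83 data via
 *	   ddi_devid_scsi_encode(), falling back to fabrication.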
5269 */ 5270 if (sd_check_vpd_page_support(ssc) == 0) { 5271 /* collect page 80 data if available */ 5272 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5273 5274 mutex_exit(SD_MUTEX(un)); 5275 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5276 5277 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len, 5278 0x01, 0x80, &inq80_resid); 5279 5280 if (rval != 0) { 5281 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5282 kmem_free(inq80, inq80_len); 5283 inq80 = NULL; 5284 inq80_len = 0; 5285 } else if (ddi_prop_exists( 5286 DDI_DEV_T_NONE, SD_DEVINFO(un), 5287 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 5288 INQUIRY_SERIAL_NO) == 0) { 5289 /* 5290 * If we don't already have a serial number 5291 * property, do quick verify of data returned 5292 * and define property. 5293 */ 5294 dlen = inq80_len - inq80_resid; 5295 len = (size_t)inq80[3]; 5296 if ((dlen >= 4) && ((len + 4) <= dlen)) { 5297 /* 5298 * Ensure sn termination, skip leading 5299 * blanks, and create property 5300 * 'inquiry-serial-no'. 5301 */ 5302 sn = (char *)&inq80[4]; 5303 sn[len] = 0; 5304 while (*sn && (*sn == ' ')) 5305 sn++; 5306 if (*sn) { 5307 (void) ddi_prop_update_string( 5308 DDI_DEV_T_NONE, 5309 SD_DEVINFO(un), 5310 INQUIRY_SERIAL_NO, sn); 5311 } 5312 } 5313 } 5314 mutex_enter(SD_MUTEX(un)); 5315 } 5316 5317 /* collect page 83 data if available */ 5318 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 5319 mutex_exit(SD_MUTEX(un)); 5320 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 5321 5322 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len, 5323 0x01, 0x83, &inq83_resid); 5324 5325 if (rval != 0) { 5326 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5327 kmem_free(inq83, inq83_len); 5328 inq83 = NULL; 5329 inq83_len = 0; 5330 } 5331 mutex_enter(SD_MUTEX(un)); 5332 } 5333 } 5334 5335 /* 5336 * If transport has already registered a devid for this target 5337 * then that takes precedence over the driver's determination 5338 * of the devid. 5339 * 5340 * NOTE: The reason this check is done here instead of at the beginning 5341 * of the function is to allow the code above to create the 5342 * 'inquiry-serial-no' property. 5343 */ 5344 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 5345 ASSERT(un->un_devid); 5346 un->un_f_devid_transport_defined = TRUE; 5347 goto cleanup; /* use devid registered by the transport */ 5348 } 5349 5350 /* 5351 * This is the case of antiquated Sun disk drives that have the 5352 * FAB_DEVID property set in the disk_table. These drives 5353 * manage the devid's by storing them in last 2 available sectors 5354 * on the drive and have them fabricated by the ddi layer by calling 5355 * ddi_devid_init and passing the DEVID_FAB flag. 5356 */ 5357 if (un->un_f_opt_fab_devid == TRUE) { 5358 /* 5359 * Depending on EINVAL isn't reliable, since a reserved disk 5360 * may result in invalid geometry, so check to make sure a 5361 * reservation conflict did not occur during attach. 5362 */ 5363 if ((sd_get_devid(ssc) == EINVAL) && 5364 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5365 /* 5366 * The devid is invalid AND there is no reservation 5367 * conflict. Fabricate a new devid. 
5368 			 */
5369 			(void) sd_create_devid(ssc);
5370 		}
5371 
5372 		/* Register the devid if it exists */
5373 		if (un->un_devid != NULL) {
5374 			(void) ddi_devid_register(SD_DEVINFO(un),
5375 			    un->un_devid);
5376 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
5377 			    "sd_register_devid: Devid Fabricated\n");
5378 		}
5379 		goto cleanup;
5380 	}
5381 
5382 	/* encode best devid possible based on data available */
5383 	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
5384 	    (char *)ddi_driver_name(SD_DEVINFO(un)),
5385 	    (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
5386 	    inq80, inq80_len - inq80_resid, inq83, inq83_len -
5387 	    inq83_resid, &un->un_devid) == DDI_SUCCESS) {
5388 
5389 		/* devid successfully encoded, register devid */
5390 		(void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
5391 
5392 	} else {
5393 		/*
5394 		 * Unable to encode a devid based on data available.
5395 		 * This is not a Sun qualified disk. Older Sun disk
5396 		 * drives that have the SD_FAB_DEVID property
5397 		 * set in the disk_table and non-Sun qualified
5398 		 * disks are treated in the same manner. These
5399 		 * drives manage the devids by storing them in the
5400 		 * last 2 available sectors on the drive and
5401 		 * have them fabricated by the ddi layer by
5402 		 * calling ddi_devid_init and passing the
5403 		 * DEVID_FAB flag.
5404 		 * Create a fabricated devid only if one does not
5405 		 * already exist.
5406 		 */
5407 		if (sd_get_devid(ssc) == EINVAL) {
5408 			(void) sd_create_devid(ssc);
5409 		}
5410 		un->un_f_opt_fab_devid = TRUE;
5411 
5412 		/* Register the devid if it exists */
5413 		if (un->un_devid != NULL) {
5414 			(void) ddi_devid_register(SD_DEVINFO(un),
5415 			    un->un_devid);
5416 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
5417 			    "sd_register_devid: devid fabricated using "
5418 			    "ddi framework\n");
5419 		}
5420 	}
5421 
5422 cleanup:
5423 	/* clean up resources */
5424 	if (inq80 != NULL) {
5425 		kmem_free(inq80, inq80_len);
5426 	}
5427 	if (inq83 != NULL) {
5428 		kmem_free(inq83, inq83_len);
5429 	}
5430 }
5431 
5432 
5433 
5434 /*
5435  * Function: sd_get_devid
5436  *
5437  * Description: This routine will return 0 if a valid device id has been
5438  *		obtained from the target and stored in the soft state. If a
5439  *		valid device id has not been previously read and stored, a
5440  *		read attempt will be made.
5441  *
5442  * Arguments: ssc - access handle; ssc->ssc_un is the unit's soft state
5443  *
5444  * Return Code: 0 if we successfully get the device id; an errno
5445  *		(e.g. EINVAL) otherwise
5446  *
5447  * Context: Kernel Thread
5448  */
5449 static int
5450 sd_get_devid(sd_ssc_t *ssc)
5451 {
5452 	struct dk_devid		*dkdevid;
5453 	ddi_devid_t		tmpid;
5454 	uint_t			*ip;
5455 	size_t			sz;
5456 	diskaddr_t		blk;
5457 	int			status;
5458 	int			chksum;
5459 	int			i;
5460 	size_t			buffer_size;
5461 	struct sd_lun		*un;
5462 
5463 	ASSERT(ssc != NULL);
5464 	un = ssc->ssc_un;
5465 	ASSERT(un != NULL);
5466 	ASSERT(mutex_owned(SD_MUTEX(un)));
5467 
5468 	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
5469 	    un);
5470 
5471 	if (un->un_devid != NULL) {
5472 		return (0);
5473 	}
5474 
5475 	mutex_exit(SD_MUTEX(un));
5476 	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5477 	    (void *)SD_PATH_DIRECT) != 0) {
5478 		mutex_enter(SD_MUTEX(un));
5479 		return (EINVAL);
5480 	}
5481 
5482 	/*
5483 	 * Read and verify device id, stored in the reserved cylinders at the
5484 	 * end of the disk. Backup label is on the odd sectors of the last
5485 	 * track of the last cylinder. Device id will be on the track of the
5486 	 * next to last cylinder.
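 *
 * The on-disk record is integrity-checked by XORing the sector as
 * 32-bit words; with DEV_BSIZE == 512 that covers
 * (512 - sizeof (int)) / sizeof (int) == 127 words, as in the loop
 * below:
 *
 *	chksum = 0;
 *	for (i = 0; i < 127; i++)
 *		chksum ^= ip[i];
 *
 * and the result must equal the value stored by DKD_FORMCHKSUM() when
 * the devid was written (see sd_write_deviceid).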
5487 */ 5488 mutex_enter(SD_MUTEX(un)); 5489 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 5490 mutex_exit(SD_MUTEX(un)); 5491 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 5492 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk, 5493 SD_PATH_DIRECT); 5494 5495 if (status != 0) { 5496 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5497 goto error; 5498 } 5499 5500 /* Validate the revision */ 5501 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 5502 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 5503 status = EINVAL; 5504 goto error; 5505 } 5506 5507 /* Calculate the checksum */ 5508 chksum = 0; 5509 ip = (uint_t *)dkdevid; 5510 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); 5511 i++) { 5512 chksum ^= ip[i]; 5513 } 5514 5515 /* Compare the checksums */ 5516 if (DKD_GETCHKSUM(dkdevid) != chksum) { 5517 status = EINVAL; 5518 goto error; 5519 } 5520 5521 /* Validate the device id */ 5522 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 5523 status = EINVAL; 5524 goto error; 5525 } 5526 5527 /* 5528 * Store the device id in the driver soft state 5529 */ 5530 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 5531 tmpid = kmem_alloc(sz, KM_SLEEP); 5532 5533 mutex_enter(SD_MUTEX(un)); 5534 5535 un->un_devid = tmpid; 5536 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 5537 5538 kmem_free(dkdevid, buffer_size); 5539 5540 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 5541 5542 return (status); 5543 error: 5544 mutex_enter(SD_MUTEX(un)); 5545 kmem_free(dkdevid, buffer_size); 5546 return (status); 5547 } 5548 5549 5550 /* 5551 * Function: sd_create_devid 5552 * 5553 * Description: This routine will fabricate the device id and write it 5554 * to the disk. 5555 * 5556 * Arguments: un - driver soft state (unit) structure 5557 * 5558 * Return Code: value of the fabricated device id 5559 * 5560 * Context: Kernel Thread 5561 */ 5562 5563 static ddi_devid_t 5564 sd_create_devid(sd_ssc_t *ssc) 5565 { 5566 struct sd_lun *un; 5567 5568 ASSERT(ssc != NULL); 5569 un = ssc->ssc_un; 5570 ASSERT(un != NULL); 5571 5572 /* Fabricate the devid */ 5573 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 5574 == DDI_FAILURE) { 5575 return (NULL); 5576 } 5577 5578 /* Write the devid to disk */ 5579 if (sd_write_deviceid(ssc) != 0) { 5580 ddi_devid_free(un->un_devid); 5581 un->un_devid = NULL; 5582 } 5583 5584 return (un->un_devid); 5585 } 5586 5587 5588 /* 5589 * Function: sd_write_deviceid 5590 * 5591 * Description: This routine will write the device id to the disk 5592 * reserved sector. 
5593  *
5594  * Arguments: ssc - access handle; ssc->ssc_un is the unit's soft state
5595  *
5596  * Return Code: -1 if the devid block cannot be located
5597  *		otherwise the value returned by sd_send_scsi_WRITE
5598  *
5599  * Context: Kernel Thread
5600  */
5601 
5602 static int
5603 sd_write_deviceid(sd_ssc_t *ssc)
5604 {
5605 	struct dk_devid		*dkdevid;
5606 	uchar_t			*buf;
5607 	diskaddr_t		blk;
5608 	uint_t			*ip, chksum;
5609 	int			status;
5610 	int			i;
5611 	struct sd_lun		*un;
5612 
5613 	ASSERT(ssc != NULL);
5614 	un = ssc->ssc_un;
5615 	ASSERT(un != NULL);
5616 	ASSERT(mutex_owned(SD_MUTEX(un)));
5617 
5618 	mutex_exit(SD_MUTEX(un));
5619 	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5620 	    (void *)SD_PATH_DIRECT) != 0) {
5621 		mutex_enter(SD_MUTEX(un));
5622 		return (-1);
5623 	}
5624 
5625 
5626 	/* Allocate the buffer */
5627 	buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
5628 	dkdevid = (struct dk_devid *)buf;
5629 
5630 	/* Fill in the revision */
5631 	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
5632 	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
5633 
5634 	/* Copy in the device id */
5635 	mutex_enter(SD_MUTEX(un));
5636 	bcopy(un->un_devid, &dkdevid->dkd_devid,
5637 	    ddi_devid_sizeof(un->un_devid));
5638 	mutex_exit(SD_MUTEX(un));
5639 
5640 	/* Calculate the checksum */
5641 	chksum = 0;
5642 	ip = (uint_t *)dkdevid;
5643 	for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
5644 	    i++) {
5645 		chksum ^= ip[i];
5646 	}
5647 
5648 	/* Fill in the checksum */
5649 	DKD_FORMCHKSUM(chksum, dkdevid);
5650 
5651 	/* Write the reserved sector */
5652 	status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk,
5653 	    SD_PATH_DIRECT);
5654 	if (status != 0)
5655 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5656 
5657 	kmem_free(buf, un->un_sys_blocksize);
5658 
5659 	mutex_enter(SD_MUTEX(un));
5660 	return (status);
5661 }
5662 
5663 
5664 /*
5665  * Function: sd_check_vpd_page_support
5666  *
5667  * Description: This routine sends an inquiry command with the EVPD bit set and
5668  *		a page code of 0x00 to the device. It is used to determine which
5669  *		vital product pages are available to find the devid. We are
5670  *		looking for pages 0x83 or 0x80. If we return -1, the
5671  *		device does not support VPD pages at all.
5672  *
5673  * Arguments: ssc - access handle; ssc->ssc_un is the unit's soft state
5674  *
5675  * Return Code: 0 - success
5676  *		-1 - the device does not implement VPD pages
5677  *
5678  * Context: This routine can sleep.
5679  */
5680 
5681 static int
5682 sd_check_vpd_page_support(sd_ssc_t *ssc)
5683 {
5684 	uchar_t	*page_list	= NULL;
5685 	uchar_t	page_length	= 0xff;	/* Use max possible length */
5686 	uchar_t	evpd		= 0x01;	/* Set the EVPD bit */
5687 	uchar_t	page_code	= 0x00;	/* Supported VPD Pages */
5688 	int	rval		= 0;
5689 	int	counter;
5690 	struct sd_lun	*un;
5691 
5692 	ASSERT(ssc != NULL);
5693 	un = ssc->ssc_un;
5694 	ASSERT(un != NULL);
5695 	ASSERT(mutex_owned(SD_MUTEX(un)));
5696 
5697 	mutex_exit(SD_MUTEX(un));
5698 
5699 	/*
5700 	 * We'll set the page length to the maximum to save figuring it out
5701 	 * with an additional call.
5702 	 */
5703 	page_list = kmem_zalloc(page_length, KM_SLEEP);
5704 
5705 	rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd,
5706 	    page_code, NULL);
5707 
5708 	if (rval != 0)
5709 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5710 
5711 	mutex_enter(SD_MUTEX(un));
5712 
5713 	/*
5714 	 * Now we must validate that the device accepted the command, as some
5715 	 * drives do not support it. If the drive does support it, we will
5716 	 * return 0, and the supported pages will be in un_vpd_page_mask. If
5717 	 * not, we return -1.
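 *
 * For reference, a typical page 0x00 reply is believed to be laid out
 * as follows (per SPC; the 0x80/0x83 entries are examples):
 *
 *	byte 0:		peripheral qualifier/device type
 *	byte 1:		page code (0x00)
 *	byte 3:		page length n
 *	bytes 4..4+n-1:	supported page codes in ascending order,
 *			e.g. 0x00 0x80 0x83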
5718 */ 5719 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5720 /* Loop to find one of the 2 pages we need */ 5721 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5722 5723 /* 5724 * Pages are returned in ascending order, and 0x83 is what we 5725 * are hoping for. 5726 */ 5727 while ((page_list[counter] <= 0x86) && 5728 (counter <= (page_list[VPD_PAGE_LENGTH] + 5729 VPD_HEAD_OFFSET))) { 5730 /* 5731 * Add 3 because page_list[3] is the number of 5732 * pages minus 3 5733 */ 5734 5735 switch (page_list[counter]) { 5736 case 0x00: 5737 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5738 break; 5739 case 0x80: 5740 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5741 break; 5742 case 0x81: 5743 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5744 break; 5745 case 0x82: 5746 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5747 break; 5748 case 0x83: 5749 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5750 break; 5751 case 0x86: 5752 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5753 break; 5754 } 5755 counter++; 5756 } 5757 5758 } else { 5759 rval = -1; 5760 5761 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5762 "sd_check_vpd_page_support: This drive does not implement " 5763 "VPD pages.\n"); 5764 } 5765 5766 kmem_free(page_list, page_length); 5767 5768 return (rval); 5769 } 5770 5771 5772 /* 5773 * Function: sd_setup_pm 5774 * 5775 * Description: Initialize Power Management on the device 5776 * 5777 * Context: Kernel Thread 5778 */ 5779 5780 static void 5781 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi) 5782 { 5783 uint_t log_page_size; 5784 uchar_t *log_page_data; 5785 int rval = 0; 5786 struct sd_lun *un; 5787 5788 ASSERT(ssc != NULL); 5789 un = ssc->ssc_un; 5790 ASSERT(un != NULL); 5791 5792 /* 5793 * Since we are called from attach, holding a mutex for 5794 * un is unnecessary. Because some of the routines called 5795 * from here require SD_MUTEX to not be held, assert this 5796 * right up front. 5797 */ 5798 ASSERT(!mutex_owned(SD_MUTEX(un))); 5799 /* 5800 * Since the sd device does not have the 'reg' property, 5801 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5802 * The following code is to tell cpr that this device 5803 * DOES need to be suspended and resumed. 5804 */ 5805 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5806 "pm-hardware-state", "needs-suspend-resume"); 5807 5808 /* 5809 * This complies with the new power management framework 5810 * for certain desktop machines. Create the pm_components 5811 * property as a string array property. 5812 * If un_f_pm_supported is TRUE, that means the disk 5813 * attached HBA has set the "pm-capable" property and 5814 * the value of this property is bigger than 0. 5815 */ 5816 if (un->un_f_pm_supported) { 5817 /* 5818 * not all devices have a motor, try it first. 5819 * some devices may return ILLEGAL REQUEST, some 5820 * will hang 5821 * The following START_STOP_UNIT is used to check if target 5822 * device has a motor. 
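 *
 * Below, a START STOP UNIT with the ACTIVE power condition is tried
 * first (when power conditions are supported); on failure the driver
 * falls back to a plain START (SD_START_STOP/SD_TARGET_START), and
 * only if that also fails is un_f_start_stop_supported cleared.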
5823 		 */
5824 	un->un_f_start_stop_supported = TRUE;
5825 
5826 	if (un->un_f_power_condition_supported) {
5827 		rval = sd_send_scsi_START_STOP_UNIT(ssc,
5828 		    SD_POWER_CONDITION, SD_TARGET_ACTIVE,
5829 		    SD_PATH_DIRECT);
5830 		if (rval != 0) {
5831 			un->un_f_power_condition_supported = FALSE;
5832 		}
5833 	}
5834 	if (!un->un_f_power_condition_supported) {
5835 		rval = sd_send_scsi_START_STOP_UNIT(ssc,
5836 		    SD_START_STOP, SD_TARGET_START, SD_PATH_DIRECT);
5837 	}
5838 	if (rval != 0) {
5839 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5840 		un->un_f_start_stop_supported = FALSE;
5841 	}
5842 
5843 	/*
5844 	 * Create the pm properties anyway; otherwise the parent can't
5845 	 * go to sleep.
5846 	 */
5847 	un->un_f_pm_is_enabled = TRUE;
5848 	(void) sd_create_pm_components(devi, un);
5849 
5850 	/*
5851 	 * If it claims that log sense is supported, check it out.
5852 	 */
5853 	if (un->un_f_log_sense_supported) {
5854 		rval = sd_log_page_supported(ssc,
5855 		    START_STOP_CYCLE_PAGE);
5856 		if (rval == 1) {
5857 			/* Page found, use it. */
5858 			un->un_start_stop_cycle_page =
5859 			    START_STOP_CYCLE_PAGE;
5860 		} else {
5861 			/*
5862 			 * Page not found or log sense is not
5863 			 * supported.
5864 			 * Notice we do not check the old style
5865 			 * START_STOP_CYCLE_VU_PAGE because this
5866 			 * code path does not apply to old disks.
5867 			 */
5868 			un->un_f_log_sense_supported = FALSE;
5869 			un->un_f_pm_log_sense_smart = FALSE;
5870 		}
5871 	}
5872 
5873 	return;
5874 }
5875 
5876 	/*
5877 	 * For a disk whose attached HBA has not set the "pm-capable"
5878 	 * property, check whether it supports power management.
5879 	 */
5880 	if (!un->un_f_log_sense_supported) {
5881 		un->un_power_level = SD_SPINDLE_ON;
5882 		un->un_f_pm_is_enabled = FALSE;
5883 		return;
5884 	}
5885 
5886 	rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);
5887 
5888 #ifdef	SDDEBUG
5889 	if (sd_force_pm_supported) {
5890 		/* Force a successful result */
5891 		rval = 1;
5892 	}
5893 #endif
5894 
5895 	/*
5896 	 * If the start-stop cycle counter log page is not supported
5897 	 * or if the pm-capable property is set to be false (0),
5898 	 * then we should not create the pm_components property.
5899 	 */
5900 	if (rval == -1) {
5901 		/*
5902 		 * Error.
5903 		 * Reading log sense failed, most likely this is
5904 		 * an older drive that does not support log sense.
5905 		 * If this fails, auto-pm is not supported.
5906 		 */
5907 		un->un_power_level = SD_SPINDLE_ON;
5908 		un->un_f_pm_is_enabled = FALSE;
5909 
5910 	} else if (rval == 0) {
5911 		/*
5912 		 * Page not found.
5913 		 * The start stop cycle counter is implemented as page
5914 		 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For
5915 		 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
5916 		 */
5917 		if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) {
5918 			/*
5919 			 * Page found, use this one.
5920 			 */
5921 			un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
5922 			un->un_f_pm_is_enabled = TRUE;
5923 		} else {
5924 			/*
5925 			 * Error or page not found.
5926 			 * auto-pm is not supported for this device.
5927 			 */
5928 			un->un_power_level = SD_SPINDLE_ON;
5929 			un->un_f_pm_is_enabled = FALSE;
5930 		}
5931 	} else {
5932 		/*
5933 		 * Page found, use it.
5934 		 */
5935 		un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
5936 		un->un_f_pm_is_enabled = TRUE;
5937 	}
5938 
5939 
5940 	if (un->un_f_pm_is_enabled == TRUE) {
5941 		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
5942 		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
5943 
5944 		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
5945 		    log_page_size, un->un_start_stop_cycle_page,
5946 		    0x01, 0, SD_PATH_DIRECT);
5947 
5948 		if (rval != 0) {
5949 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5950 		}
5951 
5952 #ifdef	SDDEBUG
5953 		if (sd_force_pm_supported) {
5954 			/* Force a successful result */
5955 			rval = 0;
5956 		}
5957 #endif
5958 
5959 		/*
5960 		 * If the LOG SENSE for the Start/Stop Cycle Counter page
5961 		 * succeeds, then power management is supported and we can
5962 		 * enable auto-pm.
5963 		 */
5964 		if (rval == 0) {
5965 			(void) sd_create_pm_components(devi, un);
5966 		} else {
5967 			un->un_power_level = SD_SPINDLE_ON;
5968 			un->un_f_pm_is_enabled = FALSE;
5969 		}
5970 
5971 		kmem_free(log_page_data, log_page_size);
5972 	}
5973 }
5974 
5975 
5976 /*
5977  * Function: sd_create_pm_components
5978  *
5979  * Description: Initialize PM property.
5980  *
5981  * Context: Kernel thread context
5982  */
5983 
5984 static void
5985 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
5986 {
5987 	ASSERT(!mutex_owned(SD_MUTEX(un)));
5988 
5989 	if (un->un_f_power_condition_supported) {
5990 		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
5991 		    "pm-components", sd_pwr_pc.pm_comp, 5)
5992 		    != DDI_PROP_SUCCESS) {
5993 			un->un_power_level = SD_SPINDLE_ACTIVE;
5994 			un->un_f_pm_is_enabled = FALSE;
5995 			return;
5996 		}
5997 	} else {
5998 		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
5999 		    "pm-components", sd_pwr_ss.pm_comp, 3)
6000 		    != DDI_PROP_SUCCESS) {
6001 			un->un_power_level = SD_SPINDLE_ON;
6002 			un->un_f_pm_is_enabled = FALSE;
6003 			return;
6004 		}
6005 	}
6006 	/*
6007 	 * When components are initially created they are idle;
6008 	 * power up any non-removables.
6009 	 * Note: the return value of pm_raise_power can't be used
6010 	 * for determining if PM should be enabled for this device.
6011 	 * Even if you check the return values and remove this
6012 	 * property created above, the PM framework will not honor the
6013 	 * change after the first call to pm_raise_power. Hence,
6014 	 * removal of that property does not help if pm_raise_power
6015 	 * fails. In the case of removable media, the start/stop
6016 	 * will fail if the media is not present.
6017 	 */
6018 	if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
6019 	    SD_PM_STATE_ACTIVE(un)) == DDI_SUCCESS)) {
6020 		mutex_enter(SD_MUTEX(un));
6021 		un->un_power_level = SD_PM_STATE_ACTIVE(un);
6022 		mutex_enter(&un->un_pm_mutex);
6023 		/* Set to on and not busy. */
6024 		un->un_pm_count = 0;
6025 	} else {
6026 		mutex_enter(SD_MUTEX(un));
6027 		un->un_power_level = SD_PM_STATE_STOPPED(un);
6028 		mutex_enter(&un->un_pm_mutex);
6029 		/* Set to off. */
6030 		un->un_pm_count = -1;
6031 	}
6032 	mutex_exit(&un->un_pm_mutex);
6033 	mutex_exit(SD_MUTEX(un));
6034 }
6035 
6036 
6037 /*
6038  * Function: sd_ddi_suspend
6039  *
6040  * Description: Performs system power-down operations. This includes
6041  *		setting the drive state to indicate it is suspended so
6042  *		that no new commands will be accepted. Also, wait for
6043  *		all commands that are in transport or queued to a timer
6044  *		for retry to complete. All timeout threads are cancelled.
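 * (The timeouts cancelled below are the reset throttle, delayed
 * cv broadcast, pm, rmw message, retry, and direct priority timeouts;
 * the SCSI watch thread is suspended as well.)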
6045  *
6046  * Return Code: DDI_FAILURE or DDI_SUCCESS
6047  *
6048  * Context: Kernel thread context
6049  */
6050 
6051 static int
6052 sd_ddi_suspend(dev_info_t *devi)
6053 {
6054 	struct sd_lun	*un;
6055 	clock_t		wait_cmds_complete;
6056 
6057 	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6058 	if (un == NULL) {
6059 		return (DDI_FAILURE);
6060 	}
6061 
6062 	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");
6063 
6064 	mutex_enter(SD_MUTEX(un));
6065 
6066 	/* Return success if the device is already suspended. */
6067 	if (un->un_state == SD_STATE_SUSPENDED) {
6068 		mutex_exit(SD_MUTEX(un));
6069 		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6070 		    "device already suspended, exiting\n");
6071 		return (DDI_SUCCESS);
6072 	}
6073 
6074 	/* Return failure if the device is being used by HA */
6075 	if (un->un_resvd_status &
6076 	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
6077 		mutex_exit(SD_MUTEX(un));
6078 		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6079 		    "device in use by HA, exiting\n");
6080 		return (DDI_FAILURE);
6081 	}
6082 
6083 	/*
6084 	 * Return failure if the device is in a resource wait
6085 	 * or power changing state.
6086 	 */
6087 	if ((un->un_state == SD_STATE_RWAIT) ||
6088 	    (un->un_state == SD_STATE_PM_CHANGING)) {
6089 		mutex_exit(SD_MUTEX(un));
6090 		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6091 		    "device in resource wait state, exiting\n");
6092 		return (DDI_FAILURE);
6093 	}
6094 
6095 
6096 	un->un_save_state = un->un_last_state;
6097 	New_state(un, SD_STATE_SUSPENDED);
6098 
6099 	/*
6100 	 * Wait for all commands that are in transport or queued to a timer
6101 	 * for retry to complete.
6102 	 *
6103 	 * While waiting, no new commands will be accepted or sent because of
6104 	 * the new state we set above.
6105 	 *
6106 	 * Wait until the current operation has completed. If we are in the
6107 	 * resource wait state (with an intr outstanding) then we need to wait
6108 	 * until the intr completes and starts the next cmd. We want to wait
6109 	 * for SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND.
6110 	 */
6111 	wait_cmds_complete = ddi_get_lbolt() +
6112 	    (sd_wait_cmds_complete * drv_usectohz(1000000));
6113 
6114 	while (un->un_ncmds_in_transport != 0) {
6115 		/*
6116 		 * Fail if commands do not finish in the specified time.
6117 		 */
6118 		if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
6119 		    wait_cmds_complete) == -1) {
6120 			/*
6121 			 * Undo the state changes made above. Everything
6122 			 * must go back to its original value.
6123 			 */
6124 			Restore_state(un);
6125 			un->un_last_state = un->un_save_state;
6126 			/* Wake up any threads that might be waiting.
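 * (We only get here when cv_timedwait() has returned -1, i.e. the
 * wait timed out with commands still outstanding.)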
 */
6127 			cv_broadcast(&un->un_suspend_cv);
6128 			mutex_exit(SD_MUTEX(un));
6129 			SD_ERROR(SD_LOG_IO_PM, un,
6130 			    "sd_ddi_suspend: failed due to outstanding cmds\n");
6131 			SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
6132 			return (DDI_FAILURE);
6133 		}
6134 	}
6135 
6136 	/*
6137 	 * Cancel SCSI watch thread and timeouts, if any are active
6138 	 */
6139 
6140 	if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
6141 		opaque_t temp_token = un->un_swr_token;
6142 		mutex_exit(SD_MUTEX(un));
6143 		scsi_watch_suspend(temp_token);
6144 		mutex_enter(SD_MUTEX(un));
6145 	}
6146 
6147 	if (un->un_reset_throttle_timeid != NULL) {
6148 		timeout_id_t temp_id = un->un_reset_throttle_timeid;
6149 		un->un_reset_throttle_timeid = NULL;
6150 		mutex_exit(SD_MUTEX(un));
6151 		(void) untimeout(temp_id);
6152 		mutex_enter(SD_MUTEX(un));
6153 	}
6154 
6155 	if (un->un_dcvb_timeid != NULL) {
6156 		timeout_id_t temp_id = un->un_dcvb_timeid;
6157 		un->un_dcvb_timeid = NULL;
6158 		mutex_exit(SD_MUTEX(un));
6159 		(void) untimeout(temp_id);
6160 		mutex_enter(SD_MUTEX(un));
6161 	}
6162 
6163 	mutex_enter(&un->un_pm_mutex);
6164 	if (un->un_pm_timeid != NULL) {
6165 		timeout_id_t temp_id = un->un_pm_timeid;
6166 		un->un_pm_timeid = NULL;
6167 		mutex_exit(&un->un_pm_mutex);
6168 		mutex_exit(SD_MUTEX(un));
6169 		(void) untimeout(temp_id);
6170 		mutex_enter(SD_MUTEX(un));
6171 	} else {
6172 		mutex_exit(&un->un_pm_mutex);
6173 	}
6174 
6175 	if (un->un_rmw_msg_timeid != NULL) {
6176 		timeout_id_t temp_id = un->un_rmw_msg_timeid;
6177 		un->un_rmw_msg_timeid = NULL;
6178 		mutex_exit(SD_MUTEX(un));
6179 		(void) untimeout(temp_id);
6180 		mutex_enter(SD_MUTEX(un));
6181 	}
6182 
6183 	if (un->un_retry_timeid != NULL) {
6184 		timeout_id_t temp_id = un->un_retry_timeid;
6185 		un->un_retry_timeid = NULL;
6186 		mutex_exit(SD_MUTEX(un));
6187 		(void) untimeout(temp_id);
6188 		mutex_enter(SD_MUTEX(un));
6189 
6190 		if (un->un_retry_bp != NULL) {
6191 			un->un_retry_bp->av_forw = un->un_waitq_headp;
6192 			un->un_waitq_headp = un->un_retry_bp;
6193 			if (un->un_waitq_tailp == NULL) {
6194 				un->un_waitq_tailp = un->un_retry_bp;
6195 			}
6196 			un->un_retry_bp = NULL;
6197 			un->un_retry_statp = NULL;
6198 		}
6199 	}
6200 
6201 	if (un->un_direct_priority_timeid != NULL) {
6202 		timeout_id_t temp_id = un->un_direct_priority_timeid;
6203 		un->un_direct_priority_timeid = NULL;
6204 		mutex_exit(SD_MUTEX(un));
6205 		(void) untimeout(temp_id);
6206 		mutex_enter(SD_MUTEX(un));
6207 	}
6208 
6209 	if (un->un_f_is_fibre == TRUE) {
6210 		/*
6211 		 * Remove callbacks for insert and remove events
6212 		 */
6213 		if (un->un_insert_event != NULL) {
6214 			mutex_exit(SD_MUTEX(un));
6215 			(void) ddi_remove_event_handler(un->un_insert_cb_id);
6216 			mutex_enter(SD_MUTEX(un));
6217 			un->un_insert_event = NULL;
6218 		}
6219 
6220 		if (un->un_remove_event != NULL) {
6221 			mutex_exit(SD_MUTEX(un));
6222 			(void) ddi_remove_event_handler(un->un_remove_cb_id);
6223 			mutex_enter(SD_MUTEX(un));
6224 			un->un_remove_event = NULL;
6225 		}
6226 	}
6227 
6228 	mutex_exit(SD_MUTEX(un));
6229 
6230 	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");
6231 
6232 	return (DDI_SUCCESS);
6233 }
6234 
6235 
6236 /*
6237  * Function: sd_ddi_resume
6238  *
6239  * Description: Performs system power-up operations.
6240  *
6241  * Return Code: DDI_SUCCESS
6242  *		DDI_FAILURE
6243  *
6244  * Context: Kernel thread context
6245  */
6246 
6247 static int
6248 sd_ddi_resume(dev_info_t *devi)
6249 {
6250 	struct sd_lun	*un;
6251 
6252 	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6253 	if (un == NULL) {
6254 		return (DDI_FAILURE);
6255 	}
6256 
6257 	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");
6258 
6259 	mutex_enter(SD_MUTEX(un));
6260 	Restore_state(un);
6261 
6262 	/*
6263 	 * Restore the state that was saved, so that un_last_state
6264 	 * gets the right value.
6265 	 */
6266 	un->un_last_state = un->un_save_state;
6267 	/*
6268 	 * Note: throttle comes back at full.
6269 	 * Also note: this MUST be done before calling pm_raise_power
6270 	 * otherwise the system can get hung in biowait. The scenario where
6271 	 * this'll happen is under cpr suspend. Writing of the system
6272 	 * state goes through sddump, which writes 0 to un_throttle. If
6273 	 * writing the system state then fails, for example if the partition
6274 	 * is too small, then cpr attempts a resume. If throttle isn't
6275 	 * restored from the saved value until after calling pm_raise_power,
6276 	 * then cmds sent in sdpower are not transported and sd_send_scsi_cmd
6277 	 * hangs in biowait.
6278 	 */
6279 	un->un_throttle = un->un_saved_throttle;
6280 
6281 	/*
6282 	 * The chance of failure is very small, as the only command done in
6283 	 * the power entry point is a START command when transitioning from
6284 	 * 0->1 or unknown->1. Put it to SPINDLE ON state irrespective of the
6285 	 * state at which suspend was done. Ignore the return value as the
6286 	 * resume should not be failed. In the case of removable media the
6287 	 * media need not be inserted and hence there is a chance that raise
6288 	 * power will fail with media not present.
6289 	 */
6290 	if (un->un_f_attach_spinup) {
6291 		mutex_exit(SD_MUTEX(un));
6292 		(void) pm_raise_power(SD_DEVINFO(un), 0,
6293 		    SD_PM_STATE_ACTIVE(un));
6294 		mutex_enter(SD_MUTEX(un));
6295 	}
6296 
6297 	/*
6298 	 * Don't broadcast to the suspend cv and therefore possibly
6299 	 * start I/O until after power has been restored.
6300 	 */
6301 	cv_broadcast(&un->un_suspend_cv);
6302 	cv_broadcast(&un->un_state_cv);
6303 
6304 	/* restart thread */
6305 	if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
6306 		scsi_watch_resume(un->un_swr_token);
6307 	}
6308 
6309 #if (defined(__fibre))
6310 	if (un->un_f_is_fibre == TRUE) {
6311 		/*
6312 		 * Add callbacks for insert and remove events
6313 		 */
6314 		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
6315 			sd_init_event_callbacks(un);
6316 		}
6317 	}
6318 #endif
6319 
6320 	/*
6321 	 * Transport any pending commands to the target.
6322 	 *
6323 	 * If this is a low-activity device commands in queue will have to
6324 	 * wait until new commands come in, which may take a while. Also, we
6325 	 * specifically don't check un_ncmds_in_transport because we know that
6326 	 * there really are no commands in progress after the unit was
6327 	 * suspended and we could have reached the throttle level, been
6328 	 * suspended, and have no new commands coming in for a while. Highly
6329 	 * unlikely, but so is the low-activity disk scenario.
6330 	 */
6331 	ddi_xbuf_dispatch(un->un_xbuf_attr);
6332 
6333 	sd_start_cmds(un, NULL);
6334 	mutex_exit(SD_MUTEX(un));
6335 
6336 	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");
6337 
6338 	return (DDI_SUCCESS);
6339 }
6340 
6341 
6342 /*
6343  * Function: sd_pm_state_change
6344  *
6345  * Description: Change the driver power state.
6346  *		Someone else is required to actually change the device
6347  *		power level.
6348 * 6349 * Arguments: un - driver soft state (unit) structure 6350 * level - the power level that is changed to 6351 * flag - to decide how to change the power state 6352 * 6353 * Return Code: DDI_SUCCESS 6354 * 6355 * Context: Kernel thread context 6356 */ 6357 static int 6358 sd_pm_state_change(struct sd_lun *un, int level, int flag) 6359 { 6360 ASSERT(un != NULL); 6361 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: entry\n"); 6362 6363 ASSERT(!mutex_owned(SD_MUTEX(un))); 6364 mutex_enter(SD_MUTEX(un)); 6365 6366 if (flag == SD_PM_STATE_ROLLBACK || SD_PM_IS_IO_CAPABLE(un, level)) { 6367 un->un_power_level = level; 6368 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6369 mutex_enter(&un->un_pm_mutex); 6370 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6371 un->un_pm_count++; 6372 ASSERT(un->un_pm_count == 0); 6373 } 6374 mutex_exit(&un->un_pm_mutex); 6375 } else { 6376 /* 6377 * Exit if power management is not enabled for this device, 6378 * or if the device is being used by HA. 6379 */ 6380 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 6381 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 6382 mutex_exit(SD_MUTEX(un)); 6383 SD_TRACE(SD_LOG_POWER, un, 6384 "sd_pm_state_change: exiting\n"); 6385 return (DDI_FAILURE); 6386 } 6387 6388 SD_INFO(SD_LOG_POWER, un, "sd_pm_state_change: " 6389 "un_ncmds_in_driver=%ld\n", un->un_ncmds_in_driver); 6390 6391 /* 6392 * See if the device is not busy, ie.: 6393 * - we have no commands in the driver for this device 6394 * - not waiting for resources 6395 */ 6396 if ((un->un_ncmds_in_driver == 0) && 6397 (un->un_state != SD_STATE_RWAIT)) { 6398 /* 6399 * The device is not busy, so it is OK to go to low 6400 * power state. Indicate low power, but rely on someone 6401 * else to actually change it. 6402 */ 6403 mutex_enter(&un->un_pm_mutex); 6404 un->un_pm_count = -1; 6405 mutex_exit(&un->un_pm_mutex); 6406 un->un_power_level = level; 6407 } 6408 } 6409 6410 mutex_exit(SD_MUTEX(un)); 6411 6412 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: exit\n"); 6413 6414 return (DDI_SUCCESS); 6415 } 6416 6417 6418 /* 6419 * Function: sd_pm_idletimeout_handler 6420 * 6421 * Description: A timer routine that's active only while a device is busy. 6422 * The purpose is to extend slightly the pm framework's busy 6423 * view of the device to prevent busy/idle thrashing for 6424 * back-to-back commands. Do this by comparing the current time 6425 * to the time at which the last command completed and when the 6426 * difference is greater than sd_pm_idletime, call 6427 * pm_idle_component. In addition to indicating idle to the pm 6428 * framework, update the chain type to again use the internal pm 6429 * layers of the driver. 6430 * 6431 * Arguments: arg - driver soft state (unit) structure 6432 * 6433 * Context: Executes in a timeout(9F) thread context 6434 */ 6435 6436 static void 6437 sd_pm_idletimeout_handler(void *arg) 6438 { 6439 struct sd_lun *un = arg; 6440 6441 time_t now; 6442 6443 mutex_enter(&sd_detach_mutex); 6444 if (un->un_detach_count != 0) { 6445 /* Abort if the instance is detaching */ 6446 mutex_exit(&sd_detach_mutex); 6447 return; 6448 } 6449 mutex_exit(&sd_detach_mutex); 6450 6451 now = ddi_get_time(); 6452 /* 6453 * Grab both mutexes, in the proper order, since we're accessing 6454 * both PM and softstate variables. 
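 * SD_MUTEX(un) is taken before un_pm_mutex, the same order used
 * elsewhere in the driver (e.g. sd_create_pm_components()), so no
 * lock-order reversal is possible.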
 */
6456 	mutex_enter(SD_MUTEX(un));
6457 	mutex_enter(&un->un_pm_mutex);
6458 	if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
6459 	    (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
6460 		/*
6461 		 * Update the chain types.
6462 		 * This takes effect on the next new command received.
6463 		 */
6464 		if (un->un_f_non_devbsize_supported) {
6465 			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
6466 		} else {
6467 			un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
6468 		}
6469 		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
6470 
6471 		SD_TRACE(SD_LOG_IO_PM, un,
6472 		    "sd_pm_idletimeout_handler: idling device\n");
6473 		(void) pm_idle_component(SD_DEVINFO(un), 0);
6474 		un->un_pm_idle_timeid = NULL;
6475 	} else {
6476 		un->un_pm_idle_timeid =
6477 		    timeout(sd_pm_idletimeout_handler, un,
6478 		    (drv_usectohz((clock_t)300000))); /* 300 ms. */
6479 	}
6480 	mutex_exit(&un->un_pm_mutex);
6481 	mutex_exit(SD_MUTEX(un));
6482 }
6483 
6484 
6485 /*
6486  * Function: sd_pm_timeout_handler
6487  *
6488  * Description: Callback to tell the framework we are idle.
6489  *
6490  * Context: timeout(9F) thread context.
6491  */
6492 
6493 static void
6494 sd_pm_timeout_handler(void *arg)
6495 {
6496 	struct sd_lun *un = arg;
6497 
6498 	(void) pm_idle_component(SD_DEVINFO(un), 0);
6499 	mutex_enter(&un->un_pm_mutex);
6500 	un->un_pm_timeid = NULL;
6501 	mutex_exit(&un->un_pm_mutex);
6502 }
6503 
6504 
6505 /*
6506  * Function: sdpower
6507  *
6508  * Description: PM entry point.
6509  *
6510  * Return Code: DDI_SUCCESS
6511  *		DDI_FAILURE
6512  *
6513  * Context: Kernel thread context
6514  */
6515 
6516 static int
6517 sdpower(dev_info_t *devi, int component, int level)
6518 {
6519 	struct sd_lun	*un;
6520 	int		instance;
6521 	int		rval = DDI_SUCCESS;
6522 	uint_t		i, log_page_size, maxcycles, ncycles;
6523 	uchar_t		*log_page_data;
6524 	int		log_sense_page;
6525 	int		medium_present;
6526 	time_t		intvlp;
6527 	dev_t		dev;
6528 	struct pm_trans_data	sd_pm_tran_data;
6529 	uchar_t		save_state;
6530 	int		sval;
6531 	uchar_t		state_before_pm;
6532 	int		got_semaphore_here;
6533 	sd_ssc_t	*ssc;
6534 	int		last_power_level;
6535 
6536 	instance = ddi_get_instance(devi);
6537 
6538 	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
6539 	    !SD_PM_IS_LEVEL_VALID(un, level) || component != 0) {
6540 		return (DDI_FAILURE);
6541 	}
6542 
6543 	dev = sd_make_device(SD_DEVINFO(un));
6544 	ssc = sd_ssc_init(un);
6545 
6546 	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);
6547 
6548 	/*
6549 	 * Must synchronize power down with close.
6550 	 * Attempt to decrement/acquire the open/close semaphore,
6551 	 * but do NOT wait on it. If it's not greater than zero,
6552 	 * i.e. it can't be decremented without waiting, then
6553 	 * someone else, either open or close, already has it
6554 	 * and the try returns 0. Use that knowledge here to determine
6555 	 * if it's OK to change the device power level.
6556 	 * Also, only increment it on exit if it was decremented, i.e. gotten,
6557 	 * here.
6558 	 */
6559 	got_semaphore_here = sema_tryp(&un->un_semoclose);
6560 
6561 	mutex_enter(SD_MUTEX(un));
6562 
6563 	SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
6564 	    un->un_ncmds_in_driver);
6565 
6566 	/*
6567 	 * If un_ncmds_in_driver is non-zero it indicates commands are
6568 	 * already being processed in the driver; if the semaphore was
6569 	 * not gotten here it indicates an open or close is being processed.
6570 	 * If, at the same time, somebody is requesting a transition to a
6571 	 * lower power level that can't perform I/O, that can't be allowed,
6572 	 * so we return failure.
6573 	 */
6574 	if ((!SD_PM_IS_IO_CAPABLE(un, level)) &&
6575 	    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
6576 		mutex_exit(SD_MUTEX(un));
6577 
6578 		if (got_semaphore_here != 0) {
6579 			sema_v(&un->un_semoclose);
6580 		}
6581 		SD_TRACE(SD_LOG_IO_PM, un,
6582 		    "sdpower: exit, device has queued cmds.\n");
6583 
6584 		goto sdpower_failed;
6585 	}
6586 
6587 	/*
6588 	 * If the state is OFFLINE, the disk is completely dead: turning
6589 	 * it on or off would require sending commands, which would fail
6590 	 * anyway, so just return here.
6591 	 *
6592 	 * Power changes to a device that's OFFLINE or SUSPENDED
6593 	 * are not allowed.
6594 	 */
6595 	if ((un->un_state == SD_STATE_OFFLINE) ||
6596 	    (un->un_state == SD_STATE_SUSPENDED)) {
6597 		mutex_exit(SD_MUTEX(un));
6598 
6599 		if (got_semaphore_here != 0) {
6600 			sema_v(&un->un_semoclose);
6601 		}
6602 		SD_TRACE(SD_LOG_IO_PM, un,
6603 		    "sdpower: exit, device is off-line.\n");
6604 
6605 		goto sdpower_failed;
6606 	}
6607 
6608 	/*
6609 	 * Change the device's state to indicate its power level
6610 	 * is being changed. Do this to prevent a power off in the
6611 	 * middle of commands, which is especially bad on devices
6612 	 * that are really powered off instead of just spun down.
6613 	 */
6614 	state_before_pm = un->un_state;
6615 	un->un_state = SD_STATE_PM_CHANGING;
6616 
6617 	mutex_exit(SD_MUTEX(un));
6618 
6619 	/*
6620 	 * If the log sense command is not supported, bypass the
6621 	 * following checking; otherwise, check the log sense
6622 	 * information for this device.
6623 	 */
6624 	if (SD_PM_STOP_MOTOR_NEEDED(un, level) &&
6625 	    un->un_f_log_sense_supported) {
6626 		/*
6627 		 * Get the log sense information to understand whether
6628 		 * the power-cycle counts have gone beyond the threshold.
6629 		 */
6630 		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
6631 		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
6632 
6633 		mutex_enter(SD_MUTEX(un));
6634 		log_sense_page = un->un_start_stop_cycle_page;
6635 		mutex_exit(SD_MUTEX(un));
6636 
6637 		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
6638 		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
6639 
6640 		if (rval != 0) {
6641 			if (rval == EIO)
6642 				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6643 			else
6644 				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6645 		}
6646 
6647 #ifdef	SDDEBUG
6648 		if (sd_force_pm_supported) {
6649 			/* Force a successful result */
6650 			rval = 0;
6651 		}
6652 #endif
6653 		if (rval != 0) {
6654 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
6655 			    "Log Sense Failed\n");
6656 
6657 			kmem_free(log_page_data, log_page_size);
6658 			/* Cannot support power management on those drives */
6659 
6660 			if (got_semaphore_here != 0) {
6661 				sema_v(&un->un_semoclose);
6662 			}
6663 			/*
6664 			 * On exit put the state back to its original value
6665 			 * and broadcast to anyone waiting for the power
6666 			 * change completion.
6667 			 */
6668 			mutex_enter(SD_MUTEX(un));
6669 			un->un_state = state_before_pm;
6670 			cv_broadcast(&un->un_suspend_cv);
6671 			mutex_exit(SD_MUTEX(un));
6672 			SD_TRACE(SD_LOG_IO_PM, un,
6673 			    "sdpower: exit, Log Sense Failed.\n");
6674 
6675 			goto sdpower_failed;
6676 		}
6677 
6678 		/*
6679 		 * From the page data - Convert the essential information to
6680 		 * pm_trans_data
6681 		 */
6682 		maxcycles =
6683 		    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
6684 		    (log_page_data[0x1E] << 8) | log_page_data[0x1F];
6685 
6686 		ncycles =
6687 		    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
6688 		    (log_page_data[0x26] << 8) | log_page_data[0x27];
6689 
6690 		if (un->un_f_pm_log_sense_smart) {
6691 			sd_pm_tran_data.un.smart_count.allowed = maxcycles;
6692 			sd_pm_tran_data.un.smart_count.consumed = ncycles;
6693 			sd_pm_tran_data.un.smart_count.flag = 0;
6694 			sd_pm_tran_data.format = DC_SMART_FORMAT;
6695 		} else {
6696 			sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
6697 			sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
6698 			for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
6699 				sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
6700 				    log_page_data[8+i];
6701 			}
6702 			sd_pm_tran_data.un.scsi_cycles.flag = 0;
6703 			sd_pm_tran_data.format = DC_SCSI_FORMAT;
6704 		}
6705 
6706 		kmem_free(log_page_data, log_page_size);
6707 
6708 		/*
6709 		 * Call pm_trans_check routine to get the Ok from
6710 		 * the global policy
6711 		 */
6712 		rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
6713 #ifdef	SDDEBUG
6714 		if (sd_force_pm_supported) {
6715 			/* Force a successful result */
6716 			rval = 1;
6717 		}
6718 #endif
6719 		switch (rval) {
6720 		case 0:
6721 			/*
6722 			 * Not OK to power cycle, or an error occurred in the
6723 			 * parameters passed. Either way, we were advised to
6724 			 * wait intvlp seconds before considering a power
6725 			 * cycle. We are supposed to pretend we are busy so
6726 			 * that the pm framework will never call our power
6727 			 * entry point; because of that, install a timeout
6728 			 * handler and wait for the recommended time to
6729 			 * elapse, so that power management can be effective again.
6730 			 *
6731 			 * To effect this behavior, call pm_busy_component to
6732 			 * indicate to the framework this device is busy.
6733 			 * By not adjusting un_pm_count the rest of PM in
6734 			 * the driver will function normally, and independent
6735 			 * of this but because the framework is told the device
6736 			 * is busy it won't attempt powering down until it gets
6737 			 * a matching idle. The timeout handler sends this.
6738 			 * Note: sd_pm_entry can't be called here to do this
6739 			 * because sdpower may have been called as a result
6740 			 * of a call to pm_raise_power from within sd_pm_entry.
6741 			 *
6742 			 * If a timeout handler is already active then
6743 			 * don't install another.
6744 			 */
6745 			mutex_enter(&un->un_pm_mutex);
6746 			if (un->un_pm_timeid == NULL) {
6747 				un->un_pm_timeid =
6748 				    timeout(sd_pm_timeout_handler,
6749 				    un, intvlp * drv_usectohz(1000000));
6750 				mutex_exit(&un->un_pm_mutex);
6751 				(void) pm_busy_component(SD_DEVINFO(un), 0);
6752 			} else {
6753 				mutex_exit(&un->un_pm_mutex);
6754 			}
6755 			if (got_semaphore_here != 0) {
6756 				sema_v(&un->un_semoclose);
6757 			}
6758 			/*
6759 			 * On exit put the state back to its original value
6760 			 * and broadcast to anyone waiting for the power
6761 			 * change completion.
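 * (As used here, pm_trans_check() is understood to return 1 when the
 * power cycle is allowed, 0 when it would exceed the threshold, with
 * intvlp set to the advised delay in seconds, and -1 on bad
 * parameters, hence the case 0 and case -1 arms of this switch.)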
6762 			 */
6763 			mutex_enter(SD_MUTEX(un));
6764 			un->un_state = state_before_pm;
6765 			cv_broadcast(&un->un_suspend_cv);
6766 			mutex_exit(SD_MUTEX(un));
6767 
6768 			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
6769 			    "trans check Failed, not ok to power cycle.\n");
6770 
6771 			goto sdpower_failed;
6772 		case -1:
6773 			if (got_semaphore_here != 0) {
6774 				sema_v(&un->un_semoclose);
6775 			}
6776 			/*
6777 			 * On exit put the state back to its original value
6778 			 * and broadcast to anyone waiting for the power
6779 			 * change completion.
6780 			 */
6781 			mutex_enter(SD_MUTEX(un));
6782 			un->un_state = state_before_pm;
6783 			cv_broadcast(&un->un_suspend_cv);
6784 			mutex_exit(SD_MUTEX(un));
6785 			SD_TRACE(SD_LOG_IO_PM, un,
6786 			    "sdpower: exit, trans check command Failed.\n");
6787 
6788 			goto sdpower_failed;
6789 		}
6790 	}
6791 
6792 	if (!SD_PM_IS_IO_CAPABLE(un, level)) {
6793 		/*
6794 		 * Save the last state... if the STOP FAILS we need it
6795 		 * for restoring
6796 		 */
6797 		mutex_enter(SD_MUTEX(un));
6798 		save_state = un->un_last_state;
6799 		last_power_level = un->un_power_level;
6800 		/*
6801 		 * There must not be any cmds getting processed
6802 		 * in the driver when we get here. Power to the
6803 		 * device is potentially going off.
6804 		 */
6805 		ASSERT(un->un_ncmds_in_driver == 0);
6806 		mutex_exit(SD_MUTEX(un));
6807 
6808 		/*
6809 		 * For now, PM suspend the device completely before the
6810 		 * spindle is turned off.
6811 		 */
6812 		if ((rval = sd_pm_state_change(un, level, SD_PM_STATE_CHANGE))
6813 		    == DDI_FAILURE) {
6814 			if (got_semaphore_here != 0) {
6815 				sema_v(&un->un_semoclose);
6816 			}
6817 			/*
6818 			 * On exit put the state back to its original value
6819 			 * and broadcast to anyone waiting for the power
6820 			 * change completion.
6821 			 */
6822 			mutex_enter(SD_MUTEX(un));
6823 			un->un_state = state_before_pm;
6824 			un->un_power_level = last_power_level;
6825 			cv_broadcast(&un->un_suspend_cv);
6826 			mutex_exit(SD_MUTEX(un));
6827 			SD_TRACE(SD_LOG_IO_PM, un,
6828 			    "sdpower: exit, PM suspend Failed.\n");
6829 
6830 			goto sdpower_failed;
6831 		}
6832 	}
6833 
6834 	/*
6835 	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
6836 	 * close, or strategy. Dump no longer uses this routine; it uses its
6837 	 * own code so it can be done in polled mode.
6838 	 */
6839 
6840 	medium_present = TRUE;
6841 
6842 	/*
6843 	 * When powering up, issue a TUR in case the device is at unit
6844 	 * attention. Don't do retries. Bypass the PM layer, otherwise
6845 	 * a deadlock on un_pm_busy_cv will occur.
6846 	 */
6847 	if (SD_PM_IS_IO_CAPABLE(un, level)) {
6848 		sval = sd_send_scsi_TEST_UNIT_READY(ssc,
6849 		    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
6850 		if (sval != 0)
6851 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6852 	}
6853 
6854 	if (un->un_f_power_condition_supported) {
6855 		char *pm_condition_name[] = {"STOPPED", "STANDBY",
6856 		    "IDLE", "ACTIVE"};
6857 		SD_TRACE(SD_LOG_IO_PM, un,
6858 		    "sdpower: sending \'%s\' power condition",
6859 		    pm_condition_name[level]);
6860 		sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
6861 		    sd_pl2pc[level], SD_PATH_DIRECT);
6862 	} else {
6863 		SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
6864 		    ((level == SD_SPINDLE_ON) ? "START" : "STOP"));
6865 		sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
6866 		    ((level == SD_SPINDLE_ON) ? SD_TARGET_START :
6867 		    SD_TARGET_STOP), SD_PATH_DIRECT);
6868 	}
6869 	if (sval != 0) {
6870 		if (sval == EIO)
6871 			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6872 		else
6873 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6874 	}
6875 
6876 	/* Command failed, check for media present.
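 * (ENXIO from the START STOP UNIT above is interpreted on
 * removable-media devices as "medium absent" rather than as a hard
 * failure.)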
 */
6877 	if ((sval == ENXIO) && un->un_f_has_removable_media) {
6878 		medium_present = FALSE;
6879 	}
6880 
6881 	/*
6882 	 * The conditions of interest here are:
6883 	 *	if a spindle off with media present fails,
6884 	 *	then restore the state and return an error.
6885 	 *	else if a spindle on fails,
6886 	 *	then return an error (there's no state to restore).
6887 	 * In all other cases we set up for the new state
6888 	 * and return success.
6889 	 */
6890 	if (!SD_PM_IS_IO_CAPABLE(un, level)) {
6891 		if ((medium_present == TRUE) && (sval != 0)) {
6892 			/* The stop command from above failed */
6893 			rval = DDI_FAILURE;
6894 			/*
6895 			 * The stop command failed, and we have media
6896 			 * present. Put the level back by calling
6897 			 * sd_pm_state_change() with the rollback flag and
6898 			 * set the state back to its previous value.
6899 			 */
6900 			(void) sd_pm_state_change(un, last_power_level,
6901 			    SD_PM_STATE_ROLLBACK);
6902 			mutex_enter(SD_MUTEX(un));
6903 			un->un_last_state = save_state;
6904 			mutex_exit(SD_MUTEX(un));
6905 		} else if (un->un_f_monitor_media_state) {
6906 			/*
6907 			 * The stop command from above succeeded.
6908 			 * Terminate the watch thread in case of removable
6909 			 * media devices going into low power state. This is
6910 			 * as per the requirements of the pm framework;
6911 			 * otherwise commands will be generated for the device
6912 			 * (through the watch thread), even in low power state.
6913 			 */
6914 			mutex_enter(SD_MUTEX(un));
6915 			un->un_f_watcht_stopped = FALSE;
6916 			if (un->un_swr_token != NULL) {
6917 				opaque_t temp_token = un->un_swr_token;
6918 				un->un_f_watcht_stopped = TRUE;
6919 				un->un_swr_token = NULL;
6920 				mutex_exit(SD_MUTEX(un));
6921 				(void) scsi_watch_request_terminate(temp_token,
6922 				    SCSI_WATCH_TERMINATE_ALL_WAIT);
6923 			} else {
6924 				mutex_exit(SD_MUTEX(un));
6925 			}
6926 		}
6927 	} else {
6928 		/*
6929 		 * The level requested is I/O capable.
6930 		 * Legacy behavior: return success on a failed spinup
6931 		 * if there is no media in the drive.
6932 		 * Do this by looking at medium_present here.
6933 		 */
6934 		if ((sval != 0) && medium_present) {
6935 			/* The start command from above failed */
6936 			rval = DDI_FAILURE;
6937 		} else {
6938 			/*
6939 			 * The start command from above succeeded;
6940 			 * PM resume the device now that we have
6941 			 * started the disk.
6942 			 */
6943 			(void) sd_pm_state_change(un, level,
6944 			    SD_PM_STATE_CHANGE);
6945 
6946 			/*
6947 			 * Resume the watch thread since it was suspended
6948 			 * when the device went into low power mode.
6949 			 */
6950 			if (un->un_f_monitor_media_state) {
6951 				mutex_enter(SD_MUTEX(un));
6952 				if (un->un_f_watcht_stopped == TRUE) {
6953 					opaque_t temp_token;
6954 
6955 					un->un_f_watcht_stopped = FALSE;
6956 					mutex_exit(SD_MUTEX(un));
6957 					temp_token = scsi_watch_request_submit(
6958 					    SD_SCSI_DEVP(un),
6959 					    sd_check_media_time,
6960 					    SENSE_LENGTH, sd_media_watch_cb,
6961 					    (caddr_t)dev);
6962 					mutex_enter(SD_MUTEX(un));
6963 					un->un_swr_token = temp_token;
6964 				}
6965 				mutex_exit(SD_MUTEX(un));
6966 			}
6967 		}
6968 	}
6969 
6970 	if (got_semaphore_here != 0) {
6971 		sema_v(&un->un_semoclose);
6972 	}
6973 	/*
6974 	 * On exit put the state back to its original value
6975 	 * and broadcast to anyone waiting for the power
6976 	 * change completion.

	/*
	 * On exit put the state back to its original value
	 * and broadcast to anyone waiting for the power
	 * change completion.
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_state = state_before_pm;
	cv_broadcast(&un->un_suspend_cv);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);

	sd_ssc_fini(ssc);
	return (rval);

sdpower_failed:

	sd_ssc_fini(ssc);
	return (DDI_FAILURE);
}


/*
 * Function: sdattach
 *
 * Description: Driver's attach(9e) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *	      cmd  - attach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		return (sd_unit_attach(devi));
	case DDI_RESUME:
		return (sd_ddi_resume(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function: sddetach
 *
 * Description: Driver's detach(9E) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *	      cmd  - detach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_DETACH:
		return (sd_unit_detach(devi));
	case DDI_SUSPEND:
		return (sd_ddi_suspend(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function: sd_sync_with_callback
 *
 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the
 *		soft state while the callback routine is active.
 *
 * Arguments: un: softstate structure for the instance
 *
 * Context: Kernel thread context
 */

static void
sd_sync_with_callback(struct sd_lun *un)
{
	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	ASSERT(un->un_in_callback >= 0);

	while (un->un_in_callback > 0) {
		mutex_exit(SD_MUTEX(un));
		delay(2);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_exit(SD_MUTEX(un));
}

/*
 * Function: sd_unit_attach
 *
 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
 *		the soft state structure for the device and performs
 *		all necessary structure and device initializations.
 *
 * Arguments: devi: the system's dev_info_t for the device.
 *
 * Return Code: DDI_SUCCESS if attach is successful.
 *		DDI_FAILURE if any part of the attach fails.
 *
 * Context: Called at attach(9e) time for the DDI_ATTACH flag.
 *	    Kernel thread context only. Can sleep.
 */

static int
sd_unit_attach(dev_info_t *devi)
{
	struct scsi_device	*devp;
	struct sd_lun		*un;
	char			*variantp;
	char			name_str[48];
	int			reservation_flag = SD_TARGET_IS_UNRESERVED;
	int			instance;
	int			rval;
	int			wc_enabled;
	int			tgt;
	uint64_t		capacity;
	uint_t			lbasize = 0;
	dev_info_t		*pdip = ddi_get_parent(devi);
	int			offbyone = 0;
	int			geom_label_valid = 0;
	sd_ssc_t		*ssc;
	int			status;
	struct sd_fm_internal	*sfip = NULL;
	int			max_xfer_size;

	/*
	 * Retrieve the target driver's private data area. This was set
	 * up by the HBA.
	 */
	devp = ddi_get_driver_private(devi);

	/*
	 * Retrieve the target ID of the device.
	 */
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_TARGET, -1);

	/*
	 * Since we have no idea what state things were left in by the last
	 * user of the device, set up some 'default' settings, i.e. turn 'em
	 * off. The scsi_ifsetcap calls force re-negotiations with the drive.
	 * Do this before the scsi_probe, which sends an inquiry.
	 * This is a fix for bug (4430280).
	 * Of special importance is wide-xfer. The drive could have been left
	 * in wide transfer mode by the last driver to communicate with it;
	 * this includes us. If that's the case, and if the following is not
	 * set up properly or we don't re-negotiate with the drive prior to
	 * transferring data to/from the drive, it causes bus parity errors,
	 * data overruns, and unexpected interrupts. This first occurred when
	 * the fix for bug (4378686) was made.
	 */
	(void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);

	/*
	 * Currently, scsi_ifsetcap sets the tagged-qing capability for all
	 * LUNs on a target. Setting it per lun instance actually sets the
	 * capability of this target, which affects those luns already
	 * attached on the same target. So during attach, we can only disable
	 * this capability when no other lun has been attached on this
	 * target. By doing this, we assume a target has the same tagged-qing
	 * capability for every lun. The condition can be removed when the
	 * HBA is changed to support a per-lun based tagged-qing capability.
	 */
	if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
		(void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
	}

	/*
	 * Use scsi_probe() to issue an INQUIRY command to the device.
	 * This call will allocate and fill in the scsi_inquiry structure
	 * and point the sd_inq member of the scsi_device structure to it.
	 * If the attach succeeds, then this memory will not be de-allocated
	 * (via scsi_unprobe()) until the instance is detached.
	 */
	if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
		goto probe_failed;
	}

	/*
	 * Check the device type as specified in the inquiry data and
	 * claim it if it is of a type that we support.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_DIRECT:
		break;
	case DTYPE_RODIRECT:
		break;
	case DTYPE_OPTICAL:
		break;
	case DTYPE_NOTPRESENT:
	default:
		/* Unsupported device type; fail the attach. */
		goto probe_failed;
	}
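
	/*
	 * Added reference note: the peripheral device type codes accepted
	 * above are the standard SPC values 0x00 (DTYPE_DIRECT,
	 * direct-access block device), 0x05 (DTYPE_RODIRECT, CD/DVD), and
	 * 0x07 (DTYPE_OPTICAL, optical memory). DTYPE_NOTPRESENT means the
	 * target reported that no device is attached at this LUN.
	 */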

	/*
	 * Allocate the soft state structure for this unit.
	 *
	 * We rely upon this memory being set to all zeroes by
	 * ddi_soft_state_zalloc().  We assume that any member of the
	 * soft state structure that is not explicitly initialized by
	 * this routine will have a value of zero.
	 */
	instance = ddi_get_instance(devp->sd_dev);
#ifndef XPV_HVM_DRIVER
	if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) {
		goto probe_failed;
	}
#endif /* !XPV_HVM_DRIVER */

	/*
	 * Retrieve a pointer to the newly-allocated soft state.
	 *
	 * This should NEVER fail if the ddi_soft_state_zalloc() call above
	 * was successful, unless something has gone horribly wrong and the
	 * ddi's soft state internals are corrupt (in which case it is
	 * probably better to halt here than just fail the attach...)
	 */
	if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
		panic("sd_unit_attach: NULL soft state on instance:0x%x",
		    instance);
		/*NOTREACHED*/
	}

	/*
	 * Link the back ptr of the driver soft state to the scsi_device
	 * struct for this lun.
	 * Save a pointer to the softstate in the driver-private area of
	 * the scsi_device struct.
	 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until
	 * we first set un->un_sd below.
	 */
	un->un_sd = devp;
	devp->sd_private = (opaque_t)un;

	/*
	 * The following must be after devp is stored in the soft state struct.
	 */
#ifdef SDDEBUG
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "%s_unit_attach: un:0x%p instance:%d\n",
	    ddi_driver_name(devi), un, instance);
#endif

	/*
	 * Set up the device type and node type (for the minor nodes).
	 * By default we assume that the device can at least support the
	 * Common Command Set. Call it a CD-ROM if it reports itself
	 * as a RODIRECT device.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_RODIRECT:
		un->un_node_type = DDI_NT_CD_CHAN;
		un->un_ctype	 = CTYPE_CDROM;
		break;
	case DTYPE_OPTICAL:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype	 = CTYPE_ROD;
		break;
	default:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype	 = CTYPE_CCS;
		break;
	}
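
	/*
	 * Added note: un_node_type is later handed to cmlb_attach(), which
	 * uses it when creating the minor nodes for this device (the nodes
	 * that surface under /dev/dsk and /dev/rdsk). It may still be
	 * overridden below once the interconnect type is known, e.g. to
	 * DDI_NT_BLOCK_WWN or DDI_NT_BLOCK_SAS.
	 */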

	/*
	 * Try to read the interconnect type from the HBA.
	 *
	 * Note: This driver is currently compiled as two binaries, a parallel
	 * scsi version (sd) and a fibre channel version (ssd). All functional
	 * differences are determined at compile time. In the future a single
	 * binary will be provided and the interconnect type will be used to
	 * differentiate between fibre and parallel scsi behaviors. At that
	 * time it will be necessary for all fibre channel HBAs to support
	 * this property.
	 *
	 * Set un_f_is_fibre to TRUE (default to fibre).
	 */
	un->un_f_is_fibre = TRUE;
	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
	case INTERCONNECT_SSA:
		un->un_interconnect_type = SD_INTERCONNECT_SSA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
		break;
	case INTERCONNECT_PARALLEL:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
		break;
	case INTERCONNECT_SAS:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_SAS;
		un->un_node_type = DDI_NT_BLOCK_SAS;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un);
		break;
	case INTERCONNECT_SATA:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_SATA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
		break;
	case INTERCONNECT_FIBRE:
		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
		break;
	case INTERCONNECT_FABRIC:
		un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
		un->un_node_type = DDI_NT_BLOCK_FABRIC;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
		break;
	default:
#ifdef SD_DEFAULT_INTERCONNECT_TYPE
		/*
		 * The HBA does not support the "interconnect-type" property
		 * (or did not provide a recognized type).
		 *
		 * Note: This will be obsoleted when a single fibre channel
		 * and parallel scsi driver is delivered. In the meantime the
		 * interconnect type will be set to the platform default. If
		 * that type is not parallel SCSI, it means that we should be
		 * assuming "ssd" semantics. However, here this also means
		 * that the FC HBA is not supporting the "interconnect-type"
		 * property like we expect it to, so log this occurrence.
		 */
		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
		if (!SD_IS_PARALLEL_SCSI(un)) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_FIBRE\n", un);
		} else {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_PARALLEL\n", un);
			un->un_f_is_fibre = FALSE;
		}
#else
		/*
		 * Note: This source will be implemented when a single fibre
		 * channel and parallel scsi driver is delivered. The default
		 * will be to assume that if a device does not support the
		 * "interconnect-type" property it is a parallel SCSI HBA and
		 * we will set the interconnect type for parallel scsi.
		 */
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		un->un_f_is_fibre = FALSE;
#endif
		break;
	}

	if (un->un_f_is_fibre == TRUE) {
		if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
		    SCSI_VERSION_3) {
			switch (un->un_interconnect_type) {
			case SD_INTERCONNECT_FIBRE:
			case SD_INTERCONNECT_SSA:
				un->un_node_type = DDI_NT_BLOCK_WWN;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Initialize the Request Sense command for the target
	 */
	if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
		goto alloc_rqs_failed;
	}

	/*
	 * Set un_retry_count with SD_RETRY_COUNT; this is fine for SPARC,
	 * which has separate binaries for sd and ssd.
	 *
	 * x86 has one binary, so un_retry_count is set based on the
	 * interconnect type: fibre = 3, parallel = 5. These hardcoded
	 * values need to match SD_RETRY_COUNT in sddef.h, and they will
	 * go away when SPARC also uses one binary for sd and ssd.
	 */
#if defined(__i386) || defined(__amd64)
	un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
#else
	un->un_retry_count = SD_RETRY_COUNT;
#endif

	/*
	 * Set the per disk retry count to the default number of retries
	 * for disks and CDROMs. This value can be overridden by the
	 * disk property list or an entry in sd.conf.
	 */
	un->un_notready_retry_count =
	    ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
	    : DISK_NOT_READY_RETRY_COUNT(un);

	/*
	 * Set the busy retry count to the default value of un_retry_count.
	 * This can be overridden by entries in sd.conf or the device
	 * config table.
	 */
	un->un_busy_retry_count = un->un_retry_count;

	/*
	 * Init the reset threshold for retries. This number determines
	 * how many retries must be performed before a reset can be issued
	 * (for certain error conditions). This can be overridden by entries
	 * in sd.conf or the device config table.
	 */
	un->un_reset_retry_count = (un->un_retry_count / 2);

	/*
	 * Set un_victim_retry_count to twice the default un_retry_count.
	 */
	un->un_victim_retry_count = (2 * un->un_retry_count);

	/*
	 * Set the reservation release timeout to the default value of
	 * 5 seconds. This can be overridden by entries in ssd.conf or the
	 * device config table.
	 */
	un->un_reserve_release_time = 5;

	/*
	 * Set up the default maximum transfer size. Note that this may
	 * get updated later in the attach, when setting up default wide
	 * operations for disks.
	 */
#if defined(__i386) || defined(__amd64)
	un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
	un->un_partial_dma_supported = 1;
#else
	un->un_max_xfer_size = (uint_t)maxphys;
#endif
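
	/*
	 * Added worked example, derived from the initializations above:
	 * with the parallel SCSI default of un_retry_count == 5, the
	 * derived counts come out as
	 *
	 *	un_reset_retry_count  = 5 / 2 = 2
	 *	un_victim_retry_count = 2 * 5 = 10
	 *
	 * and with the fibre default of 3 they are 1 and 6, respectively.
	 */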

	/*
	 * Get the "allow bus device reset" property (defaults to "enabled"
	 * if the property was not defined). This is to disable bus resets
	 * for certain kinds of error recovery. Note: In the future when a
	 * run-time fibre check is available the soft state flag should
	 * default to enabled.
	 */
	if (un->un_f_is_fibre == TRUE) {
		un->un_f_allow_bus_device_reset = TRUE;
	} else {
		if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
		    "allow-bus-device-reset", 1) != 0) {
			un->un_f_allow_bus_device_reset = TRUE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Bus device reset "
			    "enabled\n", un);
		} else {
			un->un_f_allow_bus_device_reset = FALSE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Bus device reset "
			    "disabled\n", un);
		}
	}

	/*
	 * Check if this is an ATAPI device. ATAPI devices use Group 1
	 * Read/Write commands and Group 2 Mode Sense/Select commands.
	 *
	 * Note: The "obsolete" way of doing this is to check for the "atapi"
	 * property. The new "variant" property with a value of "atapi" has
	 * been introduced so that future 'variants' of standard SCSI
	 * behavior (like atapi) could be specified by the underlying HBA
	 * drivers by supplying a new value for the "variant" property,
	 * instead of having to define a new property.
	 */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
		un->un_f_cfg_is_atapi = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p Atapi device\n", un);
	}
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
	    &variantp) == DDI_PROP_SUCCESS) {
		if (strcmp(variantp, "atapi") == 0) {
			un->un_f_cfg_is_atapi = TRUE;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Atapi device\n", un);
		}
		ddi_prop_free(variantp);
	}

	un->un_cmd_timeout = SD_IO_TIME;

	un->un_busy_timeout = SD_BSY_TIMEOUT;

	/* Info on current states, statuses, etc. (Updated frequently) */
	un->un_state		= SD_STATE_NORMAL;
	un->un_last_state	= SD_STATE_NORMAL;

	/* Control & status info for command throttling */
	un->un_throttle		= sd_max_throttle;
	un->un_saved_throttle	= sd_max_throttle;
	un->un_min_throttle	= sd_min_throttle;

	if (un->un_f_is_fibre == TRUE) {
		un->un_f_use_adaptive_throttle = TRUE;
	} else {
		un->un_f_use_adaptive_throttle = FALSE;
	}

	/* Removable media support. */
	cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
	un->un_mediastate		= DKIO_NONE;
	un->un_specified_mediastate	= DKIO_NONE;

	/* CVs for suspend/resume (PM or DR) */
	cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
	cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);

	/* Power management support. */
	un->un_power_level = SD_SPINDLE_UNINIT;

	cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL);
	un->un_f_wcc_inprog = 0;

	/*
	 * The open/close semaphore is used to serialize threads executing
	 * in the driver's open & close entry point routines for a given
	 * instance.
	 */
	(void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);

	/*
	 * The conf file entry and softstate variable are a forceful
	 * override, meaning a non-zero value must be entered to change the
	 * default.
	 */
	un->un_f_disksort_disabled = FALSE;
	un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT;
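
	/*
	 * Illustrative example (added; hypothetical values): the properties
	 * consulted above are normally supplied by the HBA driver or by an
	 * administrator in a driver .conf file, e.g.:
	 *
	 *	allow-bus-device-reset=0;	disable BDR-based recovery
	 *	variant="atapi";		mark an ATAPI-behaved device
	 */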

	/*
	 * Retrieve the properties from the static driver table or the driver
	 * configuration file (.conf) for this unit and update the soft state
	 * for the device as needed for the indicated properties.
	 * Note: the property configuration needs to occur here as some of the
	 * following routines may have dependencies on soft state flags set
	 * as part of the driver property configuration.
	 */
	sd_read_unit_properties(un);
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p property configuration complete.\n", un);

	/*
	 * A device is treated as hotpluggable only if it has the
	 * "hotpluggable" property; otherwise it is regarded as
	 * non-hotpluggable.
	 */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
	    -1) != -1) {
		un->un_f_is_hotpluggable = TRUE;
	}

	/*
	 * Set the unit's attributes (flags) according to "hotpluggable" and
	 * the RMB bit in the INQUIRY data.
	 */
	sd_set_unit_attributes(un, devi);

	/*
	 * By default, we mark the capacity, lbasize, and geometry
	 * as invalid. Only if we successfully read a valid capacity
	 * will we update the un_blockcount and un_tgt_blocksize with the
	 * valid values (the geometry will be validated later).
	 */
	un->un_f_blockcount_is_valid	= FALSE;
	un->un_f_tgt_blocksize_is_valid	= FALSE;

	/*
	 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
	 * otherwise.
	 */
	un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
	un->un_blockcount = 0;

	/*
	 * Set up the per-instance info needed to determine the correct
	 * CDBs and other info for issuing commands to the target.
	 */
	sd_init_cdb_limits(un);

	/*
	 * Set up the IO chains to use, based upon the target type.
	 */
	if (un->un_f_non_devbsize_supported) {
		un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
	} else {
		un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
	}
	un->un_uscsi_chain_type  = SD_CHAIN_INFO_USCSI_CMD;
	un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
	un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;

	un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
	    sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
	    ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
	ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);


	if (ISCD(un)) {
		un->un_additional_codes = sd_additional_codes;
	} else {
		un->un_additional_codes = NULL;
	}
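
	/*
	 * Illustrative example (added; hypothetical entry): the per-device
	 * overrides applied by sd_read_unit_properties() above typically
	 * come from an sd-config-list (ssd-config-list for ssd) entry in
	 * the driver's .conf file, keyed by the INQUIRY vendor/product
	 * strings, e.g.:
	 *
	 *	sd-config-list = "ACME    SuperDisk", "retries-busy:12";
	 *
	 * "ACME    SuperDisk" is a made-up vendor/product ID (the vendor
	 * field is blank-padded to 8 characters), and the exact set of
	 * recognized keywords depends on the sd version in use.
	 */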

	/*
	 * Create the kstats here so they can be available for attach-time
	 * routines that send commands to the unit (either polled or via
	 * sd_send_scsi_cmd).
	 *
	 * Note: This is a critical sequence that needs to be maintained:
	 *	1) Instantiate the kstats here, before any routines using the
	 *	   iopath (i.e. sd_send_scsi_cmd).
	 *	2) Instantiate and initialize the partition stats
	 *	   (sd_set_pstats).
	 *	3) Initialize the error stats (sd_set_errstats), following
	 *	   sd_validate_geometry(), sd_register_devid(),
	 *	   and sd_cache_control().
	 */

	un->un_stats = kstat_create(sd_label, instance,
	    NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
	if (un->un_stats != NULL) {
		un->un_stats->ks_lock = SD_MUTEX(un);
		kstat_install(un->un_stats);
	}
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p un_stats created\n", un);

	sd_create_errstats(un, instance);
	if (un->un_errstats == NULL) {
		goto create_errstats_failed;
	}
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p errstats created\n", un);

	/*
	 * The following if/else code was relocated here from below as part
	 * of the fix for bug (4430280). However with the default setup added
	 * on entry to this routine, it's no longer absolutely necessary for
	 * this to be before the call to sd_spin_up_unit.
	 */
	if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) {
		int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) ||
		    (devp->sd_inq->inq_ansi == 5)) &&
		    devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque;

		/*
		 * If tagged queueing is supported by the target
		 * and by the host adapter then we will enable it.
		 */
		un->un_tagflags = 0;
		if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag &&
		    (un->un_f_arq_enabled == TRUE)) {
			if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing",
			    1, 1) == 1) {
				un->un_tagflags = FLAG_STAG;
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p tag queueing "
				    "enabled\n", un);
			} else if (scsi_ifgetcap(SD_ADDRESS(un),
			    "untagged-qing", 0) == 1) {
				un->un_f_opt_queueing = TRUE;
				un->un_saved_throttle = un->un_throttle =
				    min(un->un_throttle, 3);
			} else {
				un->un_f_opt_queueing = FALSE;
				un->un_saved_throttle = un->un_throttle = 1;
			}
		} else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0)
		    == 1) && (un->un_f_arq_enabled == TRUE)) {
			/* The Host Adapter supports internal queueing. */
			un->un_f_opt_queueing = TRUE;
			un->un_saved_throttle = un->un_throttle =
			    min(un->un_throttle, 3);
		} else {
			un->un_f_opt_queueing = FALSE;
			un->un_saved_throttle = un->un_throttle = 1;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p no tag queueing\n", un);
		}

		/*
		 * Enable large transfers for SATA/SAS drives
		 */
		if (SD_IS_SERIAL(un)) {
			un->un_max_xfer_size =
			    ddi_getprop(DDI_DEV_T_ANY, devi, 0,
			    sd_max_xfer_size, SD_MAX_XFER_SIZE);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p max transfer "
			    "size=0x%x\n", un, un->un_max_xfer_size);
		}
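
		/*
		 * Added note on the throttle settings above: un_throttle
		 * bounds the number of commands sd keeps outstanding to
		 * the device. With full tagged queueing it stays at the
		 * sd_max_throttle default; when only the HBA's internal
		 * (untagged) queueing is available it is clamped to at
		 * most 3; and with no queueing at all it drops to 1, one
		 * command at a time.
		 */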

		/* Setup or tear down default wide operations for disks */

		/*
		 * Note: Legacy: it may be possible for both
		 * "sd_max_xfer_size" and "ssd_max_xfer_size" to exist
		 * simultaneously on the same system and be set to different
		 * values. In the future this code may need to be updated
		 * when the ssd module is obsoleted and removed from the
		 * system. (4299588)
		 */
		if (SD_IS_PARALLEL_SCSI(un) &&
		    (devp->sd_inq->inq_rdf == RDF_SCSI2) &&
		    (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) {
			if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
			    1, 1) == 1) {
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p Wide Transfer "
				    "enabled\n", un);
			}

			/*
			 * If tagged queuing has also been enabled, then
			 * enable large xfers
			 */
			if (un->un_saved_throttle == sd_max_throttle) {
				un->un_max_xfer_size =
				    ddi_getprop(DDI_DEV_T_ANY, devi, 0,
				    sd_max_xfer_size, SD_MAX_XFER_SIZE);
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p max transfer "
				    "size=0x%x\n", un, un->un_max_xfer_size);
			}
		} else {
			if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
			    0, 1) == 1) {
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p "
				    "Wide Transfer disabled\n", un);
			}
		}
	} else {
		un->un_tagflags = FLAG_STAG;
		un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY,
		    devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE);
	}

	/*
	 * If this target supports LUN reset, try to enable it.
	 */
	if (un->un_f_lun_reset_enabled) {
		if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
			    "un:0x%p lun_reset capability set\n", un);
		} else {
			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
			    "un:0x%p lun-reset capability not set\n", un);
		}
	}

	/*
	 * Adjust the maximum transfer size. This is to fix
	 * the problem of partial DMA support on SPARC. Some
	 * HBA drivers, like aac, have very small dma_attr_maxxfer
	 * sizes, which require partial DMA support on SPARC.
	 * In the future the SPARC pci nexus driver may solve
	 * the problem instead of this fix.
	 */
	max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1);
	if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) {
		/* We need DMA partial even on sparc so sddump() works */
		un->un_max_xfer_size = max_xfer_size;
		if (un->un_partial_dma_supported == 0)
			un->un_partial_dma_supported = 1;
	}
	if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS, "buf_break", 0) == 1) {
		if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr,
		    un->un_max_xfer_size) == 1) {
			un->un_buf_breakup_supported = 1;
			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
			    "un:0x%p Buf breakup enabled\n", un);
		}
	}

	/*
	 * Set PKT_DMA_PARTIAL flag.
	 */
	if (un->un_partial_dma_supported == 1) {
		un->un_pkt_flags = PKT_DMA_PARTIAL;
	} else {
		un->un_pkt_flags = 0;
	}

	/* Initialize sd_ssc_t for internal uscsi commands */
	ssc = sd_ssc_init(un);
	scsi_fm_init(devp);

	/*
	 * Allocate memory for SCSI FMA stuff.
	 */
	un->un_fm_private =
	    kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP);
	sfip = (struct sd_fm_internal *)un->un_fm_private;
	sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd;
	sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo;
	sfip->fm_ssc.ssc_un = un;
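
	/*
	 * Illustrative example (added; hypothetical value): the
	 * "fm-scsi-log" property consulted in the block below can be set
	 * from a .conf file to route FM telemetry into /var/adm/messages:
	 *
	 *	fm-scsi-log=1;
	 *
	 * With the property absent or zero, ereports are still generated
	 * but are not echoed to the system log (SD_FM_LOG_SILENT).
	 */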

	if (ISCD(un) ||
	    un->un_f_has_removable_media ||
	    devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) {
		/*
		 * We don't touch CDROMs or DDI_FM_NOT_CAPABLE devices;
		 * their logging is unchanged.
		 */
		sfip->fm_log_level = SD_FM_LOG_NSUP;
	} else {
		/*
		 * If we get here, this is a non-CDROM, FM-capable device,
		 * and it will no longer use the old scsi_log output in
		 * /var/adm/messages. Instead, the "fm-scsi-log" property
		 * controls whether the FM telemetry will be logged in
		 * /var/adm/messages.
		 */
		int fm_scsi_log;
		fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0);

		if (fm_scsi_log)
			sfip->fm_log_level = SD_FM_LOG_EREPORT;
		else
			sfip->fm_log_level = SD_FM_LOG_SILENT;
	}

	/*
	 * At this point in the attach, we have enough info in the
	 * soft state to be able to issue commands to the target.
	 *
	 * All command paths used below MUST issue their commands as
	 * SD_PATH_DIRECT. This is important as intermediate layers
	 * are not all initialized yet (such as PM).
	 */

	/*
	 * Send a TEST UNIT READY command to the device. This should clear
	 * any outstanding UNIT ATTENTION that may be present.
	 *
	 * Note: Don't check for success, just track if there is a
	 * reservation; this is a throw-away command to clear any unit
	 * attentions.
	 *
	 * Note: This MUST be the first command issued to the target during
	 * attach to ensure power on UNIT ATTENTIONs are cleared.
	 * Pass in the flag SD_DONT_RETRY_TUR to prevent the long delays
	 * associated with attempts at spinning up a device with no media.
	 */
	status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
	if (status != 0) {
		if (status == EACCES)
			reservation_flag = SD_TARGET_IS_RESERVED;
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	/*
	 * If the device is NOT a removable media device, attempt to spin
	 * it up (using the START_STOP_UNIT command) and read its capacity
	 * (using the READ CAPACITY command). Note, however, that either
	 * of these could fail and in some cases we would continue with
	 * the attach despite the failure (see below).
	 */
	if (un->un_f_descr_format_supported) {

		switch (sd_spin_up_unit(ssc)) {
		case 0:
			/*
			 * Spin-up was successful; now try to read the
			 * capacity. If successful then save the results
			 * and mark the capacity & lbasize as valid.
			 */
			SD_TRACE(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p spin-up successful\n",
			    un);

			status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
			    &lbasize, SD_PATH_DIRECT);

			switch (status) {
			case 0: {
				if (capacity > DK_MAX_BLOCKS) {
#ifdef _LP64
					if ((capacity + 1) >
					    SD_GROUP1_MAX_ADDRESS) {
						/*
						 * Enable descriptor format
						 * sense data so that we can
						 * get 64 bit sense data
						 * fields.
						 */
						sd_enable_descr_sense(ssc);
					}
#else
					/* 32-bit kernels can't handle this */
					scsi_log(SD_DEVINFO(un),
					    sd_label, CE_WARN,
					    "disk has %llu blocks, which "
					    "is too large for a 32-bit "
					    "kernel", capacity);

#if defined(__i386) || defined(__amd64)
					/*
					 * A 1TB disk was treated as
					 * (1T - 512)B in the past, so that
					 * it might have a valid VTOC and
					 * Solaris partitions; we have to
					 * allow it to continue to work.
					 */
					if (capacity - 1 > DK_MAX_BLOCKS)
#endif
						goto spinup_failed;
#endif
				}

				/*
				 * Here it is not necessary to check whether
				 * the capacity of the device is bigger than
				 * what the max hba cdb can support, because
				 * sd_send_scsi_READ_CAPACITY retrieves the
				 * capacity via a USCSI command, which is
				 * constrained by the max hba cdb. In fact,
				 * sd_send_scsi_READ_CAPACITY returns EINVAL
				 * when a bigger cdb than the required cdb
				 * length is used; that is handled below in
				 * "case EINVAL".
				 */

				/*
				 * The following relies on
				 * sd_send_scsi_READ_CAPACITY never
				 * returning 0 for capacity and/or lbasize.
				 */
				sd_update_block_info(un, lbasize, capacity);

				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p capacity = %ld "
				    "blocks; lbasize= %ld.\n", un,
				    un->un_blockcount, un->un_tgt_blocksize);

				break;
			}
			case EINVAL:
				/*
				 * In the case where the max-cdb-length
				 * property is smaller than the required CDB
				 * length for a SCSI device, a target driver
				 * can fail to attach to that device.
				 */
				scsi_log(SD_DEVINFO(un),
				    sd_label, CE_WARN,
				    "disk capacity is too large "
				    "for current cdb length");
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);

				goto spinup_failed;
			case EACCES:
				/*
				 * Should never get here if the spin-up
				 * succeeded, but code it in anyway.
				 * From here, just continue with the attach...
				 */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p "
				    "sd_send_scsi_READ_CAPACITY "
				    "returned reservation conflict\n", un);
				reservation_flag = SD_TARGET_IS_RESERVED;
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				break;
			default:
				/*
				 * Likewise, should never get here if the
				 * spin-up succeeded. Just continue with
				 * the attach...
				 */
				if (status == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc,
					    SD_FMT_IGNORE);
				break;
			}
			break;
		case EACCES:
			/*
			 * Device is reserved by another host. In this case
			 * we could not spin it up or read the capacity, but
			 * we continue with the attach anyway.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p spin-up reservation "
			    "conflict.\n", un);
			reservation_flag = SD_TARGET_IS_RESERVED;
			break;
		default:
			/* Fail the attach if the spin-up failed. */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p spin-up failed.", un);
			goto spinup_failed;
		}

	}

	/*
	 * Check to see if this is an MMC drive
	 */
	if (ISCD(un)) {
		sd_set_mmc_caps(ssc);
	}


	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);

	/*
	 * Add a boolean property to tell the world we support
	 * the B_FAILFAST flag (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
	    "ddi-failfast-supported", NULL, 0);

	/*
	 * Initialize power management
	 */
	mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
	sd_setup_pm(ssc, devi);
	if (un->un_f_pm_is_enabled == FALSE) {
		/*
		 * For performance, point to a jump table that does
		 * not include pm.
		 * The direct and priority chains don't change with PM.
		 *
		 * Note: this is currently done based on individual device
		 * capabilities. When an interface for determining system
		 * power enabled state becomes available, or when additional
		 * layers are added to the command chain, these values will
		 * have to be re-evaluated for correctness.
		 */
		if (un->un_f_non_devbsize_supported) {
			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
		} else {
			un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
		}
		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
	}

	/*
	 * This property is set to 0 by HA software to avoid retries
	 * on a reserved disk. (The preferred property name is
	 * "retry-on-reservation-conflict") (1189689)
	 *
	 * Note: The use of a global here can have unintended consequences. A
	 * per-instance variable is preferable to match the capabilities of
	 * different underlying hba's (4402600)
	 */
	sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
	    DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
	    sd_retry_on_reservation_conflict);
	if (sd_retry_on_reservation_conflict != 0) {
		sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
		    devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
		    sd_retry_on_reservation_conflict);
	}

	/* Set up options for QFULL handling. */
	if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
	    "qfull-retries", -1)) != -1) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
		    rval, 1);
	}
	if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
	    "qfull-retry-interval", -1)) != -1) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
		    rval, 1);
	}
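
	/*
	 * Illustrative example (added; hypothetical values): the
	 * reservation and QFULL knobs above are normally tuned from a
	 * .conf file, e.g.:
	 *
	 *	retry-on-reservation-conflict=0;
	 *	qfull-retries=2;
	 *	qfull-retry-interval=100;
	 *
	 * The qfull values are simply passed through to the HBA via
	 * scsi_ifsetcap(9F); their interpretation is up to the HBA driver.
	 */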

	/*
	 * This just prints a message that announces the existence of the
	 * device. The message is always printed in the system logfile, but
	 * only appears on the console if the system is booted with the
	 * -v (verbose) argument.
	 */
	ddi_report_dev(devi);

	un->un_mediastate = DKIO_NONE;

	cmlb_alloc_handle(&un->un_cmlbhandle);

#if defined(__i386) || defined(__amd64)
	/*
	 * On x86, compensate for the off-by-1 legacy error
	 */
	if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable &&
	    (lbasize == un->un_sys_blocksize))
		offbyone = CMLB_OFF_BY_ONE;
#endif

	if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
	    VOID2BOOLEAN(un->un_f_has_removable_media != 0),
	    VOID2BOOLEAN(un->un_f_is_hotpluggable != 0),
	    un->un_node_type, offbyone, un->un_cmlbhandle,
	    (void *)SD_PATH_DIRECT) != 0) {
		goto cmlb_attach_failed;
	}


	/*
	 * Read and validate the device's geometry (i.e., the disk label).
	 * A new unformatted drive will not have a valid geometry, but
	 * the driver needs to successfully attach to this device so
	 * the drive can be formatted via ioctls.
	 */
	geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0,
	    (void *)SD_PATH_DIRECT) == 0) ? 1 : 0;

	mutex_enter(SD_MUTEX(un));

	/*
	 * Read and initialize the devid for the unit.
	 */
	if (un->un_f_devid_supported) {
		sd_register_devid(ssc, devi, reservation_flag);
	}
	mutex_exit(SD_MUTEX(un));

#if (defined(__fibre))
	/*
	 * Register callbacks for fibre only. You can't do this solely
	 * on the basis of the devid_type because this is hba specific.
	 * We need to query our hba capabilities to find out whether to
	 * register or not.
	 */
	if (un->un_f_is_fibre) {
		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
			sd_init_event_callbacks(un);
			SD_TRACE(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p event callbacks inserted",
			    un);
		}
	}
#endif

	if (un->un_f_opt_disable_cache == TRUE) {
		/*
		 * Disable both read cache and write cache. This is
		 * the historic behavior of the keywords in the config file.
		 */
		if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) !=
		    0) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Could not disable "
			    "caching", un);
			goto devid_failed;
		}
	}

	/*
	 * Check the value of the WCE bit now and
	 * set un_f_write_cache_enabled accordingly.
	 */
	(void) sd_get_write_cache_enabled(ssc, &wc_enabled);
	mutex_enter(SD_MUTEX(un));
	un->un_f_write_cache_enabled = (wc_enabled != 0);
	mutex_exit(SD_MUTEX(un));

	if (un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR &&
	    un->un_tgt_blocksize != DEV_BSIZE) {
		if (!(un->un_wm_cache)) {
			(void) snprintf(name_str, sizeof (name_str),
			    "%s%d_cache",
			    ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			un->un_wm_cache = kmem_cache_create(
			    name_str, sizeof (struct sd_w_map),
			    8, sd_wm_cache_constructor,
			    sd_wm_cache_destructor, NULL,
			    (void *)un, NULL, 0);
			if (!(un->un_wm_cache)) {
				goto wm_cache_failed;
			}
		}
	}

	/*
	 * Check the value of the NV_SUP bit and set
	 * un_f_suppress_cache_flush accordingly.
	 */
	sd_get_nv_sup(ssc);
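
	/*
	 * Added note on the reservation-type probe below: PERSISTENT
	 * RESERVE IN with the READ KEYS service action is used purely as a
	 * capability probe. A SCSI-2-only device does not implement the
	 * command and rejects it (surfacing here as ENOTSUP), which is
	 * taken to mean SCSI-2 RESERVE/RELEASE semantics; any other
	 * outcome defaults to SCSI-3 persistent reservations.
	 */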

	/*
	 * Find out what type of reservation this disk supports.
	 */
	status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL);

	switch (status) {
	case 0:
		/*
		 * SCSI-3 reservations are supported.
		 */
		un->un_reservation_type = SD_SCSI3_RESERVATION;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
		break;
	case ENOTSUP:
		/*
		 * The PERSISTENT RESERVE IN command would not be recognized
		 * by a SCSI-2 device, so assume the reservation type is
		 * SCSI-2.
		 */
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un);
		un->un_reservation_type = SD_SCSI2_RESERVATION;

		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		break;
	default:
		/*
		 * Default to SCSI-3 reservations.
		 */
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p default SCSI3 reservations\n",
		    un);
		un->un_reservation_type = SD_SCSI3_RESERVATION;

		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		break;
	}

	/*
	 * Set the pstat and error stat values here, so data obtained during
	 * the previous attach-time routines is available.
	 *
	 * Note: This is a critical sequence that needs to be maintained:
	 *	1) Instantiate the kstats before any routines using the
	 *	   iopath (i.e. sd_send_scsi_cmd).
	 *	2) Initialize the error stats (sd_set_errstats) and partition
	 *	   stats (sd_set_pstats) here, following
	 *	   cmlb_validate_geometry(), sd_register_devid(), and
	 *	   sd_cache_control().
	 */

	if (un->un_f_pkstats_enabled && geom_label_valid) {
		sd_set_pstats(un);
		SD_TRACE(SD_LOG_IO_PARTITION, un,
		    "sd_unit_attach: un:0x%p pstats created and set\n", un);
	}

	sd_set_errstats(un);
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p errstats set\n", un);


	/*
	 * After successfully attaching an instance, we record the information
	 * of how many luns have been attached on the given target and
	 * controller for parallel SCSI. This information is used when sd
	 * tries to set the tagged queuing capability in the HBA.
	 */
	if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) {
		sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH);
	}

	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p exit success\n", un);

	/* Uninitialize sd_ssc_t pointer */
	sd_ssc_fini(ssc);

	return (DDI_SUCCESS);

	/*
	 * An error occurred during the attach; clean up & return failure.
	 */
wm_cache_failed:
devid_failed:

setup_pm_failed:
	ddi_remove_minor_node(devi, NULL);

cmlb_attach_failed:
	/*
	 * Cleanup from the scsi_ifsetcap() calls (437868)
	 */
	(void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
	(void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);

	/*
	 * Refer to the comments on setting tagged-qing at the beginning of
	 * sd_unit_attach. We can only disable tagged queuing when there is
	 * no lun attached on the target.
	 */
	if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
	}

	if (un->un_f_is_fibre == FALSE) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
	}

spinup_failed:

	/* Uninitialize sd_ssc_t pointer */
	sd_ssc_fini(ssc);

	mutex_enter(SD_MUTEX(un));

	/* Deallocate SCSI FMA memory spaces */
	kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
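
	/*
	 * Added note on the cancellation pattern used below (and again in
	 * sd_unit_detach): each timeout id is copied and NULLed while
	 * SD_MUTEX is held, and the mutex is then dropped around the
	 * untimeout() call. untimeout() may have to wait for a callback
	 * that is already running, and a callback that needs SD_MUTEX
	 * would otherwise deadlock against this thread; clearing the
	 * field first keeps anyone else from using the stale id.
	 */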

	/* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */
	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel any pending start/stop timeouts */
	if (un->un_startstop_timeid != NULL) {
		timeout_id_t temp_id = un->un_startstop_timeid;
		un->un_startstop_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel any pending reset-throttle timeouts */
	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel rmw warning message timeouts */
	if (un->un_rmw_msg_timeid != NULL) {
		timeout_id_t temp_id = un->un_rmw_msg_timeid;
		un->un_rmw_msg_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel any pending retry timeouts */
	if (un->un_retry_timeid != NULL) {
		timeout_id_t temp_id = un->un_retry_timeid;
		un->un_retry_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel any pending delayed cv broadcast timeouts */
	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_exit(SD_MUTEX(un));

	/* There should not be any in-progress I/O so ASSERT this check */
	ASSERT(un->un_ncmds_in_transport == 0);
	ASSERT(un->un_ncmds_in_driver == 0);

	/* Do not free the softstate if the callback routine is active */
	sd_sync_with_callback(un);

	/*
	 * Partition stats apparently are not used with removables. These
	 * would not have been created during attach, so no need to clean
	 * them up...
	 */
	if (un->un_errstats != NULL) {
		kstat_delete(un->un_errstats);
		un->un_errstats = NULL;
	}

create_errstats_failed:

	if (un->un_stats != NULL) {
		kstat_delete(un->un_stats);
		un->un_stats = NULL;
	}

	ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
	ddi_xbuf_attr_destroy(un->un_xbuf_attr);

	ddi_prop_remove_all(devi);
	sema_destroy(&un->un_semoclose);
	cv_destroy(&un->un_state_cv);

getrbuf_failed:

	sd_free_rqs(un);

alloc_rqs_failed:

	devp->sd_private = NULL;
	bzero(un, sizeof (struct sd_lun));	/* Clear any stale data! */

get_softstate_failed:
	/*
	 * Note: the man pages are unclear as to whether or not doing a
	 * ddi_soft_state_free(sd_state, instance) is the right way to
	 * clean up after the ddi_soft_state_zalloc() if the subsequent
	 * ddi_get_soft_state() fails. The implication seems to be
	 * that the get_soft_state cannot fail if the zalloc succeeds.
	 */
#ifndef XPV_HVM_DRIVER
	ddi_soft_state_free(sd_state, instance);
#endif /* !XPV_HVM_DRIVER */

probe_failed:
	scsi_unprobe(devp);

	return (DDI_FAILURE);
}


/*
 * Function: sd_unit_detach
 *
 * Description: Performs DDI_DETACH processing for sddetach().
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sd_unit_detach(dev_info_t *devi)
{
	struct scsi_device	*devp;
	struct sd_lun		*un;
	int			i;
	int			tgt;
	dev_t			dev;
	dev_info_t		*pdip = ddi_get_parent(devi);
#ifndef XPV_HVM_DRIVER
	int			instance = ddi_get_instance(devi);
#endif /* !XPV_HVM_DRIVER */

	mutex_enter(&sd_detach_mutex);

	/*
	 * Fail the detach for any of the following:
	 *  - Unable to get the sd_lun struct for the instance
	 *  - A layered driver has an outstanding open on the instance
	 *  - Another thread is already detaching this instance
	 *  - Another thread is currently performing an open
	 */
	devp = ddi_get_driver_private(devi);
	if ((devp == NULL) ||
	    ((un = (struct sd_lun *)devp->sd_private) == NULL) ||
	    (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) ||
	    (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) {
		mutex_exit(&sd_detach_mutex);
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un);

	/*
	 * Mark this instance as currently in a detach, to inhibit any
	 * opens from a layered driver.
	 */
	un->un_detach_count++;
	mutex_exit(&sd_detach_mutex);

	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_TARGET, -1);

	dev = sd_make_device(SD_DEVINFO(un));

#ifndef lint
	_NOTE(COMPETING_THREADS_NOW);
#endif

	mutex_enter(SD_MUTEX(un));

	/*
	 * Fail the detach if there are any outstanding layered
	 * opens on this device.
	 */
	for (i = 0; i < NDKMAP; i++) {
		if (un->un_ocmap.lyropen[i] != 0) {
			goto err_notclosed;
		}
	}

	/*
	 * Verify there are NO outstanding commands issued to this device.
	 * ie, un_ncmds_in_transport == 0.
	 * It's possible to have outstanding commands through the physio
	 * code path, even though everything's closed.
	 */
	if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) ||
	    (un->un_direct_priority_timeid != NULL) ||
	    (un->un_state == SD_STATE_RWAIT)) {
		mutex_exit(SD_MUTEX(un));
		SD_ERROR(SD_LOG_ATTACH_DETACH, un,
		    "sd_dr_detach: Detach failure due to outstanding cmds\n");
		goto err_stillbusy;
	}

	/*
	 * If we have the device reserved, release the reservation.
	 */
	if ((un->un_resvd_status & SD_RESERVE) &&
	    !(un->un_resvd_status & SD_LOST_RESERVE)) {
		mutex_exit(SD_MUTEX(un));
		/*
		 * Note: sd_reserve_release sends a command to the device
		 * via the sd_ioctlcmd() path, and can sleep.
		 */
		if (sd_reserve_release(dev, SD_RELEASE) != 0) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot release reservation \n");
		}
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Untimeout any reserve recover, throttle reset, restart unit
	 * and delayed broadcast timeout threads. Protect the timeout pointers
	 * from getting nulled by their callback functions.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_resvd_timeid != NULL) {
		timeout_id_t temp_id = un->un_resvd_timeid;
		un->un_resvd_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_startstop_timeid != NULL) {
		timeout_id_t temp_id = un->un_startstop_timeid;
		un->un_startstop_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_rmw_msg_timeid != NULL) {
		timeout_id_t temp_id = un->un_rmw_msg_timeid;
		un->un_rmw_msg_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/* Remove any pending reservation reclaim requests for this device */
	sd_rmv_resv_reclaim_req(dev);

	mutex_enter(SD_MUTEX(un));

	/* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */
	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel any active multi-host disk watch thread requests */
	if (un->un_mhd_token != NULL) {
		mutex_exit(SD_MUTEX(un));
		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token));
		if (scsi_watch_request_terminate(un->un_mhd_token,
		    SCSI_WATCH_TERMINATE_NOWAIT)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel mhd watch request\n");
			/*
			 * Note: We are returning here after having removed
			 * some driver timeouts above. This is consistent with
			 * the legacy implementation but perhaps the watch
			 * terminate call should be made with the wait flag
			 * set.
			 */
			goto err_stillbusy;
		}
		mutex_enter(SD_MUTEX(un));
		un->un_mhd_token = NULL;
	}

	if (un->un_swr_token != NULL) {
		mutex_exit(SD_MUTEX(un));
		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token));
		if (scsi_watch_request_terminate(un->un_swr_token,
		    SCSI_WATCH_TERMINATE_NOWAIT)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel swr watch request\n");
			/*
			 * Note: We are returning here after having removed
			 * some driver timeouts above. This is consistent with
			 * the legacy implementation but perhaps the watch
			 * terminate call should be made with the wait flag
			 * set.
			 */
			goto err_stillbusy;
		}
		mutex_enter(SD_MUTEX(un));
		un->un_swr_token = NULL;
	}

	mutex_exit(SD_MUTEX(un));

	/*
	 * Clear any scsi_reset_notifies. We clear the reset notifies
	 * if we have not registered one.
	 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX!
	 */
	(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
	    sd_mhd_reset_notify_cb, (caddr_t)un);

	/*
	 * Protect the timeout pointers from getting nulled by
	 * their callback functions during the cancellation process.
	 * In such a scenario untimeout can be invoked with a null value.
	 */
	_NOTE(NO_COMPETING_THREADS_NOW);

	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_idle_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_idle_timeid;
		un->un_pm_idle_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);

		/*
		 * Timeout is active; cancel it.
		 * Note that it'll never be active on a device
		 * that does not support PM therefore we don't
		 * have to check before calling pm_idle_component.
		 */
		(void) untimeout(temp_id);
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		mutex_enter(&un->un_pm_mutex);
	}

	/*
	 * Check whether there is already a timeout scheduled for power
	 * management. If yes then don't lower the power here; that's
	 * the timeout handler's job.
	 */
	if (un->un_pm_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_timeid;
		un->un_pm_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		/*
		 * Timeout is active; cancel it.
		 * Note that it'll never be active on a device
		 * that does not support PM therefore we don't
		 * have to check before calling pm_idle_component.
		 */
		(void) untimeout(temp_id);
		(void) pm_idle_component(SD_DEVINFO(un), 0);

	} else {
		mutex_exit(&un->un_pm_mutex);
		if ((un->un_f_pm_is_enabled == TRUE) &&
		    (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un))
		    != DDI_SUCCESS)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Lower power request failed, "
			    "ignoring.\n");
			/*
			 * Fix for bug: 4297749, item # 13
			 * The above test now includes a check to see if PM is
			 * supported by this device before calling
			 * pm_lower_power().
			 * Note, the following is not dead code. The call to
			 * pm_lower_power above will generate a call back into
			 * our sdpower routine which might result in a timeout
			 * handler getting activated. Therefore the following
			 * code is valid and necessary.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid != NULL) {
				timeout_id_t temp_id = un->un_pm_timeid;
				un->un_pm_timeid = NULL;
				mutex_exit(&un->un_pm_mutex);
				(void) untimeout(temp_id);
				(void) pm_idle_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
		}
	}

	/*
	 * Cleanup from the scsi_ifsetcap() calls (437868)
	 * Relocated here from above to be after the call to
	 * pm_lower_power, which was getting errors.
	 */
	(void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
	(void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
8735 */ 8736 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8737 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8738 } 8739 8740 if (un->un_f_is_fibre == FALSE) { 8741 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8742 } 8743 8744 /* 8745 * Remove any event callbacks, fibre only 8746 */ 8747 if (un->un_f_is_fibre == TRUE) { 8748 if ((un->un_insert_event != NULL) && 8749 (ddi_remove_event_handler(un->un_insert_cb_id) != 8750 DDI_SUCCESS)) { 8751 /* 8752 * Note: We are returning here after having done 8753 * substantial cleanup above. This is consistent 8754 * with the legacy implementation but this may not 8755 * be the right thing to do. 8756 */ 8757 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8758 "sd_dr_detach: Cannot cancel insert event\n"); 8759 goto err_remove_event; 8760 } 8761 un->un_insert_event = NULL; 8762 8763 if ((un->un_remove_event != NULL) && 8764 (ddi_remove_event_handler(un->un_remove_cb_id) != 8765 DDI_SUCCESS)) { 8766 /* 8767 * Note: We are returning here after having done 8768 * substantial cleanup above. This is consistent 8769 * with the legacy implementation but this may not 8770 * be the right thing to do. 8771 */ 8772 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8773 "sd_dr_detach: Cannot cancel remove event\n"); 8774 goto err_remove_event; 8775 } 8776 un->un_remove_event = NULL; 8777 } 8778 8779 /* Do not free the softstate if the callback routine is active */ 8780 sd_sync_with_callback(un); 8781 8782 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 8783 cmlb_free_handle(&un->un_cmlbhandle); 8784 8785 /* 8786 * Hold the detach mutex here, to make sure that no other threads ever 8787 * can access a (partially) freed soft state structure. 8788 */ 8789 mutex_enter(&sd_detach_mutex); 8790 8791 /* 8792 * Clean up the soft state struct. 8793 * Cleanup is done in reverse order of allocs/inits. 8794 * At this point there should be no competing threads anymore. 8795 */ 8796 8797 scsi_fm_fini(devp); 8798 8799 /* 8800 * Deallocate memory for SCSI FMA. 8801 */ 8802 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8803 8804 /* 8805 * Unregister and free device id if it was not registered 8806 * by the transport. 8807 */ 8808 if (un->un_f_devid_transport_defined == FALSE) 8809 ddi_devid_unregister(devi); 8810 8811 /* 8812 * free the devid structure if allocated before (by ddi_devid_init() 8813 * or ddi_devid_get()). 8814 */ 8815 if (un->un_devid) { 8816 ddi_devid_free(un->un_devid); 8817 un->un_devid = NULL; 8818 } 8819 8820 /* 8821 * Destroy wmap cache if it exists. 8822 */ 8823 if (un->un_wm_cache != NULL) { 8824 kmem_cache_destroy(un->un_wm_cache); 8825 un->un_wm_cache = NULL; 8826 } 8827 8828 /* 8829 * kstat cleanup is done in detach for all device types (4363169). 8830 * We do not want to fail detach if the device kstats are not deleted 8831 * since there is a confusion about the devo_refcnt for the device. 8832 * We just delete the kstats and let detach complete successfully. 
8833 */ 8834 if (un->un_stats != NULL) { 8835 kstat_delete(un->un_stats); 8836 un->un_stats = NULL; 8837 } 8838 if (un->un_errstats != NULL) { 8839 kstat_delete(un->un_errstats); 8840 un->un_errstats = NULL; 8841 } 8842 8843 /* Remove partition stats */ 8844 if (un->un_f_pkstats_enabled) { 8845 for (i = 0; i < NSDMAP; i++) { 8846 if (un->un_pstats[i] != NULL) { 8847 kstat_delete(un->un_pstats[i]); 8848 un->un_pstats[i] = NULL; 8849 } 8850 } 8851 } 8852 8853 /* Remove xbuf registration */ 8854 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8855 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8856 8857 /* Remove driver properties */ 8858 ddi_prop_remove_all(devi); 8859 8860 mutex_destroy(&un->un_pm_mutex); 8861 cv_destroy(&un->un_pm_busy_cv); 8862 8863 cv_destroy(&un->un_wcc_cv); 8864 8865 /* Open/close semaphore */ 8866 sema_destroy(&un->un_semoclose); 8867 8868 /* Removable media condvar. */ 8869 cv_destroy(&un->un_state_cv); 8870 8871 /* Suspend/resume condvar. */ 8872 cv_destroy(&un->un_suspend_cv); 8873 cv_destroy(&un->un_disk_busy_cv); 8874 8875 sd_free_rqs(un); 8876 8877 /* Free up soft state */ 8878 devp->sd_private = NULL; 8879 8880 bzero(un, sizeof (struct sd_lun)); 8881 #ifndef XPV_HVM_DRIVER 8882 ddi_soft_state_free(sd_state, instance); 8883 #endif /* !XPV_HVM_DRIVER */ 8884 8885 mutex_exit(&sd_detach_mutex); 8886 8887 /* This frees up the INQUIRY data associated with the device. */ 8888 scsi_unprobe(devp); 8889 8890 /* 8891 * After successfully detaching an instance, we update the information 8892 * of how many luns have been attached in the relative target and 8893 * controller for parallel SCSI. This information is used when sd tries 8894 * to set the tagged queuing capability in HBA. 8895 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8896 * check if the device is parallel SCSI. However, we don't need to 8897 * check here because we've already checked during attach. No device 8898 * that is not parallel SCSI is in the chain. 8899 */ 8900 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8901 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8902 } 8903 8904 return (DDI_SUCCESS); 8905 8906 err_notclosed: 8907 mutex_exit(SD_MUTEX(un)); 8908 8909 err_stillbusy: 8910 _NOTE(NO_COMPETING_THREADS_NOW); 8911 8912 err_remove_event: 8913 mutex_enter(&sd_detach_mutex); 8914 un->un_detach_count--; 8915 mutex_exit(&sd_detach_mutex); 8916 8917 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8918 return (DDI_FAILURE); 8919 } 8920 8921 8922 /* 8923 * Function: sd_create_errstats 8924 * 8925 * Description: This routine instantiates the device error stats. 8926 * 8927 * Note: During attach the stats are instantiated first so they are 8928 * available for attach-time routines that utilize the driver 8929 * iopath to send commands to the device. The stats are initialized 8930 * separately so data obtained during some attach-time routines is 8931 * available. 
(4362483) 8932 * 8933 * Arguments: un - driver soft state (unit) structure 8934 * instance - driver instance 8935 * 8936 * Context: Kernel thread context 8937 */ 8938 8939 static void 8940 sd_create_errstats(struct sd_lun *un, int instance) 8941 { 8942 struct sd_errstats *stp; 8943 char kstatmodule_err[KSTAT_STRLEN]; 8944 char kstatname[KSTAT_STRLEN]; 8945 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8946 8947 ASSERT(un != NULL); 8948 8949 if (un->un_errstats != NULL) { 8950 return; 8951 } 8952 8953 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8954 "%serr", sd_label); 8955 (void) snprintf(kstatname, sizeof (kstatname), 8956 "%s%d,err", sd_label, instance); 8957 8958 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8959 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8960 8961 if (un->un_errstats == NULL) { 8962 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8963 "sd_create_errstats: Failed kstat_create\n"); 8964 return; 8965 } 8966 8967 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8968 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8969 KSTAT_DATA_UINT32); 8970 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8971 KSTAT_DATA_UINT32); 8972 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8973 KSTAT_DATA_UINT32); 8974 kstat_named_init(&stp->sd_vid, "Vendor", 8975 KSTAT_DATA_CHAR); 8976 kstat_named_init(&stp->sd_pid, "Product", 8977 KSTAT_DATA_CHAR); 8978 kstat_named_init(&stp->sd_revision, "Revision", 8979 KSTAT_DATA_CHAR); 8980 kstat_named_init(&stp->sd_serial, "Serial No", 8981 KSTAT_DATA_CHAR); 8982 kstat_named_init(&stp->sd_capacity, "Size", 8983 KSTAT_DATA_ULONGLONG); 8984 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8985 KSTAT_DATA_UINT32); 8986 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8987 KSTAT_DATA_UINT32); 8988 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8989 KSTAT_DATA_UINT32); 8990 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8991 KSTAT_DATA_UINT32); 8992 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8993 KSTAT_DATA_UINT32); 8994 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8995 KSTAT_DATA_UINT32); 8996 8997 un->un_errstats->ks_private = un; 8998 un->un_errstats->ks_update = nulldev; 8999 9000 kstat_install(un->un_errstats); 9001 } 9002 9003 9004 /* 9005 * Function: sd_set_errstats 9006 * 9007 * Description: This routine sets the value of the vendor id, product id, 9008 * revision, serial number, and capacity device error stats. 9009 * 9010 * Note: During attach the stats are instantiated first so they are 9011 * available for attach-time routines that utilize the driver 9012 * iopath to send commands to the device. The stats are initialized 9013 * separately so data obtained during some attach-time routines is 9014 * available. 
(4362483) 9015 * 9016 * Arguments: un - driver soft state (unit) structure 9017 * 9018 * Context: Kernel thread context 9019 */ 9020 9021 static void 9022 sd_set_errstats(struct sd_lun *un) 9023 { 9024 struct sd_errstats *stp; 9025 9026 ASSERT(un != NULL); 9027 ASSERT(un->un_errstats != NULL); 9028 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9029 ASSERT(stp != NULL); 9030 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 9031 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 9032 (void) strncpy(stp->sd_revision.value.c, 9033 un->un_sd->sd_inq->inq_revision, 4); 9034 9035 /* 9036 * All the errstats are persistent across detach/attach, 9037 * so reset all the errstats here in case of the hot 9038 * replacement of disk drives, except for not changed 9039 * Sun qualified drives. 9040 */ 9041 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) || 9042 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 9043 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) { 9044 stp->sd_softerrs.value.ui32 = 0; 9045 stp->sd_harderrs.value.ui32 = 0; 9046 stp->sd_transerrs.value.ui32 = 0; 9047 stp->sd_rq_media_err.value.ui32 = 0; 9048 stp->sd_rq_ntrdy_err.value.ui32 = 0; 9049 stp->sd_rq_nodev_err.value.ui32 = 0; 9050 stp->sd_rq_recov_err.value.ui32 = 0; 9051 stp->sd_rq_illrq_err.value.ui32 = 0; 9052 stp->sd_rq_pfa_err.value.ui32 = 0; 9053 } 9054 9055 /* 9056 * Set the "Serial No" kstat for Sun qualified drives (indicated by 9057 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 9058 * (4376302)) 9059 */ 9060 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 9061 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 9062 sizeof (SD_INQUIRY(un)->inq_serial)); 9063 } 9064 9065 if (un->un_f_blockcount_is_valid != TRUE) { 9066 /* 9067 * Set capacity error stat to 0 for no media. This ensures 9068 * a valid capacity is displayed in response to 'iostat -E' 9069 * when no media is present in the device. 9070 */ 9071 stp->sd_capacity.value.ui64 = 0; 9072 } else { 9073 /* 9074 * Multiply un_blockcount by un->un_sys_blocksize to get 9075 * capacity. 9076 * 9077 * Note: for non-512 blocksize devices "un_blockcount" has been 9078 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 9079 * (un_tgt_blocksize / un->un_sys_blocksize). 9080 */ 9081 stp->sd_capacity.value.ui64 = (uint64_t) 9082 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 9083 } 9084 } 9085 9086 9087 /* 9088 * Function: sd_set_pstats 9089 * 9090 * Description: This routine instantiates and initializes the partition 9091 * stats for each partition with more than zero blocks. 9092 * (4363169) 9093 * 9094 * Arguments: un - driver soft state (unit) structure 9095 * 9096 * Context: Kernel thread context 9097 */ 9098 9099 static void 9100 sd_set_pstats(struct sd_lun *un) 9101 { 9102 char kstatname[KSTAT_STRLEN]; 9103 int instance; 9104 int i; 9105 diskaddr_t nblks = 0; 9106 char *partname = NULL; 9107 9108 ASSERT(un != NULL); 9109 9110 instance = ddi_get_instance(SD_DEVINFO(un)); 9111 9112 /* Note:x86: is this a VTOC8/VTOC16 difference? 
*/ 9113 for (i = 0; i < NSDMAP; i++) { 9114 9115 if (cmlb_partinfo(un->un_cmlbhandle, i, 9116 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0) 9117 continue; 9118 mutex_enter(SD_MUTEX(un)); 9119 9120 if ((un->un_pstats[i] == NULL) && 9121 (nblks != 0)) { 9122 9123 (void) snprintf(kstatname, sizeof (kstatname), 9124 "%s%d,%s", sd_label, instance, 9125 partname); 9126 9127 un->un_pstats[i] = kstat_create(sd_label, 9128 instance, kstatname, "partition", KSTAT_TYPE_IO, 9129 1, KSTAT_FLAG_PERSISTENT); 9130 if (un->un_pstats[i] != NULL) { 9131 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 9132 kstat_install(un->un_pstats[i]); 9133 } 9134 } 9135 mutex_exit(SD_MUTEX(un)); 9136 } 9137 } 9138 9139 9140 #if (defined(__fibre)) 9141 /* 9142 * Function: sd_init_event_callbacks 9143 * 9144 * Description: This routine initializes the insertion and removal event 9145 * callbacks. (fibre only) 9146 * 9147 * Arguments: un - driver soft state (unit) structure 9148 * 9149 * Context: Kernel thread context 9150 */ 9151 9152 static void 9153 sd_init_event_callbacks(struct sd_lun *un) 9154 { 9155 ASSERT(un != NULL); 9156 9157 if ((un->un_insert_event == NULL) && 9158 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 9159 &un->un_insert_event) == DDI_SUCCESS)) { 9160 /* 9161 * Add the callback for an insertion event 9162 */ 9163 (void) ddi_add_event_handler(SD_DEVINFO(un), 9164 un->un_insert_event, sd_event_callback, (void *)un, 9165 &(un->un_insert_cb_id)); 9166 } 9167 9168 if ((un->un_remove_event == NULL) && 9169 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 9170 &un->un_remove_event) == DDI_SUCCESS)) { 9171 /* 9172 * Add the callback for a removal event 9173 */ 9174 (void) ddi_add_event_handler(SD_DEVINFO(un), 9175 un->un_remove_event, sd_event_callback, (void *)un, 9176 &(un->un_remove_cb_id)); 9177 } 9178 } 9179 9180 9181 /* 9182 * Function: sd_event_callback 9183 * 9184 * Description: This routine handles insert/remove events (photon). The 9185 * state is changed to OFFLINE which can be used to supress 9186 * error msgs. (fibre only) 9187 * 9188 * Arguments: un - driver soft state (unit) structure 9189 * 9190 * Context: Callout thread context 9191 */ 9192 /* ARGSUSED */ 9193 static void 9194 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 9195 void *bus_impldata) 9196 { 9197 struct sd_lun *un = (struct sd_lun *)arg; 9198 9199 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 9200 if (event == un->un_insert_event) { 9201 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 9202 mutex_enter(SD_MUTEX(un)); 9203 if (un->un_state == SD_STATE_OFFLINE) { 9204 if (un->un_last_state != SD_STATE_SUSPENDED) { 9205 un->un_state = un->un_last_state; 9206 } else { 9207 /* 9208 * We have gone through SUSPEND/RESUME while 9209 * we were offline. Restore the last state 9210 */ 9211 un->un_state = un->un_save_state; 9212 } 9213 } 9214 mutex_exit(SD_MUTEX(un)); 9215 9216 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 9217 } else if (event == un->un_remove_event) { 9218 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 9219 mutex_enter(SD_MUTEX(un)); 9220 /* 9221 * We need to handle an event callback that occurs during 9222 * the suspend operation, since we don't prevent it. 
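 *
 * As an illustrative summary, the state handling for the two events
 * implemented below is:
 *
 *	insert event:	OFFLINE -> un_last_state (or un_save_state if a
 *			SUSPEND/RESUME occurred while offline)
 *	remove event:	current state -> OFFLINE; if SUSPENDED, only
 *			un_last_state is set to OFFLINE so the device
 *			resumes in the offline state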
9223 */ 9224 if (un->un_state != SD_STATE_OFFLINE) { 9225 if (un->un_state != SD_STATE_SUSPENDED) { 9226 New_state(un, SD_STATE_OFFLINE); 9227 } else { 9228 un->un_last_state = SD_STATE_OFFLINE; 9229 } 9230 } 9231 mutex_exit(SD_MUTEX(un)); 9232 } else { 9233 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 9234 "!Unknown event\n"); 9235 } 9236 9237 } 9238 #endif 9239 9240 /* 9241 * Function: sd_cache_control() 9242 * 9243 * Description: This routine is the driver entry point for setting 9244 * read and write caching by modifying the WCE (write cache 9245 * enable) and RCD (read cache disable) bits of mode 9246 * page 8 (MODEPAGE_CACHING). 9247 * 9248 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 9249 * structure for this target. 9250 * rcd_flag - flag for controlling the read cache 9251 * wce_flag - flag for controlling the write cache 9252 * 9253 * Return Code: EIO 9254 * code returned by sd_send_scsi_MODE_SENSE and 9255 * sd_send_scsi_MODE_SELECT 9256 * 9257 * Context: Kernel Thread 9258 */ 9259 9260 static int 9261 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag) 9262 { 9263 struct mode_caching *mode_caching_page; 9264 uchar_t *header; 9265 size_t buflen; 9266 int hdrlen; 9267 int bd_len; 9268 int rval = 0; 9269 struct mode_header_grp2 *mhp; 9270 struct sd_lun *un; 9271 int status; 9272 9273 ASSERT(ssc != NULL); 9274 un = ssc->ssc_un; 9275 ASSERT(un != NULL); 9276 9277 /* 9278 * Do a test unit ready, otherwise a mode sense may not work if this 9279 * is the first command sent to the device after boot. 9280 */ 9281 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 9282 if (status != 0) 9283 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9284 9285 if (un->un_f_cfg_is_atapi == TRUE) { 9286 hdrlen = MODE_HEADER_LENGTH_GRP2; 9287 } else { 9288 hdrlen = MODE_HEADER_LENGTH; 9289 } 9290 9291 /* 9292 * Allocate memory for the retrieved mode page and its headers. Set 9293 * a pointer to the page itself. Use mode_cache_scsi3 to insure 9294 * we get all of the mode sense data otherwise, the mode select 9295 * will fail. mode_cache_scsi3 is a superset of mode_caching. 9296 */ 9297 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 9298 sizeof (struct mode_cache_scsi3); 9299 9300 header = kmem_zalloc(buflen, KM_SLEEP); 9301 9302 /* Get the information from the device. */ 9303 if (un->un_f_cfg_is_atapi == TRUE) { 9304 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 9305 MODEPAGE_CACHING, SD_PATH_DIRECT); 9306 } else { 9307 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 9308 MODEPAGE_CACHING, SD_PATH_DIRECT); 9309 } 9310 9311 if (rval != 0) { 9312 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9313 "sd_cache_control: Mode Sense Failed\n"); 9314 goto mode_sense_failed; 9315 } 9316 9317 /* 9318 * Determine size of Block Descriptors in order to locate 9319 * the mode page data. ATAPI devices return 0, SCSI devices 9320 * should return MODE_BLK_DESC_LENGTH. 
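 *
 * For reference, the retrieved buffer parsed below is laid out as
 * follows (sizes per the mode sense definitions used here; sketch
 * only):
 *
 *	offset 0:		mode parameter header
 *				(4 bytes for CDB_GROUP0, 8 for CDB_GROUP1)
 *	offset hdrlen:		block descriptor(s), bd_len bytes
 *				(typically 8 for SCSI, 0 for ATAPI)
 *	offset hdrlen+bd_len:	MODEPAGE_CACHING (page 8) data
 *
 * so the page pointer computed below is header + 4 + 8 for a typical
 * SCSI device and header + 8 + 0 for an ATAPI device.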
9321 */ 9322 if (un->un_f_cfg_is_atapi == TRUE) { 9323 mhp = (struct mode_header_grp2 *)header; 9324 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9325 } else { 9326 bd_len = ((struct mode_header *)header)->bdesc_length; 9327 } 9328 9329 if (bd_len > MODE_BLK_DESC_LENGTH) { 9330 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9331 "sd_cache_control: Mode Sense returned invalid block " 9332 "descriptor length\n"); 9333 rval = EIO; 9334 goto mode_sense_failed; 9335 } 9336 9337 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9338 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9339 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9340 "sd_cache_control: Mode Sense caching page code mismatch " 9341 "%d\n", mode_caching_page->mode_page.code); 9342 rval = EIO; 9343 goto mode_sense_failed; 9344 } 9345 9346 /* Check the relevant bits on successful mode sense. */ 9347 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 9348 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 9349 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 9350 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 9351 9352 size_t sbuflen; 9353 uchar_t save_pg; 9354 9355 /* 9356 * Construct select buffer length based on the 9357 * length of the sense data returned. 9358 */ 9359 sbuflen = hdrlen + bd_len + 9360 sizeof (struct mode_page) + 9361 (int)mode_caching_page->mode_page.length; 9362 9363 /* 9364 * Set the caching bits as requested. 9365 */ 9366 if (rcd_flag == SD_CACHE_ENABLE) 9367 mode_caching_page->rcd = 0; 9368 else if (rcd_flag == SD_CACHE_DISABLE) 9369 mode_caching_page->rcd = 1; 9370 9371 if (wce_flag == SD_CACHE_ENABLE) 9372 mode_caching_page->wce = 1; 9373 else if (wce_flag == SD_CACHE_DISABLE) 9374 mode_caching_page->wce = 0; 9375 9376 /* 9377 * Save the page if the mode sense says the 9378 * drive supports it. 9379 */ 9380 save_pg = mode_caching_page->mode_page.ps ? 9381 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 9382 9383 /* Clear reserved bits before mode select. */ 9384 mode_caching_page->mode_page.ps = 0; 9385 9386 /* 9387 * Clear out mode header for mode select. 9388 * The rest of the retrieved page will be reused. 9389 */ 9390 bzero(header, hdrlen); 9391 9392 if (un->un_f_cfg_is_atapi == TRUE) { 9393 mhp = (struct mode_header_grp2 *)header; 9394 mhp->bdesc_length_hi = bd_len >> 8; 9395 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 9396 } else { 9397 ((struct mode_header *)header)->bdesc_length = bd_len; 9398 } 9399 9400 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9401 9402 /* Issue mode select to change the cache settings */ 9403 if (un->un_f_cfg_is_atapi == TRUE) { 9404 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header, 9405 sbuflen, save_pg, SD_PATH_DIRECT); 9406 } else { 9407 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 9408 sbuflen, save_pg, SD_PATH_DIRECT); 9409 } 9410 9411 } 9412 9413 9414 mode_sense_failed: 9415 9416 kmem_free(header, buflen); 9417 9418 if (rval != 0) { 9419 if (rval == EIO) 9420 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9421 else 9422 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9423 } 9424 return (rval); 9425 } 9426 9427 9428 /* 9429 * Function: sd_get_write_cache_enabled() 9430 * 9431 * Description: This routine is the driver entry point for determining if 9432 * write caching is enabled. It examines the WCE (write cache 9433 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
9434 * 9435 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 9436 * structure for this target. 9437 * is_enabled - pointer to int where write cache enabled state 9438 * is returned (non-zero -> write cache enabled) 9439 * 9440 * 9441 * Return Code: EIO 9442 * code returned by sd_send_scsi_MODE_SENSE 9443 * 9444 * Context: Kernel Thread 9445 * 9446 * NOTE: If ioctl is added to disable write cache, this sequence should 9447 * be followed so that no locking is required for accesses to 9448 * un->un_f_write_cache_enabled: 9449 * do mode select to clear wce 9450 * do synchronize cache to flush cache 9451 * set un->un_f_write_cache_enabled = FALSE 9452 * 9453 * Conversely, an ioctl to enable the write cache should be done 9454 * in this order: 9455 * set un->un_f_write_cache_enabled = TRUE 9456 * do mode select to set wce 9457 */ 9458 9459 static int 9460 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled) 9461 { 9462 struct mode_caching *mode_caching_page; 9463 uchar_t *header; 9464 size_t buflen; 9465 int hdrlen; 9466 int bd_len; 9467 int rval = 0; 9468 struct sd_lun *un; 9469 int status; 9470 9471 ASSERT(ssc != NULL); 9472 un = ssc->ssc_un; 9473 ASSERT(un != NULL); 9474 ASSERT(is_enabled != NULL); 9475 9476 /* in case of error, flag as enabled */ 9477 *is_enabled = TRUE; 9478 9479 /* 9480 * Do a test unit ready, otherwise a mode sense may not work if this 9481 * is the first command sent to the device after boot. 9482 */ 9483 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 9484 9485 if (status != 0) 9486 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9487 9488 if (un->un_f_cfg_is_atapi == TRUE) { 9489 hdrlen = MODE_HEADER_LENGTH_GRP2; 9490 } else { 9491 hdrlen = MODE_HEADER_LENGTH; 9492 } 9493 9494 /* 9495 * Allocate memory for the retrieved mode page and its headers. Set 9496 * a pointer to the page itself. 9497 */ 9498 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9499 header = kmem_zalloc(buflen, KM_SLEEP); 9500 9501 /* Get the information from the device. */ 9502 if (un->un_f_cfg_is_atapi == TRUE) { 9503 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 9504 MODEPAGE_CACHING, SD_PATH_DIRECT); 9505 } else { 9506 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 9507 MODEPAGE_CACHING, SD_PATH_DIRECT); 9508 } 9509 9510 if (rval != 0) { 9511 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9512 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 9513 goto mode_sense_failed; 9514 } 9515 9516 /* 9517 * Determine size of Block Descriptors in order to locate 9518 * the mode page data. ATAPI devices return 0, SCSI devices 9519 * should return MODE_BLK_DESC_LENGTH. 
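 *
 * (For reference -- an assumption based on the SPC/SBC caching mode
 * page definition, not stated elsewhere in this file: the WCE and RCD
 * flags read below live in byte 2 of the caching page, WCE at bit 2
 * and RCD at bit 0, which is what the wce/rcd bit fields of
 * struct mode_caching map onto.)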
9520 */ 9521 if (un->un_f_cfg_is_atapi == TRUE) { 9522 struct mode_header_grp2 *mhp; 9523 mhp = (struct mode_header_grp2 *)header; 9524 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9525 } else { 9526 bd_len = ((struct mode_header *)header)->bdesc_length; 9527 } 9528 9529 if (bd_len > MODE_BLK_DESC_LENGTH) { 9530 /* FMA should make upset complain here */ 9531 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9532 "sd_get_write_cache_enabled: Mode Sense returned invalid " 9533 "block descriptor length\n"); 9534 rval = EIO; 9535 goto mode_sense_failed; 9536 } 9537 9538 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9539 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9540 /* FMA could make upset complain here */ 9541 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9542 "sd_get_write_cache_enabled: Mode Sense caching page " 9543 "code mismatch %d\n", mode_caching_page->mode_page.code); 9544 rval = EIO; 9545 goto mode_sense_failed; 9546 } 9547 *is_enabled = mode_caching_page->wce; 9548 9549 mode_sense_failed: 9550 if (rval == 0) { 9551 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 9552 } else if (rval == EIO) { 9553 /* 9554 * Some disks do not support mode sense(6), we 9555 * should ignore this kind of error(sense key is 9556 * 0x5 - illegal request). 9557 */ 9558 uint8_t *sensep; 9559 int senlen; 9560 9561 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 9562 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 9563 ssc->ssc_uscsi_cmd->uscsi_rqresid); 9564 9565 if (senlen > 0 && 9566 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 9567 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 9568 } else { 9569 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9570 } 9571 } else { 9572 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9573 } 9574 kmem_free(header, buflen); 9575 return (rval); 9576 } 9577 9578 /* 9579 * Function: sd_get_nv_sup() 9580 * 9581 * Description: This routine is the driver entry point for 9582 * determining whether non-volatile cache is supported. This 9583 * determination process works as follows: 9584 * 9585 * 1. sd first queries sd.conf on whether 9586 * suppress_cache_flush bit is set for this device. 9587 * 9588 * 2. if not there, then queries the internal disk table. 9589 * 9590 * 3. if either sd.conf or internal disk table specifies 9591 * cache flush be suppressed, we don't bother checking 9592 * NV_SUP bit. 9593 * 9594 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 9595 * the optional INQUIRY VPD page 0x86. If the device 9596 * supports VPD page 0x86, sd examines the NV_SUP 9597 * (non-volatile cache support) bit in the INQUIRY VPD page 9598 * 0x86: 9599 * o If NV_SUP bit is set, sd assumes the device has a 9600 * non-volatile cache and set the 9601 * un_f_sync_nv_supported to TRUE. 9602 * o Otherwise cache is not non-volatile, 9603 * un_f_sync_nv_supported is set to FALSE. 9604 * 9605 * Arguments: un - driver soft state (unit) structure 9606 * 9607 * Return Code: 9608 * 9609 * Context: Kernel Thread 9610 */ 9611 9612 static void 9613 sd_get_nv_sup(sd_ssc_t *ssc) 9614 { 9615 int rval = 0; 9616 uchar_t *inq86 = NULL; 9617 size_t inq86_len = MAX_INQUIRY_SIZE; 9618 size_t inq86_resid = 0; 9619 struct dk_callback *dkc; 9620 struct sd_lun *un; 9621 9622 ASSERT(ssc != NULL); 9623 un = ssc->ssc_un; 9624 ASSERT(un != NULL); 9625 9626 mutex_enter(SD_MUTEX(un)); 9627 9628 /* 9629 * Be conservative on the device's support of 9630 * SYNC_NV bit: un_f_sync_nv_supported is 9631 * initialized to be false. 
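 *
 * (For reference: in the Extended INQUIRY Data VPD page 0x86 queried
 * below, the NV_SUP flag is carried in byte 6 -- hence the later
 * inq86[6] test -- and, when set, indicates that the device has a
 * non-volatile cache.)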
9632  */
9633 	un->un_f_sync_nv_supported = FALSE;
9634 
9635 	/*
9636 	 * If either sd.conf or the internal disk table
9637 	 * specifies that cache flush be suppressed, then
9638 	 * we don't bother checking the NV_SUP bit.
9639 	 */
9640 	if (un->un_f_suppress_cache_flush == TRUE) {
9641 		mutex_exit(SD_MUTEX(un));
9642 		return;
9643 	}
9644 
9645 	if (sd_check_vpd_page_support(ssc) == 0 &&
9646 	    un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
9647 		mutex_exit(SD_MUTEX(un));
9648 		/* collect page 86 data if available */
9649 		inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
9650 
9651 		rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
9652 		    0x01, 0x86, &inq86_resid);
9653 
9654 		if (rval == 0 && (inq86_len - inq86_resid > 6)) {
9655 			SD_TRACE(SD_LOG_COMMON, un,
9656 			    "sd_get_nv_sup: successfully got VPD page: %x "
9657 			    "PAGE LENGTH: %x "
9658 			    "BYTE 6: %x\n",
9659 			    inq86[1], inq86[3], inq86[6]);
9660 
9661 			mutex_enter(SD_MUTEX(un));
9662 			/*
9663 			 * Check the value of the NV_SUP bit:
9664 			 * un_f_sync_nv_supported is set to TRUE only if
9665 			 * the device reports the NV_SUP bit as 1.
9666 			 */
9667 			if (inq86[6] & SD_VPD_NV_SUP) {
9668 				un->un_f_sync_nv_supported = TRUE;
9669 			}
9670 			mutex_exit(SD_MUTEX(un));
9671 		} else if (rval != 0) {
9672 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9673 		}
9674 
9675 		kmem_free(inq86, inq86_len);
9676 	} else {
9677 		mutex_exit(SD_MUTEX(un));
9678 	}
9679 
9680 	/*
9681 	 * Send a SYNC CACHE command to check whether the
9682 	 * SYNC_NV bit is supported. By this point
9683 	 * un_f_sync_nv_supported holds the correct value.
9684 	 */
9685 	mutex_enter(SD_MUTEX(un));
9686 	if (un->un_f_sync_nv_supported) {
9687 		mutex_exit(SD_MUTEX(un));
9688 		dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
9689 		dkc->dkc_flag = FLUSH_VOLATILE;
9690 		(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
9691 
9692 		/*
9693 		 * Send a TEST UNIT READY command to the device. This should
9694 		 * clear any outstanding UNIT ATTENTION that may be present.
9695 		 */
9696 		rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
9697 		if (rval != 0)
9698 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9699 
9700 		kmem_free(dkc, sizeof (struct dk_callback));
9701 	} else {
9702 		mutex_exit(SD_MUTEX(un));
9703 	}
9704 
9705 	SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: "
9706 	    "un_f_suppress_cache_flush is set to %d\n",
9707 	    un->un_f_suppress_cache_flush);
9708 }
9709 
9710 /*
9711  * Function: sd_make_device
9712  *
9713  * Description: Utility routine to return the Solaris device number from
9714  *		the data in the device's dev_info structure.
9715  *
9716  * Return Code: The Solaris device number
9717  *
9718  * Context: Any
9719  */
9720 
9721 static dev_t
9722 sd_make_device(dev_info_t *devi)
9723 {
9724 	return (makedevice(ddi_driver_major(devi),
9725 	    ddi_get_instance(devi) << SDUNIT_SHIFT));
9726 }
9727 
9728 
9729 /*
9730  * Function: sd_pm_entry
9731  *
9732  * Description: Called at the start of a new command to manage power
9733  *		and busy status of a device. This includes determining whether
9734  *		the current power state of the device is sufficient for
9735  *		performing the command or whether it must be changed.
9736  *		The PM framework is notified appropriately.
9737  *		Only with a return status of DDI_SUCCESS will the
9738  *		component be marked busy to the framework.
9739  *
9740  *		All callers of sd_pm_entry must check the return status
9741  *		and only call sd_pm_exit if it was DDI_SUCCESS. A status
9742  *		of DDI_FAILURE indicates the device failed to power up.
9743  *		In this case un_pm_count has been adjusted so the result
9744  *		on exit is still powered down, i.e.
count is less than 0. 9745 * Calling sd_pm_exit with this count value hits an ASSERT. 9746 * 9747 * Return Code: DDI_SUCCESS or DDI_FAILURE 9748 * 9749 * Context: Kernel thread context. 9750 */ 9751 9752 static int 9753 sd_pm_entry(struct sd_lun *un) 9754 { 9755 int return_status = DDI_SUCCESS; 9756 9757 ASSERT(!mutex_owned(SD_MUTEX(un))); 9758 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9759 9760 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 9761 9762 if (un->un_f_pm_is_enabled == FALSE) { 9763 SD_TRACE(SD_LOG_IO_PM, un, 9764 "sd_pm_entry: exiting, PM not enabled\n"); 9765 return (return_status); 9766 } 9767 9768 /* 9769 * Just increment a counter if PM is enabled. On the transition from 9770 * 0 ==> 1, mark the device as busy. The iodone side will decrement 9771 * the count with each IO and mark the device as idle when the count 9772 * hits 0. 9773 * 9774 * If the count is less than 0 the device is powered down. If a powered 9775 * down device is successfully powered up then the count must be 9776 * incremented to reflect the power up. Note that it'll get incremented 9777 * a second time to become busy. 9778 * 9779 * Because the following has the potential to change the device state 9780 * and must release the un_pm_mutex to do so, only one thread can be 9781 * allowed through at a time. 9782 */ 9783 9784 mutex_enter(&un->un_pm_mutex); 9785 while (un->un_pm_busy == TRUE) { 9786 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 9787 } 9788 un->un_pm_busy = TRUE; 9789 9790 if (un->un_pm_count < 1) { 9791 9792 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 9793 9794 /* 9795 * Indicate we are now busy so the framework won't attempt to 9796 * power down the device. This call will only fail if either 9797 * we passed a bad component number or the device has no 9798 * components. Neither of these should ever happen. 9799 */ 9800 mutex_exit(&un->un_pm_mutex); 9801 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9802 ASSERT(return_status == DDI_SUCCESS); 9803 9804 mutex_enter(&un->un_pm_mutex); 9805 9806 if (un->un_pm_count < 0) { 9807 mutex_exit(&un->un_pm_mutex); 9808 9809 SD_TRACE(SD_LOG_IO_PM, un, 9810 "sd_pm_entry: power up component\n"); 9811 9812 /* 9813 * pm_raise_power will cause sdpower to be called 9814 * which brings the device power level to the 9815 * desired state, If successful, un_pm_count and 9816 * un_power_level will be updated appropriately. 9817 */ 9818 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9819 SD_PM_STATE_ACTIVE(un)); 9820 9821 mutex_enter(&un->un_pm_mutex); 9822 9823 if (return_status != DDI_SUCCESS) { 9824 /* 9825 * Power up failed. 9826 * Idle the device and adjust the count 9827 * so the result on exit is that we're 9828 * still powered down, ie. count is less than 0. 9829 */ 9830 SD_TRACE(SD_LOG_IO_PM, un, 9831 "sd_pm_entry: power up failed," 9832 " idle the component\n"); 9833 9834 (void) pm_idle_component(SD_DEVINFO(un), 0); 9835 un->un_pm_count--; 9836 } else { 9837 /* 9838 * Device is powered up, verify the 9839 * count is non-negative. 9840 * This is debug only. 9841 */ 9842 ASSERT(un->un_pm_count == 0); 9843 } 9844 } 9845 9846 if (return_status == DDI_SUCCESS) { 9847 /* 9848 * For performance, now that the device has been tagged 9849 * as busy, and it's known to be powered up, update the 9850 * chain types to use jump tables that do not include 9851 * pm. This significantly lowers the overhead and 9852 * therefore improves performance. 
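 *
 * Callers pair this routine with sd_pm_exit() as in the following
 * sketch (see e.g. sdopen/sdclose below):
 *
 *	if (sd_pm_entry(un) == DDI_SUCCESS) {
 *		issue the command(s) ...
 *		sd_pm_exit(un);
 *	} else {
 *		fail the request; do NOT call sd_pm_exit()
 *	}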
9853 */ 9854 9855 mutex_exit(&un->un_pm_mutex); 9856 mutex_enter(SD_MUTEX(un)); 9857 SD_TRACE(SD_LOG_IO_PM, un, 9858 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9859 un->un_uscsi_chain_type); 9860 9861 if (un->un_f_non_devbsize_supported) { 9862 un->un_buf_chain_type = 9863 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9864 } else { 9865 un->un_buf_chain_type = 9866 SD_CHAIN_INFO_DISK_NO_PM; 9867 } 9868 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9869 9870 SD_TRACE(SD_LOG_IO_PM, un, 9871 " changed uscsi_chain_type to %d\n", 9872 un->un_uscsi_chain_type); 9873 mutex_exit(SD_MUTEX(un)); 9874 mutex_enter(&un->un_pm_mutex); 9875 9876 if (un->un_pm_idle_timeid == NULL) { 9877 /* 300 ms. */ 9878 un->un_pm_idle_timeid = 9879 timeout(sd_pm_idletimeout_handler, un, 9880 (drv_usectohz((clock_t)300000))); 9881 /* 9882 * Include an extra call to busy which keeps the 9883 * device busy with-respect-to the PM layer 9884 * until the timer fires, at which time it'll 9885 * get the extra idle call. 9886 */ 9887 (void) pm_busy_component(SD_DEVINFO(un), 0); 9888 } 9889 } 9890 } 9891 un->un_pm_busy = FALSE; 9892 /* Next... */ 9893 cv_signal(&un->un_pm_busy_cv); 9894 9895 un->un_pm_count++; 9896 9897 SD_TRACE(SD_LOG_IO_PM, un, 9898 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9899 9900 mutex_exit(&un->un_pm_mutex); 9901 9902 return (return_status); 9903 } 9904 9905 9906 /* 9907 * Function: sd_pm_exit 9908 * 9909 * Description: Called at the completion of a command to manage busy 9910 * status for the device. If the device becomes idle the 9911 * PM framework is notified. 9912 * 9913 * Context: Kernel thread context 9914 */ 9915 9916 static void 9917 sd_pm_exit(struct sd_lun *un) 9918 { 9919 ASSERT(!mutex_owned(SD_MUTEX(un))); 9920 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9921 9922 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 9923 9924 /* 9925 * After attach the following flag is only read, so don't 9926 * take the penalty of acquiring a mutex for it. 9927 */ 9928 if (un->un_f_pm_is_enabled == TRUE) { 9929 9930 mutex_enter(&un->un_pm_mutex); 9931 un->un_pm_count--; 9932 9933 SD_TRACE(SD_LOG_IO_PM, un, 9934 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9935 9936 ASSERT(un->un_pm_count >= 0); 9937 if (un->un_pm_count == 0) { 9938 mutex_exit(&un->un_pm_mutex); 9939 9940 SD_TRACE(SD_LOG_IO_PM, un, 9941 "sd_pm_exit: idle component\n"); 9942 9943 (void) pm_idle_component(SD_DEVINFO(un), 0); 9944 9945 } else { 9946 mutex_exit(&un->un_pm_mutex); 9947 } 9948 } 9949 9950 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9951 } 9952 9953 9954 /* 9955 * Function: sdopen 9956 * 9957 * Description: Driver's open(9e) entry point function. 
9958 * 9959 * Arguments: dev_i - pointer to device number 9960 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9961 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9962 * cred_p - user credential pointer 9963 * 9964 * Return Code: EINVAL 9965 * ENXIO 9966 * EIO 9967 * EROFS 9968 * EBUSY 9969 * 9970 * Context: Kernel thread context 9971 */ 9972 /* ARGSUSED */ 9973 static int 9974 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9975 { 9976 struct sd_lun *un; 9977 int nodelay; 9978 int part; 9979 uint64_t partmask; 9980 int instance; 9981 dev_t dev; 9982 int rval = EIO; 9983 diskaddr_t nblks = 0; 9984 diskaddr_t label_cap; 9985 9986 /* Validate the open type */ 9987 if (otyp >= OTYPCNT) { 9988 return (EINVAL); 9989 } 9990 9991 dev = *dev_p; 9992 instance = SDUNIT(dev); 9993 mutex_enter(&sd_detach_mutex); 9994 9995 /* 9996 * Fail the open if there is no softstate for the instance, or 9997 * if another thread somewhere is trying to detach the instance. 9998 */ 9999 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 10000 (un->un_detach_count != 0)) { 10001 mutex_exit(&sd_detach_mutex); 10002 /* 10003 * The probe cache only needs to be cleared when open (9e) fails 10004 * with ENXIO (4238046). 10005 */ 10006 /* 10007 * un-conditionally clearing probe cache is ok with 10008 * separate sd/ssd binaries 10009 * x86 platform can be an issue with both parallel 10010 * and fibre in 1 binary 10011 */ 10012 sd_scsi_clear_probe_cache(); 10013 return (ENXIO); 10014 } 10015 10016 /* 10017 * The un_layer_count is to prevent another thread in specfs from 10018 * trying to detach the instance, which can happen when we are 10019 * called from a higher-layer driver instead of thru specfs. 10020 * This will not be needed when DDI provides a layered driver 10021 * interface that allows specfs to know that an instance is in 10022 * use by a layered driver & should not be detached. 10023 * 10024 * Note: the semantics for layered driver opens are exactly one 10025 * close for every open. 10026 */ 10027 if (otyp == OTYP_LYR) { 10028 un->un_layer_count++; 10029 } 10030 10031 /* 10032 * Keep a count of the current # of opens in progress. This is because 10033 * some layered drivers try to call us as a regular open. This can 10034 * cause problems that we cannot prevent, however by keeping this count 10035 * we can at least keep our open and detach routines from racing against 10036 * each other under such conditions. 10037 */ 10038 un->un_opens_in_progress++; 10039 mutex_exit(&sd_detach_mutex); 10040 10041 nodelay = (flag & (FNDELAY | FNONBLOCK)); 10042 part = SDPART(dev); 10043 partmask = 1 << part; 10044 10045 /* 10046 * We use a semaphore here in order to serialize 10047 * open and close requests on the device. 10048 */ 10049 sema_p(&un->un_semoclose); 10050 10051 mutex_enter(SD_MUTEX(un)); 10052 10053 /* 10054 * All device accesses go thru sdstrategy() where we check 10055 * on suspend status but there could be a scsi_poll command, 10056 * which bypasses sdstrategy(), so we need to check pm 10057 * status. 
10058 */ 10059 10060 if (!nodelay) { 10061 while ((un->un_state == SD_STATE_SUSPENDED) || 10062 (un->un_state == SD_STATE_PM_CHANGING)) { 10063 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10064 } 10065 10066 mutex_exit(SD_MUTEX(un)); 10067 if (sd_pm_entry(un) != DDI_SUCCESS) { 10068 rval = EIO; 10069 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 10070 "sdopen: sd_pm_entry failed\n"); 10071 goto open_failed_with_pm; 10072 } 10073 mutex_enter(SD_MUTEX(un)); 10074 } 10075 10076 /* check for previous exclusive open */ 10077 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 10078 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 10079 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 10080 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 10081 10082 if (un->un_exclopen & (partmask)) { 10083 goto excl_open_fail; 10084 } 10085 10086 if (flag & FEXCL) { 10087 int i; 10088 if (un->un_ocmap.lyropen[part]) { 10089 goto excl_open_fail; 10090 } 10091 for (i = 0; i < (OTYPCNT - 1); i++) { 10092 if (un->un_ocmap.regopen[i] & (partmask)) { 10093 goto excl_open_fail; 10094 } 10095 } 10096 } 10097 10098 /* 10099 * Check the write permission if this is a removable media device, 10100 * NDELAY has not been set, and writable permission is requested. 10101 * 10102 * Note: If NDELAY was set and this is write-protected media the WRITE 10103 * attempt will fail with EIO as part of the I/O processing. This is a 10104 * more permissive implementation that allows the open to succeed and 10105 * WRITE attempts to fail when appropriate. 10106 */ 10107 if (un->un_f_chk_wp_open) { 10108 if ((flag & FWRITE) && (!nodelay)) { 10109 mutex_exit(SD_MUTEX(un)); 10110 /* 10111 * Defer the check for write permission on writable 10112 * DVD drive till sdstrategy and will not fail open even 10113 * if FWRITE is set as the device can be writable 10114 * depending upon the media and the media can change 10115 * after the call to open(). 10116 */ 10117 if (un->un_f_dvdram_writable_device == FALSE) { 10118 if (ISCD(un) || sr_check_wp(dev)) { 10119 rval = EROFS; 10120 mutex_enter(SD_MUTEX(un)); 10121 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10122 "write to cd or write protected media\n"); 10123 goto open_fail; 10124 } 10125 } 10126 mutex_enter(SD_MUTEX(un)); 10127 } 10128 } 10129 10130 /* 10131 * If opening in NDELAY/NONBLOCK mode, just return. 10132 * Check if disk is ready and has a valid geometry later. 10133 */ 10134 if (!nodelay) { 10135 sd_ssc_t *ssc; 10136 10137 mutex_exit(SD_MUTEX(un)); 10138 ssc = sd_ssc_init(un); 10139 rval = sd_ready_and_valid(ssc, part); 10140 sd_ssc_fini(ssc); 10141 mutex_enter(SD_MUTEX(un)); 10142 /* 10143 * Fail if device is not ready or if the number of disk 10144 * blocks is zero or negative for non CD devices. 10145 */ 10146 10147 nblks = 0; 10148 10149 if (rval == SD_READY_VALID && (!ISCD(un))) { 10150 /* if cmlb_partinfo fails, nblks remains 0 */ 10151 mutex_exit(SD_MUTEX(un)); 10152 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 10153 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 10154 mutex_enter(SD_MUTEX(un)); 10155 } 10156 10157 if ((rval != SD_READY_VALID) || 10158 (!ISCD(un) && nblks <= 0)) { 10159 rval = un->un_f_has_removable_media ? ENXIO : EIO; 10160 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10161 "device not ready or invalid disk block value\n"); 10162 goto open_fail; 10163 } 10164 #if defined(__i386) || defined(__amd64) 10165 } else { 10166 uchar_t *cp; 10167 /* 10168 * x86 requires special nodelay handling, so that p0 is 10169 * always defined and accessible. 
10170 * Invalidate geometry only if device is not already open. 10171 */ 10172 cp = &un->un_ocmap.chkd[0]; 10173 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10174 if (*cp != (uchar_t)0) { 10175 break; 10176 } 10177 cp++; 10178 } 10179 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10180 mutex_exit(SD_MUTEX(un)); 10181 cmlb_invalidate(un->un_cmlbhandle, 10182 (void *)SD_PATH_DIRECT); 10183 mutex_enter(SD_MUTEX(un)); 10184 } 10185 10186 #endif 10187 } 10188 10189 if (otyp == OTYP_LYR) { 10190 un->un_ocmap.lyropen[part]++; 10191 } else { 10192 un->un_ocmap.regopen[otyp] |= partmask; 10193 } 10194 10195 /* Set up open and exclusive open flags */ 10196 if (flag & FEXCL) { 10197 un->un_exclopen |= (partmask); 10198 } 10199 10200 /* 10201 * If the lun is EFI labeled and lun capacity is greater than the 10202 * capacity contained in the label, log a sys-event to notify the 10203 * interested module. 10204 * To avoid an infinite loop of logging sys-event, we only log the 10205 * event when the lun is not opened in NDELAY mode. The event handler 10206 * should open the lun in NDELAY mode. 10207 */ 10208 if (!(flag & FNDELAY)) { 10209 mutex_exit(SD_MUTEX(un)); 10210 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 10211 (void*)SD_PATH_DIRECT) == 0) { 10212 mutex_enter(SD_MUTEX(un)); 10213 if (un->un_f_blockcount_is_valid && 10214 un->un_blockcount > label_cap) { 10215 mutex_exit(SD_MUTEX(un)); 10216 sd_log_lun_expansion_event(un, 10217 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 10218 mutex_enter(SD_MUTEX(un)); 10219 } 10220 } else { 10221 mutex_enter(SD_MUTEX(un)); 10222 } 10223 } 10224 10225 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10226 "open of part %d type %d\n", part, otyp); 10227 10228 mutex_exit(SD_MUTEX(un)); 10229 if (!nodelay) { 10230 sd_pm_exit(un); 10231 } 10232 10233 sema_v(&un->un_semoclose); 10234 10235 mutex_enter(&sd_detach_mutex); 10236 un->un_opens_in_progress--; 10237 mutex_exit(&sd_detach_mutex); 10238 10239 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 10240 return (DDI_SUCCESS); 10241 10242 excl_open_fail: 10243 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 10244 rval = EBUSY; 10245 10246 open_fail: 10247 mutex_exit(SD_MUTEX(un)); 10248 10249 /* 10250 * On a failed open we must exit the pm management. 10251 */ 10252 if (!nodelay) { 10253 sd_pm_exit(un); 10254 } 10255 open_failed_with_pm: 10256 sema_v(&un->un_semoclose); 10257 10258 mutex_enter(&sd_detach_mutex); 10259 un->un_opens_in_progress--; 10260 if (otyp == OTYP_LYR) { 10261 un->un_layer_count--; 10262 } 10263 mutex_exit(&sd_detach_mutex); 10264 10265 return (rval); 10266 } 10267 10268 10269 /* 10270 * Function: sdclose 10271 * 10272 * Description: Driver's close(9e) entry point function. 
10273  *
10274  * Arguments: dev - device number
10275  *		flag - file status flag, informational only
10276  *		otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10277  *		cred_p - user credential pointer
10278  *
10279  * Return Code: ENXIO
10280  *
10281  * Context: Kernel thread context
10282  */
10283 /* ARGSUSED */
10284 static int
10285 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
10286 {
10287 	struct sd_lun	*un;
10288 	uchar_t		*cp;
10289 	int		part;
10290 	int		nodelay;
10291 	int		rval = 0;
10292 
10293 	/* Validate the open type */
10294 	if (otyp >= OTYPCNT) {
10295 		return (ENXIO);
10296 	}
10297 
10298 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10299 		return (ENXIO);
10300 	}
10301 
10302 	part = SDPART(dev);
10303 	nodelay = flag & (FNDELAY | FNONBLOCK);
10304 
10305 	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10306 	    "sdclose: close of part %d type %d\n", part, otyp);
10307 
10308 	/*
10309 	 * We use a semaphore here in order to serialize
10310 	 * open and close requests on the device.
10311 	 */
10312 	sema_p(&un->un_semoclose);
10313 
10314 	mutex_enter(SD_MUTEX(un));
10315 
10316 	/* Don't proceed if power is being changed. */
10317 	while (un->un_state == SD_STATE_PM_CHANGING) {
10318 		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10319 	}
10320 
10321 	if (un->un_exclopen & (1 << part)) {
10322 		un->un_exclopen &= ~(1 << part);
10323 	}
10324 
10325 	/* Update the open partition map */
10326 	if (otyp == OTYP_LYR) {
10327 		un->un_ocmap.lyropen[part] -= 1;
10328 	} else {
10329 		un->un_ocmap.regopen[otyp] &= ~(1 << part);
10330 	}
10331 
10332 	cp = &un->un_ocmap.chkd[0];
10333 	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10334 		if (*cp != NULL) {
10335 			break;
10336 		}
10337 		cp++;
10338 	}
10339 
10340 	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10341 		SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
10342 
10343 		/*
10344 		 * We avoid persistence upon the last close, and set
10345 		 * the throttle back to the maximum.
10346 		 */
10347 		un->un_throttle = un->un_saved_throttle;
10348 
10349 		if (un->un_state == SD_STATE_OFFLINE) {
10350 			if (un->un_f_is_fibre == FALSE) {
10351 				scsi_log(SD_DEVINFO(un), sd_label,
10352 				    CE_WARN, "offline\n");
10353 			}
10354 			mutex_exit(SD_MUTEX(un));
10355 			cmlb_invalidate(un->un_cmlbhandle,
10356 			    (void *)SD_PATH_DIRECT);
10357 			mutex_enter(SD_MUTEX(un));
10358 
10359 		} else {
10360 			/*
10361 			 * Flush any outstanding writes in NVRAM cache.
10362 			 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
10363 			 * command; it may not work for non-Pluto devices.
10364 			 * SYNCHRONIZE CACHE is not required for removables,
10365 			 * except DVD-RAM drives.
10366 			 *
10367 			 * Also note: because SYNCHRONIZE CACHE is currently
10368 			 * the only command issued here that requires the
10369 			 * drive be powered up, only do the power up before
10370 			 * sending the Sync Cache command. If additional
10371 			 * commands are added which require a powered up
10372 			 * drive, the following sequence may have to change.
10373 			 *
10374 			 * And finally, note that parallel SCSI on SPARC
10375 			 * only issues a Sync Cache to DVD-RAM, a newly
10376 			 * supported device.
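			 *
			 * Summarizing the conditionals below (illustrative
			 * restatement of the #if logic):
			 *
			 *	x86/amd64:  flush if (sync cache supported &&
			 *	            required) or the device is a
			 *	            writable DVD-RAM
			 *	otherwise:  flush only for a writable DVD-RAM
			 *	            device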
10377 			 */
10378 #if defined(__i386) || defined(__amd64)
10379 			if ((un->un_f_sync_cache_supported &&
10380 			    un->un_f_sync_cache_required) ||
10381 			    un->un_f_dvdram_writable_device == TRUE) {
10382 #else
10383 			if (un->un_f_dvdram_writable_device == TRUE) {
10384 #endif
10385 				mutex_exit(SD_MUTEX(un));
10386 				if (sd_pm_entry(un) == DDI_SUCCESS) {
10387 					rval =
10388 					    sd_send_scsi_SYNCHRONIZE_CACHE(un,
10389 					    NULL);
10390 					/* ignore error if not supported */
10391 					if (rval == ENOTSUP) {
10392 						rval = 0;
10393 					} else if (rval != 0) {
10394 						rval = EIO;
10395 					}
10396 					sd_pm_exit(un);
10397 				} else {
10398 					rval = EIO;
10399 				}
10400 				mutex_enter(SD_MUTEX(un));
10401 			}
10402 
10403 			/*
10404 			 * For devices which support DOOR_LOCK, send an ALLOW
10405 			 * MEDIA REMOVAL command, but don't get upset if it
10406 			 * fails. We need to raise the power of the drive
10407 			 * before we can call sd_send_scsi_DOORLOCK().
10408 			 */
10409 			if (un->un_f_doorlock_supported) {
10410 				mutex_exit(SD_MUTEX(un));
10411 				if (sd_pm_entry(un) == DDI_SUCCESS) {
10412 					sd_ssc_t	*ssc;
10413 
10414 					ssc = sd_ssc_init(un);
10415 					rval = sd_send_scsi_DOORLOCK(ssc,
10416 					    SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
10417 					if (rval != 0)
10418 						sd_ssc_assessment(ssc,
10419 						    SD_FMT_IGNORE);
10420 					sd_ssc_fini(ssc);
10421 
10422 					sd_pm_exit(un);
10423 					if (ISCD(un) && (rval != 0) &&
10424 					    (nodelay != 0)) {
10425 						rval = ENXIO;
10426 					}
10427 				} else {
10428 					rval = EIO;
10429 				}
10430 				mutex_enter(SD_MUTEX(un));
10431 			}
10432 
10433 			/*
10434 			 * If a device has removable media, invalidate all
10435 			 * parameters related to media, such as geometry,
10436 			 * blocksize, and blockcount.
10437 			 */
10438 			if (un->un_f_has_removable_media) {
10439 				sr_ejected(un);
10440 			}
10441 
10442 			/*
10443 			 * Destroy the cache (if it exists) which was
10444 			 * allocated for the write maps since this is
10445 			 * the last close for this media.
10446 			 */
10447 			if (un->un_wm_cache) {
10448 				/*
10449 				 * Check if there are pending commands,
10450 				 * and if there are, give a warning and
10451 				 * do not destroy the cache.
10452 				 */
10453 				if (un->un_ncmds_in_driver > 0) {
10454 					scsi_log(SD_DEVINFO(un),
10455 					    sd_label, CE_WARN,
10456 					    "Unable to clean up memory "
10457 					    "because of pending I/O\n");
10458 				} else {
10459 					kmem_cache_destroy(
10460 					    un->un_wm_cache);
10461 					un->un_wm_cache = NULL;
10462 				}
10463 			}
10464 		}
10465 	}
10466 
10467 	mutex_exit(SD_MUTEX(un));
10468 	sema_v(&un->un_semoclose);
10469 
10470 	if (otyp == OTYP_LYR) {
10471 		mutex_enter(&sd_detach_mutex);
10472 		/*
10473 		 * The detach routine may run when the layer count
10474 		 * drops to zero.
10475 		 */
10476 		un->un_layer_count--;
10477 		mutex_exit(&sd_detach_mutex);
10478 	}
10479 
10480 	return (rval);
10481 }
10482 
10483 
10484 /*
10485  * Function: sd_ready_and_valid
10486  *
10487  * Description: Test if device is ready and has a valid geometry.
10488  *
10489  * Arguments: ssc  - sd_ssc_t containing the soft state (unit) structure
10490  *		part - partition number to be checked
10491  *
10492  * Return Code: SD_READY_VALID		ready and valid label
10493  *		SD_NOT_READY_VALID	not ready, no label
10494  *		SD_RESERVED_BY_OTHERS	reservation conflict
10495  *
10496  * Context: Never called at interrupt context.
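 *
 * A typical call sequence, as used by sdopen() and sdread() below
 * (sketch):
 *
 *	ssc = sd_ssc_init(un);
 *	rval = sd_ready_and_valid(ssc, part);
 *	sd_ssc_fini(ssc);
 *	if (rval != SD_READY_VALID)
 *		fail with EIO/ENXIO as appropriate;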
10497  */
10498 
10499 static int
10500 sd_ready_and_valid(sd_ssc_t *ssc, int part)
10501 {
10502 	struct sd_errstats	*stp;
10503 	uint64_t		capacity;
10504 	uint_t			lbasize;
10505 	int			rval = SD_READY_VALID;
10506 	char			name_str[48];
10507 	boolean_t		is_valid;
10508 	struct sd_lun		*un;
10509 	int			status;
10510 
10511 	ASSERT(ssc != NULL);
10512 	un = ssc->ssc_un;
10513 	ASSERT(un != NULL);
10514 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10515 
10516 	mutex_enter(SD_MUTEX(un));
10517 	/*
10518 	 * If a device has removable media, we must check if media is
10519 	 * ready when checking if this device is ready and valid.
10520 	 */
10521 	if (un->un_f_has_removable_media) {
10522 		mutex_exit(SD_MUTEX(un));
10523 		status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10524 
10525 		if (status != 0) {
10526 			rval = SD_NOT_READY_VALID;
10527 			mutex_enter(SD_MUTEX(un));
10528 
10529 			/* Ignore any failed status for removable media */
10530 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10531 
10532 			goto done;
10533 		}
10534 
10535 		is_valid = SD_IS_VALID_LABEL(un);
10536 		mutex_enter(SD_MUTEX(un));
10537 		if (!is_valid ||
10538 		    (un->un_f_blockcount_is_valid == FALSE) ||
10539 		    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
10540 
10541 			/* Capacity has to be read on every open. */
10542 			mutex_exit(SD_MUTEX(un));
10543 			status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
10544 			    &lbasize, SD_PATH_DIRECT);
10545 
10546 			if (status != 0) {
10547 				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10548 
10549 				cmlb_invalidate(un->un_cmlbhandle,
10550 				    (void *)SD_PATH_DIRECT);
10551 				mutex_enter(SD_MUTEX(un));
10552 				rval = SD_NOT_READY_VALID;
10553 
10554 				goto done;
10555 			} else {
10556 				mutex_enter(SD_MUTEX(un));
10557 				sd_update_block_info(un, lbasize, capacity);
10558 			}
10559 		}
10560 
10561 		/*
10562 		 * Check if the media in the device is writable or not.
10563 		 */
10564 		if (!is_valid && ISCD(un)) {
10565 			sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
10566 		}
10567 
10568 	} else {
10569 		/*
10570 		 * Do a test unit ready to clear any unit attention from non-CD
10571 		 * devices.
10572 		 */
10573 		mutex_exit(SD_MUTEX(un));
10574 
10575 		status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10576 		if (status != 0) {
10577 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10578 		}
10579 
10580 		mutex_enter(SD_MUTEX(un));
10581 	}
10582 
10583 
10584 	/*
10585 	 * If this is a device with a block size other than 512 bytes,
10586 	 * allocate space for the wmap cache. This is being done here since
10587 	 * every time the media is changed this routine will be called and
10588 	 * the block size is a function of the media rather than the device.
10589 	 */
10590 	if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR ||
10591 	    un->un_f_non_devbsize_supported) &&
10592 	    un->un_tgt_blocksize != DEV_BSIZE) {
10593 		if (!(un->un_wm_cache)) {
10594 			(void) snprintf(name_str, sizeof (name_str),
10595 			    "%s%d_cache",
10596 			    ddi_driver_name(SD_DEVINFO(un)),
10597 			    ddi_get_instance(SD_DEVINFO(un)));
10598 			un->un_wm_cache = kmem_cache_create(
10599 			    name_str, sizeof (struct sd_w_map),
10600 			    8, sd_wm_cache_constructor,
10601 			    sd_wm_cache_destructor, NULL,
10602 			    (void *)un, NULL, 0);
10603 			if (!(un->un_wm_cache)) {
10604 				rval = ENOMEM;
10605 				goto done;
10606 			}
10607 		}
10608 	}
10609 
10610 	if (un->un_state == SD_STATE_NORMAL) {
10611 		/*
10612 		 * If the target is not yet ready here (defined by a TUR
10613 		 * failure), invalidate the geometry and print an 'offline'
10614 		 * message. This is a legacy message, as the state of the
10615 		 * target is not actually changed to SD_STATE_OFFLINE.
10616 * 10617 * If the TUR fails for EACCES (Reservation Conflict), 10618 * SD_RESERVED_BY_OTHERS will be returned to indicate 10619 * reservation conflict. If the TUR fails for other 10620 * reasons, SD_NOT_READY_VALID will be returned. 10621 */ 10622 int err; 10623 10624 mutex_exit(SD_MUTEX(un)); 10625 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10626 mutex_enter(SD_MUTEX(un)); 10627 10628 if (err != 0) { 10629 mutex_exit(SD_MUTEX(un)); 10630 cmlb_invalidate(un->un_cmlbhandle, 10631 (void *)SD_PATH_DIRECT); 10632 mutex_enter(SD_MUTEX(un)); 10633 if (err == EACCES) { 10634 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10635 "reservation conflict\n"); 10636 rval = SD_RESERVED_BY_OTHERS; 10637 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10638 } else { 10639 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10640 "drive offline\n"); 10641 rval = SD_NOT_READY_VALID; 10642 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 10643 } 10644 goto done; 10645 } 10646 } 10647 10648 if (un->un_f_format_in_progress == FALSE) { 10649 mutex_exit(SD_MUTEX(un)); 10650 10651 (void) cmlb_validate(un->un_cmlbhandle, 0, 10652 (void *)SD_PATH_DIRECT); 10653 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL, 10654 NULL, (void *) SD_PATH_DIRECT) != 0) { 10655 rval = SD_NOT_READY_VALID; 10656 mutex_enter(SD_MUTEX(un)); 10657 10658 goto done; 10659 } 10660 if (un->un_f_pkstats_enabled) { 10661 sd_set_pstats(un); 10662 SD_TRACE(SD_LOG_IO_PARTITION, un, 10663 "sd_ready_and_valid: un:0x%p pstats created and " 10664 "set\n", un); 10665 } 10666 mutex_enter(SD_MUTEX(un)); 10667 } 10668 10669 /* 10670 * If this device supports DOOR_LOCK command, try and send 10671 * this command to PREVENT MEDIA REMOVAL, but don't get upset 10672 * if it fails. For a CD, however, it is an error 10673 */ 10674 if (un->un_f_doorlock_supported) { 10675 mutex_exit(SD_MUTEX(un)); 10676 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 10677 SD_PATH_DIRECT); 10678 10679 if ((status != 0) && ISCD(un)) { 10680 rval = SD_NOT_READY_VALID; 10681 mutex_enter(SD_MUTEX(un)); 10682 10683 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10684 10685 goto done; 10686 } else if (status != 0) 10687 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10688 mutex_enter(SD_MUTEX(un)); 10689 } 10690 10691 /* The state has changed, inform the media watch routines */ 10692 un->un_mediastate = DKIO_INSERTED; 10693 cv_broadcast(&un->un_state_cv); 10694 rval = SD_READY_VALID; 10695 10696 done: 10697 10698 /* 10699 * Initialize the capacity kstat value, if no media previously 10700 * (capacity kstat is 0) and a media has been inserted 10701 * (un_blockcount > 0). 10702 */ 10703 if (un->un_errstats != NULL) { 10704 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10705 if ((stp->sd_capacity.value.ui64 == 0) && 10706 (un->un_f_blockcount_is_valid == TRUE)) { 10707 stp->sd_capacity.value.ui64 = 10708 (uint64_t)((uint64_t)un->un_blockcount * 10709 un->un_sys_blocksize); 10710 } 10711 } 10712 10713 mutex_exit(SD_MUTEX(un)); 10714 return (rval); 10715 } 10716 10717 10718 /* 10719 * Function: sdmin 10720 * 10721 * Description: Routine to limit the size of a data transfer. Used in 10722 * conjunction with physio(9F). 10723 * 10724 * Arguments: bp - pointer to the indicated buf(9S) struct. 10725 * 10726 * Context: Kernel thread context. 
10718 /* 10719 * Function: sdmin 10720 * 10721 * Description: Routine to limit the size of a data transfer. Used in 10722 * conjunction with physio(9F). 10723 * 10724 * Arguments: bp - pointer to the indicated buf(9S) struct. 10725 * 10726 * Context: Kernel thread context. 10727 */ 10728 10729 static void 10730 sdmin(struct buf *bp) 10731 { 10732 struct sd_lun *un; 10733 int instance; 10734 10735 instance = SDUNIT(bp->b_edev); 10736 10737 un = ddi_get_soft_state(sd_state, instance); 10738 ASSERT(un != NULL); 10739 10740 /* 10741 * We depend on DMA partial or buf breakup to restrict the 10742 * I/O size if either of them is enabled. 10743 */ 10744 if (un->un_partial_dma_supported || 10745 un->un_buf_breakup_supported) { 10746 return; 10747 } 10748 10749 if (bp->b_bcount > un->un_max_xfer_size) { 10750 bp->b_bcount = un->un_max_xfer_size; 10751 } 10752 } 10753 10754 10755 /* 10756 * Function: sdread 10757 * 10758 * Description: Driver's read(9e) entry point function. 10759 * 10760 * Arguments: dev - device number 10761 * uio - structure pointer describing where data is to be stored 10762 * in user's space 10763 * cred_p - user credential pointer 10764 * 10765 * Return Code: ENXIO 10766 * EIO 10767 * EINVAL 10768 * value returned by physio 10769 * 10770 * Context: Kernel thread context. 10771 */ 10772 /* ARGSUSED */ 10773 static int 10774 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 10775 { 10776 struct sd_lun *un = NULL; 10777 int secmask; 10778 int err = 0; 10779 sd_ssc_t *ssc; 10780 10781 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10782 return (ENXIO); 10783 } 10784 10785 ASSERT(!mutex_owned(SD_MUTEX(un))); 10786 10787 10788 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10789 mutex_enter(SD_MUTEX(un)); 10790 /* 10791 * Because the call to sd_ready_and_valid will issue I/O we 10792 * must wait here if either the device is suspended or 10793 * if its power level is changing. 10794 */ 10795 while ((un->un_state == SD_STATE_SUSPENDED) || 10796 (un->un_state == SD_STATE_PM_CHANGING)) { 10797 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10798 } 10799 un->un_ncmds_in_driver++; 10800 mutex_exit(SD_MUTEX(un)); 10801 10802 /* Initialize sd_ssc_t for internal uscsi commands */ 10803 ssc = sd_ssc_init(un); 10804 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10805 err = EIO; 10806 } else { 10807 err = 0; 10808 } 10809 sd_ssc_fini(ssc); 10810 10811 mutex_enter(SD_MUTEX(un)); 10812 un->un_ncmds_in_driver--; 10813 ASSERT(un->un_ncmds_in_driver >= 0); 10814 mutex_exit(SD_MUTEX(un)); 10815 if (err != 0) 10816 return (err); 10817 } 10818 10819 /* 10820 * Read requests are restricted to multiples of the system block size. 10821 */ 10822 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) 10823 secmask = un->un_tgt_blocksize - 1; 10824 else 10825 secmask = DEV_BSIZE - 1; 10826 10827 if (uio->uio_loffset & ((offset_t)(secmask))) { 10828 SD_ERROR(SD_LOG_READ_WRITE, un, 10829 "sdread: file offset not modulo %d\n", 10830 secmask + 1); 10831 err = EINVAL; 10832 } else if (uio->uio_iov->iov_len & (secmask)) { 10833 SD_ERROR(SD_LOG_READ_WRITE, un, 10834 "sdread: transfer length not modulo %d\n", 10835 secmask + 1); 10836 err = EINVAL; 10837 } else { 10838 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10839 } 10840 10841 return (err); 10842 } 10843 10844
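/*
 * Worked example (illustrative only): the secmask test above relies on the
 * block size being a power of two, so "x & (blocksize - 1)" is equivalent
 * to "x % blocksize" but cheaper. For a 512-byte block size, secmask is
 * 0x1ff; an offset of 513 fails the check while 1024 passes. A compiled-out
 * sketch of the same test:
 */
#if 0
static int
example_is_aligned(offset_t offset, size_t len, uint_t blocksize)
{
	uint_t secmask = blocksize - 1;	/* power-of-two block size assumed */

	/* Non-zero low bits mean the value is not a blocksize multiple. */
	return (((offset & secmask) == 0) && ((len & secmask) == 0));
}
#endif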
10845 /* 10846 * Function: sdwrite 10847 * 10848 * Description: Driver's write(9e) entry point function. 10849 * 10850 * Arguments: dev - device number 10851 * uio - structure pointer describing where data is stored in 10852 * user's space 10853 * cred_p - user credential pointer 10854 * 10855 * Return Code: ENXIO 10856 * EIO 10857 * EINVAL 10858 * value returned by physio 10859 * 10860 * Context: Kernel thread context. 10861 */ 10862 /* ARGSUSED */ 10863 static int 10864 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10865 { 10866 struct sd_lun *un = NULL; 10867 int secmask; 10868 int err = 0; 10869 sd_ssc_t *ssc; 10870 10871 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10872 return (ENXIO); 10873 } 10874 10875 ASSERT(!mutex_owned(SD_MUTEX(un))); 10876 10877 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10878 mutex_enter(SD_MUTEX(un)); 10879 /* 10880 * Because the call to sd_ready_and_valid will issue I/O we 10881 * must wait here if either the device is suspended or 10882 * if its power level is changing. 10883 */ 10884 while ((un->un_state == SD_STATE_SUSPENDED) || 10885 (un->un_state == SD_STATE_PM_CHANGING)) { 10886 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10887 } 10888 un->un_ncmds_in_driver++; 10889 mutex_exit(SD_MUTEX(un)); 10890 10891 /* Initialize sd_ssc_t for internal uscsi commands */ 10892 ssc = sd_ssc_init(un); 10893 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10894 err = EIO; 10895 } else { 10896 err = 0; 10897 } 10898 sd_ssc_fini(ssc); 10899 10900 mutex_enter(SD_MUTEX(un)); 10901 un->un_ncmds_in_driver--; 10902 ASSERT(un->un_ncmds_in_driver >= 0); 10903 mutex_exit(SD_MUTEX(un)); 10904 if (err != 0) 10905 return (err); 10906 } 10907 10908 /* 10909 * Write requests are restricted to multiples of the system block size. 10910 */ 10911 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) 10912 secmask = un->un_tgt_blocksize - 1; 10913 else 10914 secmask = DEV_BSIZE - 1; 10915 10916 if (uio->uio_loffset & ((offset_t)(secmask))) { 10917 SD_ERROR(SD_LOG_READ_WRITE, un, 10918 "sdwrite: file offset not modulo %d\n", 10919 secmask + 1); 10920 err = EINVAL; 10921 } else if (uio->uio_iov->iov_len & (secmask)) { 10922 SD_ERROR(SD_LOG_READ_WRITE, un, 10923 "sdwrite: transfer length not modulo %d\n", 10924 secmask + 1); 10925 err = EINVAL; 10926 } else { 10927 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10928 } 10929 10930 return (err); 10931 } 10932 10933 10934 /* 10935 * Function: sdaread 10936 * 10937 * Description: Driver's aread(9e) entry point function. 10938 * 10939 * Arguments: dev - device number 10940 * aio - structure pointer describing where data is to be stored 10941 * cred_p - user credential pointer 10942 * 10943 * Return Code: ENXIO 10944 * EIO 10945 * EINVAL 10946 * value returned by aphysio 10947 * 10948 * Context: Kernel thread context. 10949 */ 10950 /* ARGSUSED */ 10951 static int 10952 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10953 { 10954 struct sd_lun *un = NULL; 10955 struct uio *uio = aio->aio_uio; 10956 int secmask; 10957 int err = 0; 10958 sd_ssc_t *ssc; 10959 10960 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10961 return (ENXIO); 10962 } 10963 10964 ASSERT(!mutex_owned(SD_MUTEX(un))); 10965 10966 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10967 mutex_enter(SD_MUTEX(un)); 10968 /* 10969 * Because the call to sd_ready_and_valid will issue I/O we 10970 * must wait here if either the device is suspended or 10971 * if its power level is changing.
10972 */ 10973 while ((un->un_state == SD_STATE_SUSPENDED) || 10974 (un->un_state == SD_STATE_PM_CHANGING)) { 10975 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10976 } 10977 un->un_ncmds_in_driver++; 10978 mutex_exit(SD_MUTEX(un)); 10979 10980 /* Initialize sd_ssc_t for internal uscsi commands */ 10981 ssc = sd_ssc_init(un); 10982 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10983 err = EIO; 10984 } else { 10985 err = 0; 10986 } 10987 sd_ssc_fini(ssc); 10988 10989 mutex_enter(SD_MUTEX(un)); 10990 un->un_ncmds_in_driver--; 10991 ASSERT(un->un_ncmds_in_driver >= 0); 10992 mutex_exit(SD_MUTEX(un)); 10993 if (err != 0) 10994 return (err); 10995 } 10996 10997 /* 10998 * Read requests are restricted to multiples of the system block size. 10999 */ 11000 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) 11001 secmask = un->un_tgt_blocksize - 1; 11002 else 11003 secmask = DEV_BSIZE - 1; 11004 11005 if (uio->uio_loffset & ((offset_t)(secmask))) { 11006 SD_ERROR(SD_LOG_READ_WRITE, un, 11007 "sdaread: file offset not modulo %d\n", 11008 secmask + 1); 11009 err = EINVAL; 11010 } else if (uio->uio_iov->iov_len & (secmask)) { 11011 SD_ERROR(SD_LOG_READ_WRITE, un, 11012 "sdaread: transfer length not modulo %d\n", 11013 secmask + 1); 11014 err = EINVAL; 11015 } else { 11016 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 11017 } 11018 11019 return (err); 11020 } 11021 11022 11023 /* 11024 * Function: sdawrite 11025 * 11026 * Description: Driver's awrite(9e) entry point function. 11027 * 11028 * Arguments: dev - device number 11029 * aio - structure pointer describing where data is stored 11030 * cred_p - user credential pointer 11031 * 11032 * Return Code: ENXIO 11033 * EIO 11034 * EINVAL 11035 * value returned by aphysio 11036 * 11037 * Context: Kernel thread context. 11038 */ 11039 /* ARGSUSED */ 11040 static int 11041 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 11042 { 11043 struct sd_lun *un = NULL; 11044 struct uio *uio = aio->aio_uio; 11045 int secmask; 11046 int err = 0; 11047 sd_ssc_t *ssc; 11048 11049 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 11050 return (ENXIO); 11051 } 11052 11053 ASSERT(!mutex_owned(SD_MUTEX(un))); 11054 11055 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 11056 mutex_enter(SD_MUTEX(un)); 11057 /* 11058 * Because the call to sd_ready_and_valid will issue I/O we 11059 * must wait here if either the device is suspended or 11060 * if its power level is changing. 11061 */ 11062 while ((un->un_state == SD_STATE_SUSPENDED) || 11063 (un->un_state == SD_STATE_PM_CHANGING)) { 11064 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11065 } 11066 un->un_ncmds_in_driver++; 11067 mutex_exit(SD_MUTEX(un)); 11068 11069 /* Initialize sd_ssc_t for internal uscsi commands */ 11070 ssc = sd_ssc_init(un); 11071 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 11072 err = EIO; 11073 } else { 11074 err = 0; 11075 } 11076 sd_ssc_fini(ssc); 11077 11078 mutex_enter(SD_MUTEX(un)); 11079 un->un_ncmds_in_driver--; 11080 ASSERT(un->un_ncmds_in_driver >= 0); 11081 mutex_exit(SD_MUTEX(un)); 11082 if (err != 0) 11083 return (err); 11084 } 11085 11086 /* 11087 * Write requests are restricted to multiples of the system block size.
11088 */ 11089 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) 11090 secmask = un->un_tgt_blocksize - 1; 11091 else 11092 secmask = DEV_BSIZE - 1; 11093 11094 if (uio->uio_loffset & ((offset_t)(secmask))) { 11095 SD_ERROR(SD_LOG_READ_WRITE, un, 11096 "sdawrite: file offset not modulo %d\n", 11097 secmask + 1); 11098 err = EINVAL; 11099 } else if (uio->uio_iov->iov_len & (secmask)) { 11100 SD_ERROR(SD_LOG_READ_WRITE, un, 11101 "sdawrite: transfer length not modulo %d\n", 11102 secmask + 1); 11103 err = EINVAL; 11104 } else { 11105 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 11106 } 11107 11108 return (err); 11109 } 11110 11111 11112 11113 11114 11115 /* 11116 * Driver IO processing follows the following sequence: 11117 * 11118 * sdioctl(9E) sdstrategy(9E) biodone(9F) 11119 * | | ^ 11120 * v v | 11121 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 11122 * | | | | 11123 * v | | | 11124 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 11125 * | | ^ ^ 11126 * v v | | 11127 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 11128 * | | | | 11129 * +---+ | +------------+ +-------+ 11130 * | | | | 11131 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11132 * | v | | 11133 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 11134 * | | ^ | 11135 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11136 * | v | | 11137 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 11138 * | | ^ | 11139 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11140 * | v | | 11141 * | sd_checksum_iostart() sd_checksum_iodone() | 11142 * | | ^ | 11143 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 11144 * | v | | 11145 * | sd_pm_iostart() sd_pm_iodone() | 11146 * | | ^ | 11147 * | | | | 11148 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 11149 * | ^ 11150 * v | 11151 * sd_core_iostart() | 11152 * | | 11153 * | +------>(*destroypkt)() 11154 * +-> sd_start_cmds() <-+ | | 11155 * | | | v 11156 * | | | scsi_destroy_pkt(9F) 11157 * | | | 11158 * +->(*initpkt)() +- sdintr() 11159 * | | | | 11160 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 11161 * | +-> scsi_setup_cdb(9F) | 11162 * | | 11163 * +--> scsi_transport(9F) | 11164 * | | 11165 * +----> SCSA ---->+ 11166 * 11167 * 11168 * This code is based upon the following presumptions: 11169 * 11170 * - iostart and iodone functions operate on buf(9S) structures. These 11171 * functions perform the necessary operations on the buf(9S) and pass 11172 * them along to the next function in the chain by using the macros 11173 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 11174 * (for iodone side functions). 11175 * 11176 * - The iostart side functions may sleep. The iodone side functions 11177 * are called under interrupt context and may NOT sleep. Therefore 11178 * iodone side functions also may not call iostart side functions. 11179 * (NOTE: iostart side functions should NOT sleep for memory, as 11180 * this could result in deadlock.) 11181 * 11182 * - An iostart side function may call its corresponding iodone side 11183 * function directly (if necessary). 11184 * 11185 * - In the event of an error, an iostart side function can return a buf(9S) 11186 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 11187 * b_error in the usual way of course). 11188 * 11189 * - The taskq mechanism may be used by the iodone side functions to dispatch 11190 * requests to the iostart side functions. The iostart side functions in 11191 * this case would be called under the context of a taskq thread, so it's 11192 * OK for them to block/sleep/spin in this case. 11193 * 11194 * - iostart side functions may allocate "shadow" buf(9S) structs and 11195 * pass them along to the next function in the chain. The corresponding 11196 * iodone side functions must coalesce the "shadow" bufs and return 11197 * the "original" buf to the next higher layer. 11198 * 11199 * - The b_private field of the buf(9S) struct holds a pointer to 11200 * an sd_xbuf struct, which contains information needed to 11201 * construct the scsi_pkt for the command. 11202 * 11203 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 11204 * layer must acquire & release the SD_MUTEX(un) as needed. 11205 */ 11206 11207
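/*
 * Minimal sketch (illustrative only) of an iostart/iodone layer pair that
 * follows the presumptions above: the iostart side passes the buf down via
 * SD_NEXT_IOSTART(), and on error hands it back up via SD_BEGIN_IODONE();
 * the iodone side undoes its work and passes the buf up with
 * SD_NEXT_IODONE(). The layer itself and its helpers are hypothetical.
 */
#if 0
static void
sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	if (sd_example_prepare(un, bp) != 0) {
		/* Error: set b_error/b_resid and head up the iodone chain. */
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}
	SD_NEXT_IOSTART(index, un, bp);	/* hand off to the next layer down */
}

static void
sd_example_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	sd_example_undo(un, bp);	/* must not sleep: interrupt context */
	SD_NEXT_IODONE(index, un, bp);	/* hand off to the next layer up */
}
#endif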
11208 /* 11209 * Create taskq for all targets in the system. This is created at 11210 * _init(9E) and destroyed at _fini(9E). 11211 * 11212 * Note: here we set the minalloc to a reasonably high number to ensure that 11213 * we will have an adequate supply of task entries available at interrupt time. 11214 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 11215 * sd_taskq_create(). Since we do not want to sleep for allocations at 11216 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 11217 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 11218 * requests at any one instant in time. 11219 */ 11220 #define SD_TASKQ_NUMTHREADS 8 11221 #define SD_TASKQ_MINALLOC 256 11222 #define SD_TASKQ_MAXALLOC 256 11223 11224 static taskq_t *sd_tq = NULL; 11225 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 11226 11227 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 11228 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 11229 11230 /* 11231 * The following task queue is being created for the write part of 11232 * read-modify-write of non-512 block size devices. 11233 * Limit the number of threads to 1 for now, since this currently applies 11234 * only to DVD-RAM and MO drives, and performance for those is not the 11235 * main criterion at this stage. 11236 * Note: whether a single taskq could be used in the future remains to be 11237 * explored. 11238 */ #define SD_WMR_TASKQ_NUMTHREADS 1 11239 static taskq_t *sd_wmr_tq = NULL; 11240 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 11241
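/*
 * Illustrative sketch (not part of the driver): with maxalloc == minalloc
 * and TASKQ_PREPOPULATE as described above, a TQ_NOSLEEP dispatch from
 * interrupt context fails cleanly (returns 0) instead of blocking when the
 * preallocated entries are exhausted, so the caller must handle that case.
 */
#if 0
static void
example_dispatch(taskq_t *tq, void (*func)(void *), void *arg)
{
	/* TQ_NOSLEEP: never block for a task entry at interrupt time. */
	if (taskq_dispatch(tq, func, arg, TQ_NOSLEEP) == 0) {
		/* No entry available: fail the request rather than sleep. */
		cmn_err(CE_WARN, "example_dispatch: taskq entries exhausted");
	}
}
#endif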
11242 /* 11243 * Function: sd_taskq_create 11244 * 11245 * Description: Create taskq thread(s) and preallocate task entries 11246 * 11247 * Return Code: None; the created taskqs are stored in the sd_tq and 11248 * sd_wmr_tq globals. 11249 * Context: Can sleep. Requires blockable context. 11250 * 11251 * Notes: - The taskq() facility currently is NOT part of the DDI. 11252 * (definitely NOT recommended for 3rd-party drivers!) :-) 11253 * - taskq_create() will block for memory; it will also panic 11254 * if it cannot create the requested number of threads. 11255 * - Currently taskq_create() creates threads that cannot be 11256 * swapped. 11257 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 11258 * supply of taskq entries at interrupt time (ie, so that we 11259 * do not have to sleep for memory) 11260 */ 11261 11262 static void 11263 sd_taskq_create(void) 11264 { 11265 char taskq_name[TASKQ_NAMELEN]; 11266 11267 ASSERT(sd_tq == NULL); 11268 ASSERT(sd_wmr_tq == NULL); 11269 11270 (void) snprintf(taskq_name, sizeof (taskq_name), 11271 "%s_drv_taskq", sd_label); 11272 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 11273 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11274 TASKQ_PREPOPULATE)); 11275 11276 (void) snprintf(taskq_name, sizeof (taskq_name), 11277 "%s_rmw_taskq", sd_label); 11278 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 11279 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11280 TASKQ_PREPOPULATE)); 11281 } 11282 11283 11284 /* 11285 * Function: sd_taskq_delete 11286 * 11287 * Description: Complementary cleanup routine for sd_taskq_create(). 11288 * 11289 * Context: Kernel thread context. 11290 */ 11291 11292 static void 11293 sd_taskq_delete(void) 11294 { 11295 ASSERT(sd_tq != NULL); 11296 ASSERT(sd_wmr_tq != NULL); 11297 taskq_destroy(sd_tq); 11298 taskq_destroy(sd_wmr_tq); 11299 sd_tq = NULL; 11300 sd_wmr_tq = NULL; 11301 } 11302 11303 11304 /* 11305 * Function: sdstrategy 11306 * 11307 * Description: Driver's strategy (9E) entry point function. 11308 * 11309 * Arguments: bp - pointer to buf(9S) 11310 * 11311 * Return Code: Always returns zero 11312 * 11313 * Context: Kernel thread context. 11314 */ 11315 11316 static int 11317 sdstrategy(struct buf *bp) 11318 { 11319 struct sd_lun *un; 11320 11321 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11322 if (un == NULL) { 11323 bioerror(bp, EIO); 11324 bp->b_resid = bp->b_bcount; 11325 biodone(bp); 11326 return (0); 11327 } 11328 11329 /* As was done in the past, fail new commands if the state is dumping. */ 11330 if (un->un_state == SD_STATE_DUMPING) { 11331 bioerror(bp, ENXIO); 11332 bp->b_resid = bp->b_bcount; 11333 biodone(bp); 11334 return (0); 11335 } 11336 11337 ASSERT(!mutex_owned(SD_MUTEX(un))); 11338 11339 /* 11340 * Commands may have snuck in while we released the mutex in 11341 * DDI_SUSPEND, so we should block new commands. However, old 11342 * commands that are still in the driver at this point should 11343 * still be allowed to drain. 11344 */ 11345 mutex_enter(SD_MUTEX(un)); 11346 /* 11347 * Must wait here if either the device is suspended or 11348 * if its power level is changing. 11349 */ 11350 while ((un->un_state == SD_STATE_SUSPENDED) || 11351 (un->un_state == SD_STATE_PM_CHANGING)) { 11352 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11353 } 11354 11355 un->un_ncmds_in_driver++; 11356 11357 /* 11358 * atapi: since we currently run the CD in PIO mode, we need to call 11359 * bp_mapin here to avoid bp_mapin being called in interrupt context 11360 * from the HBA's init_pkt routine. 11361 */ 11362 if (un->un_f_cfg_is_atapi == TRUE) { 11363 mutex_exit(SD_MUTEX(un)); 11364 bp_mapin(bp); 11365 mutex_enter(SD_MUTEX(un)); 11366 } 11367 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 11368 un->un_ncmds_in_driver); 11369 11370 if (bp->b_flags & B_WRITE) 11371 un->un_f_sync_cache_required = TRUE; 11372 11373 mutex_exit(SD_MUTEX(un)); 11374 11375 /* 11376 * This will (eventually) allocate the sd_xbuf area and 11377 * call sd_xbuf_strategy().
We just want to return the 11378 * result of ddi_xbuf_qstrategy so that we have an optimized 11379 * tail call which saves us a stack frame. 11380 */ 11381 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 11382 } 11383 11384 11385 /* 11386 * Function: sd_xbuf_strategy 11387 * 11388 * Description: Function for initiating IO operations via the 11389 * ddi_xbuf_qstrategy() mechanism. 11390 * 11391 * Context: Kernel thread context. 11392 */ 11393 11394 static void 11395 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11396 { 11397 struct sd_lun *un = arg; 11398 11399 ASSERT(bp != NULL); 11400 ASSERT(xp != NULL); 11401 ASSERT(un != NULL); 11402 ASSERT(!mutex_owned(SD_MUTEX(un))); 11403 11404 /* 11405 * Initialize the fields in the xbuf and save a pointer to the 11406 * xbuf in bp->b_private. 11407 */ 11408 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11409 11410 /* Send the buf down the iostart chain */ 11411 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11412 } 11413 11414 11415 /* 11416 * Function: sd_xbuf_init 11417 * 11418 * Description: Prepare the given sd_xbuf struct for use. 11419 * 11420 * Arguments: un - ptr to softstate 11421 * bp - ptr to associated buf(9S) 11422 * xp - ptr to associated sd_xbuf 11423 * chain_type - IO chain type to use: 11424 * SD_CHAIN_NULL 11425 * SD_CHAIN_BUFIO 11426 * SD_CHAIN_USCSI 11427 * SD_CHAIN_DIRECT 11428 * SD_CHAIN_DIRECT_PRIORITY 11429 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11430 * initialization; may be NULL if none. 11431 * 11432 * Context: Kernel thread context 11433 */ 11434 11435 static void 11436 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11437 uchar_t chain_type, void *pktinfop) 11438 { 11439 int index; 11440 11441 ASSERT(un != NULL); 11442 ASSERT(bp != NULL); 11443 ASSERT(xp != NULL); 11444 11445 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11446 bp, chain_type); 11447 11448 xp->xb_un = un; 11449 xp->xb_pktp = NULL; 11450 xp->xb_pktinfo = pktinfop; 11451 xp->xb_private = bp->b_private; 11452 xp->xb_blkno = (daddr_t)bp->b_blkno; 11453 11454 /* 11455 * Set up the iostart and iodone chain indexes in the xbuf, based 11456 * upon the specified chain type to use. 11457 */ 11458 switch (chain_type) { 11459 case SD_CHAIN_NULL: 11460 /* 11461 * Fall through to just use the values for the buf type, even 11462 * though for the NULL chain these values will never be used. 11463 */ 11464 /* FALLTHRU */ 11465 case SD_CHAIN_BUFIO: 11466 index = un->un_buf_chain_type; 11467 if ((!un->un_f_has_removable_media) && 11468 (un->un_tgt_blocksize != 0) && 11469 (un->un_tgt_blocksize != DEV_BSIZE)) { 11470 int secmask = 0, blknomask = 0; 11471 blknomask = 11472 (un->un_tgt_blocksize / DEV_BSIZE) - 1; 11473 secmask = un->un_tgt_blocksize - 1; 11474 11475 if ((bp->b_lblkno & (blknomask)) || 11476 (bp->b_bcount & (secmask))) { 11477 if (un->un_f_rmw_type != 11478 SD_RMW_TYPE_RETURN_ERROR) { 11479 if (un->un_f_pm_is_enabled == FALSE) 11480 index = 11481 SD_CHAIN_INFO_MSS_DSK_NO_PM; 11482 else 11483 index = 11484 SD_CHAIN_INFO_MSS_DISK; 11485 } 11486 } 11487 } 11488 break; 11489 case SD_CHAIN_USCSI: 11490 index = un->un_uscsi_chain_type; 11491 break; 11492 case SD_CHAIN_DIRECT: 11493 index = un->un_direct_chain_type; 11494 break; 11495 case SD_CHAIN_DIRECT_PRIORITY: 11496 index = un->un_priority_chain_type; 11497 break; 11498 default: 11499 /* We're really broken if we ever get here... */ 11500 panic("sd_xbuf_init: illegal chain type!"); 11501 /*NOTREACHED*/ 11502 } 11503 11504 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11505 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11506 11507 /* 11508 * It might be a bit easier to simply bzero the entire xbuf above, 11509 * but it turns out that since we init a fair number of members anyway, 11510 * we save a fair number of cycles by explicitly assigning zero. 11511 */ 11512 xp->xb_pkt_flags = 0; 11513 xp->xb_dma_resid = 0; 11514 xp->xb_retry_count = 0; 11515 xp->xb_victim_retry_count = 0; 11516 xp->xb_ua_retry_count = 0; 11517 xp->xb_nr_retry_count = 0; 11518 xp->xb_sense_bp = NULL; 11519 xp->xb_sense_status = 0; 11520 xp->xb_sense_state = 0; 11521 xp->xb_sense_resid = 0; 11522 xp->xb_ena = 0; 11523 11524 bp->b_private = xp; 11525 bp->b_flags &= ~(B_DONE | B_ERROR); 11526 bp->b_resid = 0; 11527 bp->av_forw = NULL; 11528 bp->av_back = NULL; 11529 bioerror(bp, 0); 11530 11531 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11532 } 11533 11534
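/*
 * Worked example (illustrative only) of the misalignment test used in
 * sd_xbuf_init() above for a hypothetical 4K-sector target: with
 * un_tgt_blocksize == 4096, blknomask is (4096 / DEV_BSIZE) - 1 == 7 and
 * secmask is 4095. A request at b_lblkno == 8 with b_bcount == 4096 is
 * aligned (8 & 7 == 0, 4096 & 4095 == 0); b_lblkno == 9 or b_bcount == 512
 * is misaligned and gets routed to a read-modify-write chain.
 */
#if 0
static int
example_is_misaligned(daddr_t lblkno, size_t bcount, uint_t tgt_blocksize)
{
	int blknomask = (tgt_blocksize / DEV_BSIZE) - 1;
	int secmask = tgt_blocksize - 1;

	return ((lblkno & blknomask) || (bcount & secmask));
}
#endif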
11535 /* 11536 * Function: sd_uscsi_strategy 11537 * 11538 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11539 * 11540 * Arguments: bp - buf struct ptr 11541 * 11542 * Return Code: Always returns 0 11543 * 11544 * Context: Kernel thread context 11545 */ 11546 11547 static int 11548 sd_uscsi_strategy(struct buf *bp) 11549 { 11550 struct sd_lun *un; 11551 struct sd_uscsi_info *uip; 11552 struct sd_xbuf *xp; 11553 uchar_t chain_type; 11554 uchar_t cmd; 11555 11556 ASSERT(bp != NULL); 11557 11558 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11559 if (un == NULL) { 11560 bioerror(bp, EIO); 11561 bp->b_resid = bp->b_bcount; 11562 biodone(bp); 11563 return (0); 11564 } 11565 11566 ASSERT(!mutex_owned(SD_MUTEX(un))); 11567 11568 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11569 11570 /* 11571 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11572 */ 11573 ASSERT(bp->b_private != NULL); 11574 uip = (struct sd_uscsi_info *)bp->b_private; 11575 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0]; 11576 11577 mutex_enter(SD_MUTEX(un)); 11578 /* 11579 * atapi: since we currently run the CD in PIO mode, we need to call 11580 * bp_mapin here to avoid bp_mapin being called in interrupt context 11581 * from the HBA's init_pkt routine. 11582 */ 11583 if (un->un_f_cfg_is_atapi == TRUE) { 11584 mutex_exit(SD_MUTEX(un)); 11585 bp_mapin(bp); 11586 mutex_enter(SD_MUTEX(un)); 11587 } 11588 un->un_ncmds_in_driver++; 11589 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11590 un->un_ncmds_in_driver); 11591 11592 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) && 11593 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1)) 11594 un->un_f_sync_cache_required = TRUE; 11595 11596 mutex_exit(SD_MUTEX(un)); 11597 11598 switch (uip->ui_flags) { 11599 case SD_PATH_DIRECT: 11600 chain_type = SD_CHAIN_DIRECT; 11601 break; 11602 case SD_PATH_DIRECT_PRIORITY: 11603 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11604 break; 11605 default: 11606 chain_type = SD_CHAIN_USCSI; 11607 break; 11608 } 11609 11610 /* 11611 * We may allocate an extra buffer for external USCSI commands: if the 11612 * application asks for more than 20 bytes of sense data via USCSI, 11613 * the SCSA layer will allocate a 252-byte sense buffer for that command. 11614 */ 11615 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 11616 SENSE_LENGTH) { 11617 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 11618 MAX_SENSE_LENGTH, KM_SLEEP); 11619 } else { 11620 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 11621 } 11622 11623 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 11624 11625 /* Use the index obtained within xbuf_init */ 11626 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 11627 11628 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 11629 11630 return (0); 11631 } 11632
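/*
 * Illustrative note (not part of the driver): the xbuf allocation above
 * sizes the trailing sense area dynamically. The struct normally embeds
 * SENSE_LENGTH bytes; when the caller wants more, the allocation is grown
 * by the difference up to MAX_SENSE_LENGTH, and the same size arithmetic
 * must be repeated on the kmem_free() in the iodone path (see
 * sd_uscsi_iodone() later in this file). A compiled-out sketch:
 */
#if 0
static struct sd_xbuf *
example_alloc_xbuf(size_t rqlen)
{
	size_t len = sizeof (struct sd_xbuf);

	/* Grow the embedded sense area if the caller asked for more. */
	if (rqlen > SENSE_LENGTH)
		len = len - SENSE_LENGTH + MAX_SENSE_LENGTH;
	return (kmem_zalloc(len, KM_SLEEP));
}
#endif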
11633 /* 11634 * Function: sd_send_scsi_cmd 11635 * 11636 * Description: Runs a USCSI command for user (when called through sdioctl), 11637 * or for the driver 11638 * 11639 * Arguments: dev - the dev_t for the device 11640 * incmd - ptr to a valid uscsi_cmd struct 11641 * flag - bit flag, indicating open settings, 32/64 bit type 11642 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11643 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11644 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11645 * to use the USCSI "direct" chain and bypass the normal 11646 * command waitq. 11647 * 11648 * Return Code: 0 - successful completion of the given command 11649 * EIO - scsi_uscsi_handle_command() failed 11650 * ENXIO - soft state not found for specified dev 11651 * EINVAL 11652 * EFAULT - copyin/copyout error 11653 * return code of scsi_uscsi_handle_command(): 11654 * EIO 11655 * ENXIO 11656 * EACCES 11657 * 11658 * Context: Waits for command to complete. Can sleep. 11659 */ 11660 11661 static int 11662 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 11663 enum uio_seg dataspace, int path_flag) 11664 { 11665 struct sd_lun *un; 11666 sd_ssc_t *ssc; 11667 int rval; 11668 11669 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 11670 if (un == NULL) { 11671 return (ENXIO); 11672 } 11673 11674 /* 11675 * Use sd_ssc_send to handle the uscsi cmd 11676 */ 11677 ssc = sd_ssc_init(un); 11678 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag); 11679 sd_ssc_fini(ssc); 11680 11681 return (rval); 11682 } 11683
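/*
 * Illustrative sketch (not part of the driver) of how a kernel caller
 * might fill in a uscsi_cmd for sd_send_scsi_cmd(); the TEST UNIT READY
 * CDB, the FKIOCTL flag, and the timeout value are assumptions chosen
 * purely for illustration.
 */
#if 0
static int
example_send_tur(dev_t dev)
{
	struct uscsi_cmd ucmd;
	union scsi_cdb cdb;

	bzero(&ucmd, sizeof (ucmd));
	bzero(&cdb, sizeof (cdb));
	cdb.scc_cmd = SCMD_TEST_UNIT_READY;

	ucmd.uscsi_cdb = (caddr_t)&cdb;
	ucmd.uscsi_cdblen = CDB_GROUP0;		/* 6-byte CDB */
	ucmd.uscsi_flags = USCSI_SILENT;
	ucmd.uscsi_timeout = 60;

	return (sd_send_scsi_cmd(dev, &ucmd, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_DIRECT));
}
#endif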
11684 /* 11685 * Function: sd_ssc_init 11686 * 11687 * Description: Users of uscsi commands call this function to initialize the 11688 * necessary fields, such as the uscsi_cmd and sd_uscsi_info structs. 11689 * 11690 * The return value of sd_send_scsi_cmd is treated as a 11691 * fault in various conditions. Even when it is non-zero, some 11692 * callers may ignore the return value. That is to say, we 11693 * cannot make an accurate assessment in sdintr, since a 11694 * command failing in sdintr does not mean the caller of 11695 * sd_send_scsi_cmd will treat it as a real failure. 11696 * 11697 * To avoid printing too many error logs for a failed uscsi 11698 * packet that the caller may not treat as a failure, the 11699 * sd driver keeps silent when handling all uscsi commands. 11700 * 11701 * During detach->attach and attach-open, for some types of 11702 * problems, the driver should be providing information about 11703 * the problem encountered. Devices use USCSI_SILENT, which 11704 * suppresses all driver information. The result is that no 11705 * information about the problem is available. Being 11706 * completely silent during this time is inappropriate. The 11707 * driver needs a more selective filter than USCSI_SILENT, so 11708 * that information related to faults is provided. 11709 * 11710 * To make an accurate assessment, the caller of 11711 * sd_send_scsi_USCSI_CMD should take ownership and 11712 * gather the necessary information to print error messages. 11713 * 11714 * If we want to print the necessary info for a uscsi command, we need 11715 * to keep the uscsi_cmd and sd_uscsi_info until we can make the 11716 * assessment. We use sd_ssc_init to allocate the 11717 * structs needed for sending a uscsi command, and we are also 11718 * responsible for freeing the memory by calling 11719 * sd_ssc_fini. 11720 * 11721 * The calling sequence will look like: 11722 * sd_ssc_init-> 11723 * 11724 * ... 11725 * 11726 * sd_send_scsi_USCSI_CMD-> 11727 * sd_ssc_send-> - - - sdintr 11728 * ... 11729 * 11730 * if we think the return value should be treated as a 11731 * failure, we make the assessment here and print out 11732 * what is necessary by retrieving the uscsi_cmd and sd_uscsi_info 11733 * 11734 * ... 11735 * 11736 * sd_ssc_fini 11737 * 11738 * 11739 * Arguments: un - pointer to driver soft state (unit) structure for this 11740 * target. 11741 * 11742 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct, it contains 11743 * uscsi_cmd and sd_uscsi_info. 11744 * NULL - if memory for the sd_ssc_t struct cannot be allocated 11745 * 11746 * Context: Kernel Thread. 11747 */ 11748 static sd_ssc_t * 11749 sd_ssc_init(struct sd_lun *un) 11750 { 11751 sd_ssc_t *ssc; 11752 struct uscsi_cmd *ucmdp; 11753 struct sd_uscsi_info *uip; 11754 11755 ASSERT(un != NULL); 11756 ASSERT(!mutex_owned(SD_MUTEX(un))); 11757 11758 /* 11759 * Allocate sd_ssc_t structure 11760 */ 11761 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP); 11762 11763 /* 11764 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine 11765 */ 11766 ucmdp = scsi_uscsi_alloc(); 11767 11768 /* 11769 * Allocate sd_uscsi_info structure 11770 */ 11771 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11772 11773 ssc->ssc_uscsi_cmd = ucmdp; 11774 ssc->ssc_uscsi_info = uip; 11775 ssc->ssc_un = un; 11776 11777 return (ssc); 11778 } 11779 11780 /* 11781 * Function: sd_ssc_fini 11782 * 11783 * Description: Free the sd_ssc_t struct and everything hanging off it. 11784 * 11785 * Arguments: ssc - struct pointer of sd_ssc_t. 11786 */ 11787 static void 11788 sd_ssc_fini(sd_ssc_t *ssc) 11789 { 11790 scsi_uscsi_free(ssc->ssc_uscsi_cmd); 11791 11792 if (ssc->ssc_uscsi_info != NULL) { 11793 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info)); 11794 ssc->ssc_uscsi_info = NULL; 11795 } 11796 11797 kmem_free(ssc, sizeof (sd_ssc_t)); 11798 ssc = NULL; 11799 } 11800
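/*
 * Illustrative sketch (not part of the driver) of the init/send/assess/fini
 * pairing described above; the CDB setup is elided, and the flag and
 * assessment values chosen are assumptions for illustration only.
 */
#if 0
static void
example_ssc_lifecycle(struct sd_lun *un, struct uscsi_cmd *ucmd)
{
	sd_ssc_t *ssc = sd_ssc_init(un);
	int rval = sd_ssc_send(ssc, ucmd, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_DIRECT);

	/* Every sd_ssc_send() must be paired with an assessment. */
	if (rval == 0)
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
	else
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	sd_ssc_fini(ssc);
}
#endif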
11801 /* 11802 * Function: sd_ssc_send 11803 * 11804 * Description: Runs a USCSI command for user when called through sdioctl, 11805 * or for the driver. 11806 * 11807 * Arguments: ssc - pointer to the sd_ssc_t struct carrying the uscsi_cmd 11808 * and sd_uscsi_info. 11809 * incmd - ptr to a valid uscsi_cmd struct 11810 * flag - bit flag, indicating open settings, 32/64 bit type 11811 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11812 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11813 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11814 * to use the USCSI "direct" chain and bypass the normal 11815 * command waitq. 11816 * 11817 * Return Code: 0 - successful completion of the given command 11818 * EIO - scsi_uscsi_handle_command() failed 11819 * ENXIO - soft state not found for specified dev 11820 * ECANCELED - command cancelled due to low power 11821 * EINVAL 11822 * EFAULT - copyin/copyout error 11823 * return code of scsi_uscsi_handle_command(): 11824 * EIO 11825 * ENXIO 11826 * EACCES 11827 * 11828 * Context: Kernel Thread; 11829 * Waits for command to complete. Can sleep. 11830 */ 11831 static int 11832 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag, 11833 enum uio_seg dataspace, int path_flag) 11834 { 11835 struct sd_uscsi_info *uip; 11836 struct uscsi_cmd *uscmd; 11837 struct sd_lun *un; 11838 dev_t dev; 11839 11840 int format = 0; 11841 int rval; 11842 11843 ASSERT(ssc != NULL); 11844 un = ssc->ssc_un; 11845 ASSERT(un != NULL); 11846 uscmd = ssc->ssc_uscsi_cmd; 11847 ASSERT(uscmd != NULL); 11848 ASSERT(!mutex_owned(SD_MUTEX(un))); 11849 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) { 11850 /* 11851 * If we get here, the previous uscsi command has not been 11852 * processed by sd_ssc_assessment. This violates our rules 11853 * of FMA telemetry processing, so print this message along 11854 * with the last undisposed uscsi command. 11855 * 11856 */ 11857 if (uscmd->uscsi_cdb != NULL) { 11858 SD_INFO(SD_LOG_SDTEST, un, 11859 "sd_ssc_send is missing a matching " 11860 "sd_ssc_assessment when running command 0x%x.\n", 11861 uscmd->uscsi_cdb[0]); 11862 } 11863 /* 11864 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be 11865 * the initial status. 11866 */ 11867 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11868 } 11869 11870 /* 11871 * We need to make sure each sd_ssc_send is followed by an 11872 * sd_ssc_assessment to avoid missing FMA telemetry. 11873 */ 11874 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT; 11875 11876 /* 11877 * If USCSI_PMFAILFAST is set and un is in low power, fail the 11878 * command immediately. 11879 */ 11880 mutex_enter(SD_MUTEX(un)); 11881 mutex_enter(&un->un_pm_mutex); 11882 if ((uscmd->uscsi_flags & USCSI_PMFAILFAST) && 11883 SD_DEVICE_IS_IN_LOW_POWER(un)) { 11884 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send:" 11885 "un:0x%p is in low power\n", un); 11886 mutex_exit(&un->un_pm_mutex); 11887 mutex_exit(SD_MUTEX(un)); 11888 return (ECANCELED); 11889 } 11890 mutex_exit(&un->un_pm_mutex); 11891 mutex_exit(SD_MUTEX(un)); 11892 11893 #ifdef SDDEBUG 11894 switch (dataspace) { 11895 case UIO_USERSPACE: 11896 SD_TRACE(SD_LOG_IO, un, 11897 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 11898 break; 11899 case UIO_SYSSPACE: 11900 SD_TRACE(SD_LOG_IO, un, 11901 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 11902 break; 11903 default: 11904 SD_TRACE(SD_LOG_IO, un, 11905 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 11906 break; 11907 } 11908 #endif 11909 11910 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 11911 SD_ADDRESS(un), &uscmd); 11912 if (rval != 0) { 11913 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send: " 11914 "scsi_uscsi_copyin failed\n"); 11915 return (rval); 11916 } 11917 11918 if ((uscmd->uscsi_cdb != NULL) && 11919 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 11920 mutex_enter(SD_MUTEX(un)); 11921 un->un_f_format_in_progress = TRUE; 11922 mutex_exit(SD_MUTEX(un)); 11923 format = 1; 11924 } 11925 11926 /* 11927 * Allocate an sd_uscsi_info struct and fill it with the info 11928 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11929 * b_private in the buf for sd_initpkt_for_uscsi().
Note that 11930 * since we allocate the buf here in this function, we do not 11931 * need to preserve the prior contents of b_private. 11932 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11933 */ 11934 uip = ssc->ssc_uscsi_info; 11935 uip->ui_flags = path_flag; 11936 uip->ui_cmdp = uscmd; 11937 11938 /* 11939 * Commands sent with priority are intended for error recovery 11940 * situations, and do not have retries performed. 11941 */ 11942 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11943 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11944 } 11945 uscmd->uscsi_flags &= ~USCSI_NOINTR; 11946 11947 dev = SD_GET_DEV(un); 11948 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 11949 sd_uscsi_strategy, NULL, uip); 11950 11951 /* 11952 * Mark ssc_flags right after handle_cmd to make sure 11953 * the uscsi command has been sent. 11954 */ 11955 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 11956 11957 #ifdef SDDEBUG 11958 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11959 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11960 uscmd->uscsi_status, uscmd->uscsi_resid); 11961 if (uscmd->uscsi_bufaddr != NULL) { 11962 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11963 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11964 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11965 if (dataspace == UIO_SYSSPACE) { 11966 SD_DUMP_MEMORY(un, SD_LOG_IO, 11967 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11968 uscmd->uscsi_buflen, SD_LOG_HEX); 11969 } 11970 } 11971 #endif 11972 11973 if (format == 1) { 11974 mutex_enter(SD_MUTEX(un)); 11975 un->un_f_format_in_progress = FALSE; 11976 mutex_exit(SD_MUTEX(un)); 11977 } 11978 11979 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 11980 11981 return (rval); 11982 } 11983 11984 /* 11985 * Function: sd_ssc_print 11986 * 11987 * Description: Print information available to the console. 11988 * 11989 * Arguments: ssc - pointer to the sd_ssc_t struct carrying the uscsi_cmd 11990 * and sd_uscsi_info. 11991 * sd_severity - log level. 11992 * Context: Kernel thread or interrupt context. 11993 */ 11994 static void 11995 sd_ssc_print(sd_ssc_t *ssc, int sd_severity) 11996 { 11997 struct uscsi_cmd *ucmdp; 11998 struct scsi_device *devp; 11999 dev_info_t *devinfo; 12000 uchar_t *sensep; 12001 int senlen; 12002 union scsi_cdb *cdbp; 12003 uchar_t com; 12004 extern struct scsi_key_strings scsi_cmds[]; 12005 12006 ASSERT(ssc != NULL); 12007 ASSERT(ssc->ssc_un != NULL); 12008 12009 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT) 12010 return; 12011 ucmdp = ssc->ssc_uscsi_cmd; 12012 devp = SD_SCSI_DEVP(ssc->ssc_un); 12013 devinfo = SD_DEVINFO(ssc->ssc_un); 12014 ASSERT(ucmdp != NULL); 12015 ASSERT(devp != NULL); 12016 ASSERT(devinfo != NULL); 12017 sensep = (uint8_t *)ucmdp->uscsi_rqbuf; 12018 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid; 12019 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb; 12020 12021 /* In certain cases (like DOORLOCK), the cdb could be NULL. */ 12022 if (cdbp == NULL) 12023 return; 12024 /* We don't print a log if no sense data is available. */ 12025 if (senlen == 0) 12026 sensep = NULL; 12027 com = cdbp->scc_cmd; 12028 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com, 12029 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL); 12030 } 12031
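/*
 * Worked example (illustrative only): sd_ssc_print() above computes the
 * valid sense length as uscsi_rqlen - uscsi_rqresid, i.e. the bytes
 * requested minus the bytes not transferred. If 252 bytes were requested
 * and the target returned 18, uscsi_rqresid is 234 and senlen is 18;
 * senlen == 0 means no sense data came back, so nothing is decoded.
 */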
12032 /* 12033 * Function: sd_ssc_assessment 12034 * 12035 * Description: We use this function to make an assessment at the point 12036 * where the SD driver may encounter a potential error. 12037 * 12038 * Arguments: ssc - pointer to the sd_ssc_t struct carrying the uscsi_cmd 12039 * and sd_uscsi_info. 12040 * tp_assess - a hint of strategy for ereport posting. 12041 * Possible values of tp_assess include: 12042 * SD_FMT_IGNORE - we don't post any ereport because we're 12043 * sure that it is ok to ignore the underlying problems. 12044 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now, 12045 * but it might not be correct to ignore the underlying hardware 12046 * error. 12047 * SD_FMT_STATUS_CHECK - we will post an ereport with the 12048 * payload driver-assessment of value "fail" or 12049 * "fatal" (depending on what information we have here). This 12050 * assessment value is usually set when the SD driver thinks 12051 * a potential error has occurred (typically, when the return 12052 * value of the SCSI command is EIO). 12053 * SD_FMT_STANDARD - we will post an ereport with the payload 12054 * driver-assessment of value "info". This assessment value is 12055 * set when the SCSI command returned successfully and with 12056 * sense data sent back. 12057 * 12058 * Context: Kernel thread. 12059 */ 12060 static void 12061 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess) 12062 { 12063 int senlen = 0; 12064 struct uscsi_cmd *ucmdp = NULL; 12065 struct sd_lun *un; 12066 12067 ASSERT(ssc != NULL); 12068 un = ssc->ssc_un; 12069 ASSERT(un != NULL); 12070 ucmdp = ssc->ssc_uscsi_cmd; 12071 ASSERT(ucmdp != NULL); 12072 12073 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) { 12074 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT; 12075 } else { 12076 /* 12077 * If we get here, we have a wrong calling sequence of 12078 * sd_ssc_send and sd_ssc_assessment, which should be 12079 * called in pairs to avoid losing FMA telemetry. 12080 * 12081 */ 12082 if (ucmdp->uscsi_cdb != NULL) { 12083 SD_INFO(SD_LOG_SDTEST, un, 12084 "sd_ssc_assessment is missing the " 12085 "matching sd_ssc_send when running 0x%x, " 12086 "or there are superfluous sd_ssc_assessment calls " 12087 "for the same sd_ssc_send.\n", 12088 ucmdp->uscsi_cdb[0]); 12089 } 12090 /* 12091 * Set the ssc_flags to the initial value to avoid passing 12092 * down dirty flags to the following sd_ssc_send function. 12093 */ 12094 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12095 return; 12096 } 12097 12098 /* 12099 * Only handle an issued command which is waiting for assessment. 12100 * A command which is not issued will not have 12101 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here. 12102 */ 12103 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) { 12104 sd_ssc_print(ssc, SCSI_ERR_INFO); 12105 return; 12106 } else { 12107 /* 12108 * For an issued command, we should clear this flag so that 12109 * the sd_ssc_t structure can be reused across multiple 12110 * uscsi commands. 12111 */ 12112 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED; 12113 } 12114 12115 /* 12116 * We do not deal with non-retryable (flag USCSI_DIAGNOSE set) 12117 * commands here, and we should clear the ssc_flags before returning. 12118 */ 12119 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) { 12120 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12121 return; 12122 } 12123 12124 switch (tp_assess) { 12125 case SD_FMT_IGNORE: 12126 case SD_FMT_IGNORE_COMPROMISE: 12127 break; 12128 case SD_FMT_STATUS_CHECK: 12129 /* 12130 * For a failed command (including a succeeded command 12131 * with invalid data sent back). 12132 */ 12133 sd_ssc_post(ssc, SD_FM_DRV_FATAL); 12134 break; 12135 case SD_FMT_STANDARD: 12136 /* 12137 * Always for succeeded commands, possibly with sense 12138 * data sent back.
12139 * Limitation: 12140 * We can only handle a succeeded command with sense 12141 * data sent back when auto-request-sense is enabled. 12142 */ 12143 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen - 12144 ssc->ssc_uscsi_cmd->uscsi_rqresid; 12145 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) && 12146 (un->un_f_arq_enabled == TRUE) && 12147 senlen > 0 && 12148 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) { 12149 sd_ssc_post(ssc, SD_FM_DRV_NOTICE); 12150 } 12151 break; 12152 default: 12153 /* 12154 * There should be no other types of assessment. 12155 */ 12156 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 12157 "sd_ssc_assessment got wrong " 12158 "sd_type_assessment %d.\n", tp_assess); 12159 break; 12160 } 12161 /* 12162 * Clear the ssc_flags before returning. 12163 */ 12164 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12165 } 12166 12167 /* 12168 * Function: sd_ssc_post 12169 * 12170 * Description: 1. Read the driver property to get the fm-scsi-log flag. 12171 * 2. Print a log if fm_log_capable is non-zero. 12172 * 3. Call sd_ssc_ereport_post to post an ereport if possible. 12173 * 12174 * Context: May be called from kernel thread or interrupt context. 12175 */ 12176 static void 12177 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess) 12178 { 12179 struct sd_lun *un; 12180 int sd_severity; 12181 12182 ASSERT(ssc != NULL); 12183 un = ssc->ssc_un; 12184 ASSERT(un != NULL); 12185 12186 /* 12187 * We may enter here from sd_ssc_assessment (for a USCSI command) 12188 * or directly from sdintr context. 12189 * We don't handle non-disk drives (CD-ROM, removable media). 12190 * Clear the ssc_flags before returning, in case we've set 12191 * SSC_FLAGS_INVALID_XXX, which should be skipped for a non-disk 12192 * drive. 12193 */ 12194 if (ISCD(un) || un->un_f_has_removable_media) { 12195 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12196 return; 12197 } 12198 12199 switch (sd_assess) { 12200 case SD_FM_DRV_FATAL: 12201 sd_severity = SCSI_ERR_FATAL; 12202 break; 12203 case SD_FM_DRV_RECOVERY: 12204 sd_severity = SCSI_ERR_RECOVERED; 12205 break; 12206 case SD_FM_DRV_RETRY: 12207 sd_severity = SCSI_ERR_RETRYABLE; 12208 break; 12209 case SD_FM_DRV_NOTICE: 12210 sd_severity = SCSI_ERR_INFO; 12211 break; 12212 default: 12213 sd_severity = SCSI_ERR_UNKNOWN; 12214 } 12215 /* print log */ 12216 sd_ssc_print(ssc, sd_severity); 12217 12218 /* always post ereport */ 12219 sd_ssc_ereport_post(ssc, sd_assess); 12220 } 12221
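/*
 * Minimal sketch (illustrative only) of the varargs capture pattern used
 * by sd_ssc_set_info() below: format the message once into a fixed-size
 * buffer with vsnprintf(9F) so it can be attached to an ereport later.
 * The wrapper name and parameters here are hypothetical.
 */
#if 0
static void
example_set_info(char *buf, size_t buflen, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	(void) vsnprintf(buf, buflen, fmt, ap);	/* always NUL-terminates */
	va_end(ap);
}
#endif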
12222 /* 12223 * Function: sd_ssc_set_info 12224 * 12225 * Description: Mark ssc_flags and set ssc_info, which will be the 12226 * payload of the uderr ereport. This function will cause 12227 * sd_ssc_ereport_post to post a uderr ereport only. 12228 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA (USCSI), 12229 * the function will also call SD_ERROR or scsi_log for a 12230 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device. 12231 * 12232 * Arguments: ssc - pointer to the sd_ssc_t struct carrying the uscsi_cmd 12233 * and sd_uscsi_info. 12234 * ssc_flags - indicate the sub-category of a uderr. 12235 * comp - this argument is meaningful only when 12236 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible 12237 * values include: 12238 * > 0, SD_ERROR is used with comp as the driver logging 12239 * component; 12240 * = 0, scsi-log is used to log error telemetries; 12241 * < 0, no log available for this telemetry. 12242 * 12243 * Context: Kernel thread or interrupt context 12244 */ 12245 static void 12246 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...) 12247 { 12248 va_list ap; 12249 12250 ASSERT(ssc != NULL); 12251 ASSERT(ssc->ssc_un != NULL); 12252 12253 ssc->ssc_flags |= ssc_flags; 12254 va_start(ap, fmt); 12255 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap); 12256 va_end(ap); 12257 12258 /* 12259 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command 12260 * with invalid data sent back. For a non-uscsi command, the 12261 * following code is bypassed. 12262 */ 12263 if (ssc_flags & SSC_FLAGS_INVALID_DATA) { 12264 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) { 12265 /* 12266 * If the error belongs to a certain component and we 12267 * do not want it to show up on the console, we 12268 * use SD_ERROR; otherwise scsi_log is 12269 * preferred. 12270 */ 12271 if (comp > 0) { 12272 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info); 12273 } else if (comp == 0) { 12274 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label, 12275 CE_WARN, ssc->ssc_info); 12276 } 12277 } 12278 } 12279 } 12280 12281 /* 12282 * Function: sd_buf_iodone 12283 * 12284 * Description: Frees the sd_xbuf & returns the buf to its originator. 12285 * 12286 * Context: May be called from interrupt context. 12287 */ 12288 /* ARGSUSED */ 12289 static void 12290 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 12291 { 12292 struct sd_xbuf *xp; 12293 12294 ASSERT(un != NULL); 12295 ASSERT(bp != NULL); 12296 ASSERT(!mutex_owned(SD_MUTEX(un))); 12297 12298 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 12299 12300 xp = SD_GET_XBUF(bp); 12301 ASSERT(xp != NULL); 12302 12303 /* xbuf is gone after this */ 12304 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) { 12305 mutex_enter(SD_MUTEX(un)); 12306 12307 /* 12308 * Record the time at which the command completed. 12309 * This is used to determine whether the system has been 12310 * idle long enough to declare it idle to the PM framework, 12311 * which lowers overhead and therefore improves per-I/O 12312 * performance. 12313 */ 12314 un->un_pm_idle_time = ddi_get_time(); 12315 12316 un->un_ncmds_in_driver--; 12317 ASSERT(un->un_ncmds_in_driver >= 0); 12318 SD_INFO(SD_LOG_IO, un, 12319 "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 12320 un->un_ncmds_in_driver); 12321 12322 mutex_exit(SD_MUTEX(un)); 12323 } 12324 12325 biodone(bp); /* bp is gone after this */ 12326 12327 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 12328 } 12329 12330 12331 /* 12332 * Function: sd_uscsi_iodone 12333 * 12334 * Description: Frees the sd_xbuf & returns the buf to its originator. 12335 * 12336 * Context: May be called from interrupt context. 12337 */ 12338 /* ARGSUSED */ 12339 static void 12340 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 12341 { 12342 struct sd_xbuf *xp; 12343 12344 ASSERT(un != NULL); 12345 ASSERT(bp != NULL); 12346 12347 xp = SD_GET_XBUF(bp); 12348 ASSERT(xp != NULL); 12349 ASSERT(!mutex_owned(SD_MUTEX(un))); 12350 12351 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 12352 12353 bp->b_private = xp->xb_private; 12354 12355 mutex_enter(SD_MUTEX(un)); 12356 12357 /* 12358 * Record the time at which the command completed. 12359 * This is used to determine whether the system has been 12360 * idle long enough to declare it idle to the PM framework, 12361 * which lowers overhead and therefore improves per-I/O 12362 * performance.
12363 */ 12364 un->un_pm_idle_time = ddi_get_time(); 12365 12366 un->un_ncmds_in_driver--; 12367 ASSERT(un->un_ncmds_in_driver >= 0); 12368 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 12369 un->un_ncmds_in_driver); 12370 12371 mutex_exit(SD_MUTEX(un)); 12372 12373 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 12374 SENSE_LENGTH) { 12375 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 12376 MAX_SENSE_LENGTH); 12377 } else { 12378 kmem_free(xp, sizeof (struct sd_xbuf)); 12379 } 12380 12381 biodone(bp); 12382 12383 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 12384 } 12385 12386 12387 /* 12388 * Function: sd_mapblockaddr_iostart 12389 * 12390 * Description: Verify request lies within the partition limits for 12391 * the indicated minor device. Issue "overrun" buf if 12392 * request would exceed partition range. Converts 12393 * partition-relative block address to absolute. 12394 * 12395 * Upon exit of this function: 12396 * 1. I/O is aligned 12397 * xp->xb_blkno represents the absolute sector address 12398 * 2. I/O is misaligned 12399 * xp->xb_blkno represents the absolute logical block address 12400 * based on DEV_BSIZE. The logical block address will be 12401 * converted to a physical sector address in 12402 * sd_mapblocksize_iostart. 12403 * 3. I/O is misaligned but is aligned in the "overrun" buf 12404 * xp->xb_blkno represents the absolute logical block address 12405 * based on DEV_BSIZE. The logical block address will be 12406 * converted to a physical sector address in 12407 * sd_mapblocksize_iostart. But no RMW will be issued in this case. 12408 * 12409 * Context: Can sleep 12410 * 12411 * Issues: This follows what the old code did, in terms of accessing 12412 * some of the partition info in the unit struct without holding 12413 * the mutex. This is a general issue: if the partition info 12414 * can be altered while I/O is in progress, then as soon as we 12415 * send a buf, its partitioning can be invalid before it gets to 12416 * the device. Probably the right fix is to move partitioning out 12417 * of the driver entirely. 12418 */ 12419 12420 static void 12421 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 12422 { 12423 diskaddr_t nblocks; /* #blocks in the given partition */ 12424 daddr_t blocknum; /* Block number specified by the buf */ 12425 size_t requested_nblocks; 12426 size_t available_nblocks; 12427 int partition; 12428 diskaddr_t partition_offset; 12429 struct sd_xbuf *xp; 12430 int secmask = 0, blknomask = 0; 12431 ushort_t is_aligned = TRUE; 12432 12433 ASSERT(un != NULL); 12434 ASSERT(bp != NULL); 12435 ASSERT(!mutex_owned(SD_MUTEX(un))); 12436 12437 SD_TRACE(SD_LOG_IO_PARTITION, un, 12438 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 12439 12440 xp = SD_GET_XBUF(bp); 12441 ASSERT(xp != NULL); 12442 12443 /* 12444 * If the geometry is not indicated as valid, attempt to access 12445 * the unit & verify the geometry/label. This can be the case for 12446 * removable-media devices, or if the device was opened in 12447 * NDELAY/NONBLOCK mode.
12448 */ 12449 partition = SDPART(bp->b_edev); 12450 12451 if (!SD_IS_VALID_LABEL(un)) { 12452 sd_ssc_t *ssc; 12453 /* 12454 * Initialize sd_ssc_t for internal uscsi commands. 12455 * To avoid a potential performance issue, we allocate 12456 * memory only if the label is invalid. 12457 */ 12458 ssc = sd_ssc_init(un); 12459 12460 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) { 12461 /* 12462 * For removable devices it is possible to start an 12463 * I/O without a media by opening the device in nodelay 12464 * mode. Also for writable CDs there can be many 12465 * scenarios where there is no geometry yet but the 12466 * volume manager is trying to issue a read() just 12467 * because it can see the TOC on the CD. So do not 12468 * print a message for removables. 12469 */ 12470 if (!un->un_f_has_removable_media) { 12471 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12472 "i/o to invalid geometry\n"); 12473 } 12474 bioerror(bp, EIO); 12475 bp->b_resid = bp->b_bcount; 12476 SD_BEGIN_IODONE(index, un, bp); 12477 12478 sd_ssc_fini(ssc); 12479 return; 12480 } 12481 sd_ssc_fini(ssc); 12482 } 12483 12484 nblocks = 0; 12485 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 12486 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 12487 12488 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1; 12489 secmask = un->un_tgt_blocksize - 1; 12490 12491 if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) { 12492 is_aligned = FALSE; 12493 } 12494 12495 if (!(NOT_DEVBSIZE(un))) { 12496 /* 12497 * If I/O is aligned, there is no need to involve RMW 12498 * (Read Modify Write); convert the logical block number 12499 * to the target's physical sector number. 12500 */ 12501 if (is_aligned) { 12502 xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno); 12503 } else { 12504 switch (un->un_f_rmw_type) { 12505 case SD_RMW_TYPE_RETURN_ERROR: 12506 bp->b_flags |= B_ERROR; 12507 goto error_exit; 12508 12509 case SD_RMW_TYPE_DEFAULT: 12510 mutex_enter(SD_MUTEX(un)); 12511 if (un->un_rmw_msg_timeid == NULL) { 12512 scsi_log(SD_DEVINFO(un), sd_label, 12513 CE_WARN, "I/O request is not " 12514 "aligned with the %d-byte disk " 12515 "sector size. It is handled " 12516 "through Read Modify Write, but " 12517 "the performance is very low.\n", 12518 un->un_tgt_blocksize); 12519 un->un_rmw_msg_timeid = 12520 timeout(sd_rmw_msg_print_handler, 12521 un, SD_RMW_MSG_PRINT_TIMEOUT); 12522 } else { 12523 un->un_rmw_incre_count++; 12524 } 12525 mutex_exit(SD_MUTEX(un)); 12526 break; 12527 12528 case SD_RMW_TYPE_NO_WARNING: 12529 default: 12530 break; 12531 } 12532 12533 nblocks = SD_TGT2SYSBLOCK(un, nblocks); 12534 partition_offset = SD_TGT2SYSBLOCK(un, 12535 partition_offset); 12536 } 12537 } 12538 12539 /* 12540 * blocknum is the starting block number of the request. At this 12541 * point it is still relative to the start of the minor device. 12542 */ 12543 blocknum = xp->xb_blkno; 12544 12545 /* 12546 * Legacy: If the starting block number is one past the last block 12547 * in the partition, do not set B_ERROR in the buf. 12548 */ 12549 if (blocknum == nblocks) { 12550 goto error_exit; 12551 } 12552 12553 /* 12554 * Confirm that the first block of the request lies within the 12555 * partition limits. Also the requested number of bytes must be 12556 * a multiple of the system block size.
12557 */ 12558 if ((blocknum < 0) || (blocknum >= nblocks) || 12559 ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) { 12560 bp->b_flags |= B_ERROR; 12561 goto error_exit; 12562 } 12563 12564 /* 12565 * If the requsted # blocks exceeds the available # blocks, that 12566 * is an overrun of the partition. 12567 */ 12568 if ((!NOT_DEVBSIZE(un)) && is_aligned) { 12569 requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 12570 } else { 12571 requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount); 12572 } 12573 12574 available_nblocks = (size_t)(nblocks - blocknum); 12575 ASSERT(nblocks >= blocknum); 12576 12577 if (requested_nblocks > available_nblocks) { 12578 size_t resid; 12579 12580 /* 12581 * Allocate an "overrun" buf to allow the request to proceed 12582 * for the amount of space available in the partition. The 12583 * amount not transferred will be added into the b_resid 12584 * when the operation is complete. The overrun buf 12585 * replaces the original buf here, and the original buf 12586 * is saved inside the overrun buf, for later use. 12587 */ 12588 if ((!NOT_DEVBSIZE(un)) && is_aligned) { 12589 resid = SD_TGTBLOCKS2BYTES(un, 12590 (offset_t)(requested_nblocks - available_nblocks)); 12591 } else { 12592 resid = SD_SYSBLOCKS2BYTES( 12593 (offset_t)(requested_nblocks - available_nblocks)); 12594 } 12595 12596 size_t count = bp->b_bcount - resid; 12597 /* 12598 * Note: count is an unsigned entity thus it'll NEVER 12599 * be less than 0 so ASSERT the original values are 12600 * correct. 12601 */ 12602 ASSERT(bp->b_bcount >= resid); 12603 12604 bp = sd_bioclone_alloc(bp, count, blocknum, 12605 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 12606 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12607 ASSERT(xp != NULL); 12608 } 12609 12610 /* At this point there should be no residual for this buf. */ 12611 ASSERT(bp->b_resid == 0); 12612 12613 /* Convert the block number to an absolute address. */ 12614 xp->xb_blkno += partition_offset; 12615 12616 SD_NEXT_IOSTART(index, un, bp); 12617 12618 SD_TRACE(SD_LOG_IO_PARTITION, un, 12619 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12620 12621 return; 12622 12623 error_exit: 12624 bp->b_resid = bp->b_bcount; 12625 SD_BEGIN_IODONE(index, un, bp); 12626 SD_TRACE(SD_LOG_IO_PARTITION, un, 12627 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12628 } 12629 12630 12631 /* 12632 * Function: sd_mapblockaddr_iodone 12633 * 12634 * Description: Completion-side processing for partition management. 12635 * 12636 * Context: May be called under interrupt context 12637 */ 12638 12639 static void 12640 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12641 { 12642 /* int partition; */ /* Not used, see below. */ 12643 ASSERT(un != NULL); 12644 ASSERT(bp != NULL); 12645 ASSERT(!mutex_owned(SD_MUTEX(un))); 12646 12647 SD_TRACE(SD_LOG_IO_PARTITION, un, 12648 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12649 12650 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12651 /* 12652 * We have an "overrun" buf to deal with... 12653 */ 12654 struct sd_xbuf *xp; 12655 struct buf *obp; /* ptr to the original buf */ 12656 12657 xp = SD_GET_XBUF(bp); 12658 ASSERT(xp != NULL); 12659 12660 /* Retrieve the pointer to the original buf */ 12661 obp = (struct buf *)xp->xb_private; 12662 ASSERT(obp != NULL); 12663 12664 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12665 bioerror(obp, bp->b_error); 12666 12667 sd_bioclone_free(bp); 12668 12669 /* 12670 * Get back the original buf. 
12671 * Note that since the restoration of xb_blkno below
12672 * was removed, the sd_xbuf is not needed.
12673 */
12674 bp = obp;
12675 /*
12676 * xp = SD_GET_XBUF(bp);
12677 * ASSERT(xp != NULL);
12678 */
12679 }
12680
12681 /*
12682 * Convert xp->xb_blkno back to a minor-device relative value.
12683 * Note: this has been commented out, as it is not needed in the
12684 * current implementation of the driver (ie, this function
12685 * is at the top of the layering chains, so the info will be
12686 * discarded) and it is in the "hot" IO path.
12687 *
12688 * partition = getminor(bp->b_edev) & SDPART_MASK;
12689 * xp->xb_blkno -= un->un_offset[partition];
12690 */
12691
12692 SD_NEXT_IODONE(index, un, bp);
12693
12694 SD_TRACE(SD_LOG_IO_PARTITION, un,
12695 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
12696 }
12697
12698
12699 /*
12700 * Function: sd_mapblocksize_iostart
12701 *
12702 * Description: Convert between system block size (un->un_sys_blocksize)
12703 * and target block size (un->un_tgt_blocksize).
12704 *
12705 * Context: Can sleep to allocate resources.
12706 *
12707 * Assumptions: A higher layer has already performed any partition validation,
12708 * and converted the xp->xb_blkno to an absolute value relative
12709 * to the start of the device.
12710 *
12711 * It is also assumed that the higher layer has implemented
12712 * an "overrun" mechanism for the case where the request would
12713 * read/write beyond the end of a partition. In this case we
12714 * assume (and ASSERT) that bp->b_resid == 0.
12715 *
12716 * Note: The implementation for this routine assumes the target
12717 * block size remains constant between allocation and transport.
12718 */
12719
12720 static void
12721 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
12722 {
12723 struct sd_mapblocksize_info *bsp;
12724 struct sd_xbuf *xp;
12725 offset_t first_byte;
12726 daddr_t start_block, end_block;
12727 daddr_t request_bytes;
12728 ushort_t is_aligned = FALSE;
12729
12730 ASSERT(un != NULL);
12731 ASSERT(bp != NULL);
12732 ASSERT(!mutex_owned(SD_MUTEX(un)));
12733 ASSERT(bp->b_resid == 0);
12734
12735 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
12736 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);
12737
12738 /*
12739 * For a non-writable CD, a write request is an error
12740 */
12741 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
12742 (un->un_f_mmc_writable_media == FALSE)) {
12743 bioerror(bp, EIO);
12744 bp->b_resid = bp->b_bcount;
12745 SD_BEGIN_IODONE(index, un, bp);
12746 return;
12747 }
12748
12749 /*
12750 * We do not need a shadow buf if the device is using
12751 * un->un_sys_blocksize as its block size or if bcount == 0.
12752 * In this case there is no layer-private data block allocated.
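 * (Concretely: a device whose target block size is DEV_BSIZE, i.e.
 * 512 bytes, never takes the shadow-buf path below; a 2048-byte-sector
 * CD-ROM, for example, does.)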
12753 */ 12754 if ((un->un_tgt_blocksize == DEV_BSIZE) || 12755 (bp->b_bcount == 0)) { 12756 goto done; 12757 } 12758 12759 #if defined(__i386) || defined(__amd64) 12760 /* We do not support non-block-aligned transfers for ROD devices */ 12761 ASSERT(!ISROD(un)); 12762 #endif 12763 12764 xp = SD_GET_XBUF(bp); 12765 ASSERT(xp != NULL); 12766 12767 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12768 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 12769 un->un_tgt_blocksize, DEV_BSIZE); 12770 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12771 "request start block:0x%x\n", xp->xb_blkno); 12772 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12773 "request len:0x%x\n", bp->b_bcount); 12774 12775 /* 12776 * Allocate the layer-private data area for the mapblocksize layer. 12777 * Layers are allowed to use the xp_private member of the sd_xbuf 12778 * struct to store the pointer to their layer-private data block, but 12779 * each layer also has the responsibility of restoring the prior 12780 * contents of xb_private before returning the buf/xbuf to the 12781 * higher layer that sent it. 12782 * 12783 * Here we save the prior contents of xp->xb_private into the 12784 * bsp->mbs_oprivate field of our layer-private data area. This value 12785 * is restored by sd_mapblocksize_iodone() just prior to freeing up 12786 * the layer-private area and returning the buf/xbuf to the layer 12787 * that sent it. 12788 * 12789 * Note that here we use kmem_zalloc for the allocation as there are 12790 * parts of the mapblocksize code that expect certain fields to be 12791 * zero unless explicitly set to a required value. 12792 */ 12793 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12794 bsp->mbs_oprivate = xp->xb_private; 12795 xp->xb_private = bsp; 12796 12797 /* 12798 * This treats the data on the disk (target) as an array of bytes. 12799 * first_byte is the byte offset, from the beginning of the device, 12800 * to the location of the request. This is converted from a 12801 * un->un_sys_blocksize block address to a byte offset, and then back 12802 * to a block address based upon a un->un_tgt_blocksize block size. 12803 * 12804 * xp->xb_blkno should be absolute upon entry into this function, 12805 * but, but it is based upon partitions that use the "system" 12806 * block size. It must be adjusted to reflect the block size of 12807 * the target. 12808 * 12809 * Note that end_block is actually the block that follows the last 12810 * block of the request, but that's what is needed for the computation. 12811 */ 12812 first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno); 12813 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 12814 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 12815 un->un_tgt_blocksize; 12816 12817 /* request_bytes is rounded up to a multiple of the target block size */ 12818 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 12819 12820 /* 12821 * See if the starting address of the request and the request 12822 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 12823 * then we do not need to allocate a shadow buf to handle the request. 12824 */ 12825 if (((first_byte % un->un_tgt_blocksize) == 0) && 12826 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 12827 is_aligned = TRUE; 12828 } 12829 12830 if ((bp->b_flags & B_READ) == 0) { 12831 /* 12832 * Lock the range for a write operation. 
An aligned request is 12833 * considered a simple write; otherwise the request must be a 12834 * read-modify-write. 12835 */ 12836 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 12837 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 12838 } 12839 12840 /* 12841 * Alloc a shadow buf if the request is not aligned. Also, this is 12842 * where the READ command is generated for a read-modify-write. (The 12843 * write phase is deferred until after the read completes.) 12844 */ 12845 if (is_aligned == FALSE) { 12846 12847 struct sd_mapblocksize_info *shadow_bsp; 12848 struct sd_xbuf *shadow_xp; 12849 struct buf *shadow_bp; 12850 12851 /* 12852 * Allocate the shadow buf and it associated xbuf. Note that 12853 * after this call the xb_blkno value in both the original 12854 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 12855 * same: absolute relative to the start of the device, and 12856 * adjusted for the target block size. The b_blkno in the 12857 * shadow buf will also be set to this value. We should never 12858 * change b_blkno in the original bp however. 12859 * 12860 * Note also that the shadow buf will always need to be a 12861 * READ command, regardless of whether the incoming command 12862 * is a READ or a WRITE. 12863 */ 12864 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 12865 xp->xb_blkno, 12866 (int (*)(struct buf *)) sd_mapblocksize_iodone); 12867 12868 shadow_xp = SD_GET_XBUF(shadow_bp); 12869 12870 /* 12871 * Allocate the layer-private data for the shadow buf. 12872 * (No need to preserve xb_private in the shadow xbuf.) 12873 */ 12874 shadow_xp->xb_private = shadow_bsp = 12875 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12876 12877 /* 12878 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 12879 * to figure out where the start of the user data is (based upon 12880 * the system block size) in the data returned by the READ 12881 * command (which will be based upon the target blocksize). Note 12882 * that this is only really used if the request is unaligned. 12883 */ 12884 bsp->mbs_copy_offset = (ssize_t)(first_byte - 12885 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 12886 ASSERT((bsp->mbs_copy_offset >= 0) && 12887 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 12888 12889 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 12890 12891 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 12892 12893 /* Transfer the wmap (if any) to the shadow buf */ 12894 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 12895 bsp->mbs_wmp = NULL; 12896 12897 /* 12898 * The shadow buf goes on from here in place of the 12899 * original buf. 12900 */ 12901 shadow_bsp->mbs_orig_bp = bp; 12902 bp = shadow_bp; 12903 } 12904 12905 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12906 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 12907 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12908 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 12909 request_bytes); 12910 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12911 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 12912 12913 done: 12914 SD_NEXT_IOSTART(index, un, bp); 12915 12916 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12917 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 12918 } 12919 12920 12921 /* 12922 * Function: sd_mapblocksize_iodone 12923 * 12924 * Description: Completion side processing for block-size mapping. 
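 * (This runs both as the b_iodone callback of the shadow bufs created
 * in sd_mapblocksize_iostart() and, for aligned requests that needed
 * no shadow buf, as an ordinary link in the iodone chain.)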
12925 *
12926 * Context: May be called under interrupt context
12927 */
12928
12929 static void
12930 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
12931 {
12932 struct sd_mapblocksize_info *bsp;
12933 struct sd_xbuf *xp;
12934 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */
12935 struct buf *orig_bp; /* ptr to the original buf */
12936 offset_t shadow_end;
12937 offset_t request_end;
12938 offset_t shadow_start;
12939 ssize_t copy_offset;
12940 size_t copy_length;
12941 size_t shortfall;
12942 uint_t is_write; /* TRUE if this bp is a WRITE */
12943 uint_t has_wmap; /* TRUE if this bp has a wmap */
12944
12945 ASSERT(un != NULL);
12946 ASSERT(bp != NULL);
12947
12948 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
12949 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);
12950
12951 /*
12952 * There is no shadow buf or layer-private data if the target is
12953 * using un->un_sys_blocksize as its block size or if bcount == 0.
12954 */
12955 if ((un->un_tgt_blocksize == DEV_BSIZE) ||
12956 (bp->b_bcount == 0)) {
12957 goto exit;
12958 }
12959
12960 xp = SD_GET_XBUF(bp);
12961 ASSERT(xp != NULL);
12962
12963 /* Retrieve the pointer to the layer-private data area from the xbuf. */
12964 bsp = xp->xb_private;
12965
12966 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
12967 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;
12968
12969 if (is_write) {
12970 /*
12971 * For a WRITE request we must free up the block range that
12972 * we have locked up. This holds regardless of whether this is
12973 * an aligned write request or a read-modify-write request.
12974 */
12975 sd_range_unlock(un, bsp->mbs_wmp);
12976 bsp->mbs_wmp = NULL;
12977 }
12978
12979 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
12980 /*
12981 * An aligned read or write command will have no shadow buf;
12982 * there is not much else to do with it.
12983 */
12984 goto done;
12985 }
12986
12987 orig_bp = bsp->mbs_orig_bp;
12988 ASSERT(orig_bp != NULL);
12989 orig_xp = SD_GET_XBUF(orig_bp);
12990 ASSERT(orig_xp != NULL);
12991 ASSERT(!mutex_owned(SD_MUTEX(un)));
12992
12993 if (!is_write && has_wmap) {
12994 /*
12995 * A READ with a wmap means this is the READ phase of a
12996 * read-modify-write. If an error occurred on the READ then
12997 * we do not proceed with the WRITE phase or copy any data.
12998 * Just release the write maps and return with an error.
12999 */
13000 if ((bp->b_resid != 0) || (bp->b_error != 0)) {
13001 orig_bp->b_resid = orig_bp->b_bcount;
13002 bioerror(orig_bp, bp->b_error);
13003 sd_range_unlock(un, bsp->mbs_wmp);
13004 goto freebuf_done;
13005 }
13006 }
13007
13008 /*
13009 * Here is where we set up to copy the data from the shadow buf
13010 * into the space associated with the original buf.
13011 *
13012 * To deal with the conversion between block sizes, these
13013 * computations treat the data as an array of bytes, with the
13014 * first byte (byte 0) corresponding to the first byte in the
13015 * first block on the disk.
13016 */
13017
13018 /*
13019 * shadow_start and shadow_end delimit the byte range of the
13020 * data returned with the shadow IO request.
13021 */
13022 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
13023 shadow_end = shadow_start + bp->b_bcount - bp->b_resid;
13024
13025 /*
13026 * copy_offset gives the offset (in bytes) from the start of the first
13027 * block of the READ request to the beginning of the data.
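 * For example (illustrative numbers, un->un_tgt_blocksize = 2048): a
 * request that begins 1536 bytes into the device gets a shadow READ
 * starting at target block 0, so copy_offset is 1536 and the caller's
 * data begins 1536 bytes into the shadow buffer.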
We retrieve
13028 * this value from mbs_copy_offset in the layer-private data, as it
13029 * was saved there by sd_mapblocksize_iostart(). copy_length gives the
13030 * amount of data to be copied (in bytes).
13031 */
13032 copy_offset = bsp->mbs_copy_offset;
13033 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize));
13034 copy_length = orig_bp->b_bcount;
13035 request_end = shadow_start + copy_offset + orig_bp->b_bcount;
13036
13037 /*
13038 * Set up the resid and error fields of orig_bp as appropriate.
13039 */
13040 if (shadow_end >= request_end) {
13041 /* We got all the requested data; set resid to zero */
13042 orig_bp->b_resid = 0;
13043 } else {
13044 /*
13045 * We failed to get enough data to fully satisfy the original
13046 * request. Just copy back whatever data we got and set
13047 * up the residual and error code as required.
13048 *
13049 * 'shortfall' is the amount by which the data received with the
13050 * shadow buf has "fallen short" of the requested amount.
13051 */
13052 shortfall = (size_t)(request_end - shadow_end);
13053
13054 if (shortfall > orig_bp->b_bcount) {
13055 /*
13056 * We did not get enough data to even partially
13057 * fulfill the original request. The residual is
13058 * equal to the amount requested.
13059 */
13060 orig_bp->b_resid = orig_bp->b_bcount;
13061 } else {
13062 /*
13063 * We did not get all the data that we requested
13064 * from the device, but we will try to return what
13065 * portion we did get.
13066 */
13067 orig_bp->b_resid = shortfall;
13068 }
13069 ASSERT(copy_length >= orig_bp->b_resid);
13070 copy_length -= orig_bp->b_resid;
13071 }
13072
13073 /* Propagate the error code from the shadow buf to the original buf */
13074 bioerror(orig_bp, bp->b_error);
13075
13076 if (is_write) {
13077 goto freebuf_done; /* No data copying for a WRITE */
13078 }
13079
13080 if (has_wmap) {
13081 /*
13082 * This is a READ command from the READ phase of a
13083 * read-modify-write request. We have to copy the data given
13084 * by the user OVER the data returned by the READ command,
13085 * then convert the command from a READ to a WRITE and send
13086 * it back to the target.
13087 */
13088 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
13089 copy_length);
13090
13091 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */
13092
13093 /*
13094 * Dispatch the WRITE command to the taskq thread, which
13095 * will in turn send the command to the target. When the
13096 * WRITE command completes, we (sd_mapblocksize_iodone())
13097 * will get called again as part of the iodone chain
13098 * processing for it. Note that we will still be dealing
13099 * with the shadow buf at that point.
13100 */
13101 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
13102 KM_NOSLEEP) != 0) {
13103 /*
13104 * Dispatch was successful so we are done. Return
13105 * without going any higher up the iodone chain. Do
13106 * not free up any layer-private data until after the
13107 * WRITE completes.
13108 */
13109 return;
13110 }
13111
13112 /*
13113 * Dispatch of the WRITE command failed; set up the error
13114 * condition and send this IO back up the iodone chain.
13115 */
13116 bioerror(orig_bp, EIO);
13117 orig_bp->b_resid = orig_bp->b_bcount;
13118
13119 } else {
13120 /*
13121 * This is a regular READ request (ie, not a RMW). Copy the
13122 * data from the shadow buf into the original buf.
The 13123 * copy_offset compensates for any "misalignment" between the 13124 * shadow buf (with its un->un_tgt_blocksize blocks) and the 13125 * original buf (with its un->un_sys_blocksize blocks). 13126 */ 13127 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 13128 copy_length); 13129 } 13130 13131 freebuf_done: 13132 13133 /* 13134 * At this point we still have both the shadow buf AND the original 13135 * buf to deal with, as well as the layer-private data area in each. 13136 * Local variables are as follows: 13137 * 13138 * bp -- points to shadow buf 13139 * xp -- points to xbuf of shadow buf 13140 * bsp -- points to layer-private data area of shadow buf 13141 * orig_bp -- points to original buf 13142 * 13143 * First free the shadow buf and its associated xbuf, then free the 13144 * layer-private data area from the shadow buf. There is no need to 13145 * restore xb_private in the shadow xbuf. 13146 */ 13147 sd_shadow_buf_free(bp); 13148 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 13149 13150 /* 13151 * Now update the local variables to point to the original buf, xbuf, 13152 * and layer-private area. 13153 */ 13154 bp = orig_bp; 13155 xp = SD_GET_XBUF(bp); 13156 ASSERT(xp != NULL); 13157 ASSERT(xp == orig_xp); 13158 bsp = xp->xb_private; 13159 ASSERT(bsp != NULL); 13160 13161 done: 13162 /* 13163 * Restore xb_private to whatever it was set to by the next higher 13164 * layer in the chain, then free the layer-private data area. 13165 */ 13166 xp->xb_private = bsp->mbs_oprivate; 13167 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 13168 13169 exit: 13170 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 13171 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 13172 13173 SD_NEXT_IODONE(index, un, bp); 13174 } 13175 13176 13177 /* 13178 * Function: sd_checksum_iostart 13179 * 13180 * Description: A stub function for a layer that's currently not used. 13181 * For now just a placeholder. 13182 * 13183 * Context: Kernel thread context 13184 */ 13185 13186 static void 13187 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 13188 { 13189 ASSERT(un != NULL); 13190 ASSERT(bp != NULL); 13191 ASSERT(!mutex_owned(SD_MUTEX(un))); 13192 SD_NEXT_IOSTART(index, un, bp); 13193 } 13194 13195 13196 /* 13197 * Function: sd_checksum_iodone 13198 * 13199 * Description: A stub function for a layer that's currently not used. 13200 * For now just a placeholder. 13201 * 13202 * Context: May be called under interrupt context 13203 */ 13204 13205 static void 13206 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 13207 { 13208 ASSERT(un != NULL); 13209 ASSERT(bp != NULL); 13210 ASSERT(!mutex_owned(SD_MUTEX(un))); 13211 SD_NEXT_IODONE(index, un, bp); 13212 } 13213 13214 13215 /* 13216 * Function: sd_checksum_uscsi_iostart 13217 * 13218 * Description: A stub function for a layer that's currently not used. 13219 * For now just a placeholder. 13220 * 13221 * Context: Kernel thread context 13222 */ 13223 13224 static void 13225 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 13226 { 13227 ASSERT(un != NULL); 13228 ASSERT(bp != NULL); 13229 ASSERT(!mutex_owned(SD_MUTEX(un))); 13230 SD_NEXT_IOSTART(index, un, bp); 13231 } 13232 13233 13234 /* 13235 * Function: sd_checksum_uscsi_iodone 13236 * 13237 * Description: A stub function for a layer that's currently not used. 13238 * For now just a placeholder. 
13239 * 13240 * Context: May be called under interrupt context 13241 */ 13242 13243 static void 13244 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 13245 { 13246 ASSERT(un != NULL); 13247 ASSERT(bp != NULL); 13248 ASSERT(!mutex_owned(SD_MUTEX(un))); 13249 SD_NEXT_IODONE(index, un, bp); 13250 } 13251 13252 13253 /* 13254 * Function: sd_pm_iostart 13255 * 13256 * Description: iostart-side routine for Power mangement. 13257 * 13258 * Context: Kernel thread context 13259 */ 13260 13261 static void 13262 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 13263 { 13264 ASSERT(un != NULL); 13265 ASSERT(bp != NULL); 13266 ASSERT(!mutex_owned(SD_MUTEX(un))); 13267 ASSERT(!mutex_owned(&un->un_pm_mutex)); 13268 13269 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 13270 13271 if (sd_pm_entry(un) != DDI_SUCCESS) { 13272 /* 13273 * Set up to return the failed buf back up the 'iodone' 13274 * side of the calling chain. 13275 */ 13276 bioerror(bp, EIO); 13277 bp->b_resid = bp->b_bcount; 13278 13279 SD_BEGIN_IODONE(index, un, bp); 13280 13281 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 13282 return; 13283 } 13284 13285 SD_NEXT_IOSTART(index, un, bp); 13286 13287 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 13288 } 13289 13290 13291 /* 13292 * Function: sd_pm_iodone 13293 * 13294 * Description: iodone-side routine for power mangement. 13295 * 13296 * Context: may be called from interrupt context 13297 */ 13298 13299 static void 13300 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 13301 { 13302 ASSERT(un != NULL); 13303 ASSERT(bp != NULL); 13304 ASSERT(!mutex_owned(&un->un_pm_mutex)); 13305 13306 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 13307 13308 /* 13309 * After attach the following flag is only read, so don't 13310 * take the penalty of acquiring a mutex for it. 13311 */ 13312 if (un->un_f_pm_is_enabled == TRUE) { 13313 sd_pm_exit(un); 13314 } 13315 13316 SD_NEXT_IODONE(index, un, bp); 13317 13318 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 13319 } 13320 13321 13322 /* 13323 * Function: sd_core_iostart 13324 * 13325 * Description: Primary driver function for enqueuing buf(9S) structs from 13326 * the system and initiating IO to the target device 13327 * 13328 * Context: Kernel thread context. Can sleep. 13329 * 13330 * Assumptions: - The given xp->xb_blkno is absolute 13331 * (ie, relative to the start of the device). 13332 * - The IO is to be done using the native blocksize of 13333 * the device, as specified in un->un_tgt_blocksize. 13334 */ 13335 /* ARGSUSED */ 13336 static void 13337 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 13338 { 13339 struct sd_xbuf *xp; 13340 13341 ASSERT(un != NULL); 13342 ASSERT(bp != NULL); 13343 ASSERT(!mutex_owned(SD_MUTEX(un))); 13344 ASSERT(bp->b_resid == 0); 13345 13346 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 13347 13348 xp = SD_GET_XBUF(bp); 13349 ASSERT(xp != NULL); 13350 13351 mutex_enter(SD_MUTEX(un)); 13352 13353 /* 13354 * If we are currently in the failfast state, fail any new IO 13355 * that has B_FAILFAST set, then return. 13356 */ 13357 if ((bp->b_flags & B_FAILFAST) && 13358 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 13359 mutex_exit(SD_MUTEX(un)); 13360 bioerror(bp, EIO); 13361 bp->b_resid = bp->b_bcount; 13362 SD_BEGIN_IODONE(index, un, bp); 13363 return; 13364 } 13365 13366 if (SD_IS_DIRECT_PRIORITY(xp)) { 13367 /* 13368 * Priority command -- transport it immediately. 
13369 * 13370 * Note: We may want to assert that USCSI_DIAGNOSE is set, 13371 * because all direct priority commands should be associated 13372 * with error recovery actions which we don't want to retry. 13373 */ 13374 sd_start_cmds(un, bp); 13375 } else { 13376 /* 13377 * Normal command -- add it to the wait queue, then start 13378 * transporting commands from the wait queue. 13379 */ 13380 sd_add_buf_to_waitq(un, bp); 13381 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 13382 sd_start_cmds(un, NULL); 13383 } 13384 13385 mutex_exit(SD_MUTEX(un)); 13386 13387 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 13388 } 13389 13390 13391 /* 13392 * Function: sd_init_cdb_limits 13393 * 13394 * Description: This is to handle scsi_pkt initialization differences 13395 * between the driver platforms. 13396 * 13397 * Legacy behaviors: 13398 * 13399 * If the block number or the sector count exceeds the 13400 * capabilities of a Group 0 command, shift over to a 13401 * Group 1 command. We don't blindly use Group 1 13402 * commands because a) some drives (CDC Wren IVs) get a 13403 * bit confused, and b) there is probably a fair amount 13404 * of speed difference for a target to receive and decode 13405 * a 10 byte command instead of a 6 byte command. 13406 * 13407 * The xfer time difference of 6 vs 10 byte CDBs is 13408 * still significant so this code is still worthwhile. 13409 * 10 byte CDBs are very inefficient with the fas HBA driver 13410 * and older disks. Each CDB byte took 1 usec with some 13411 * popular disks. 13412 * 13413 * Context: Must be called at attach time 13414 */ 13415 13416 static void 13417 sd_init_cdb_limits(struct sd_lun *un) 13418 { 13419 int hba_cdb_limit; 13420 13421 /* 13422 * Use CDB_GROUP1 commands for most devices except for 13423 * parallel SCSI fixed drives in which case we get better 13424 * performance using CDB_GROUP0 commands (where applicable). 13425 */ 13426 un->un_mincdb = SD_CDB_GROUP1; 13427 #if !defined(__fibre) 13428 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 13429 !un->un_f_has_removable_media) { 13430 un->un_mincdb = SD_CDB_GROUP0; 13431 } 13432 #endif 13433 13434 /* 13435 * Try to read the max-cdb-length supported by HBA. 13436 */ 13437 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 13438 if (0 >= un->un_max_hba_cdb) { 13439 un->un_max_hba_cdb = CDB_GROUP4; 13440 hba_cdb_limit = SD_CDB_GROUP4; 13441 } else if (0 < un->un_max_hba_cdb && 13442 un->un_max_hba_cdb < CDB_GROUP1) { 13443 hba_cdb_limit = SD_CDB_GROUP0; 13444 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 13445 un->un_max_hba_cdb < CDB_GROUP5) { 13446 hba_cdb_limit = SD_CDB_GROUP1; 13447 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 13448 un->un_max_hba_cdb < CDB_GROUP4) { 13449 hba_cdb_limit = SD_CDB_GROUP5; 13450 } else { 13451 hba_cdb_limit = SD_CDB_GROUP4; 13452 } 13453 13454 /* 13455 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 13456 * commands for fixed disks unless we are building for a 32 bit 13457 * kernel. 13458 */ 13459 #ifdef _LP64 13460 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13461 min(hba_cdb_limit, SD_CDB_GROUP4); 13462 #else 13463 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13464 min(hba_cdb_limit, SD_CDB_GROUP1); 13465 #endif 13466 13467 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 13468 ? sizeof (struct scsi_arq_status) : 1); 13469 un->un_cmd_timeout = (ushort_t)sd_io_time; 13470 un->un_uscsi_timeout = ((ISCD(un)) ? 
2 : 1) * un->un_cmd_timeout;
13471 }
13472
13473
13474 /*
13475 * Function: sd_initpkt_for_buf
13476 *
13477 * Description: Allocate and initialize for transport a scsi_pkt struct,
13478 * based upon the info specified in the given buf struct.
13479 *
13480 * Assumes the xb_blkno in the request is absolute (ie,
13481 * relative to the start of the device, NOT the partition).
13482 * Also assumes that the request is using the native block
13483 * size of the device (as returned by the READ CAPACITY
13484 * command).
13485 *
13486 * Return Code: SD_PKT_ALLOC_SUCCESS
13487 * SD_PKT_ALLOC_FAILURE
13488 * SD_PKT_ALLOC_FAILURE_NO_DMA
13489 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13490 *
13491 * Context: Kernel thread and may be called from software interrupt context
13492 * as part of a sdrunout callback. This function may not block or
13493 * call routines that block
13494 */
13495
13496 static int
13497 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
13498 {
13499 struct sd_xbuf *xp;
13500 struct scsi_pkt *pktp = NULL;
13501 struct sd_lun *un;
13502 size_t blockcount;
13503 daddr_t startblock;
13504 int rval;
13505 int cmd_flags;
13506
13507 ASSERT(bp != NULL);
13508 ASSERT(pktpp != NULL);
13509 xp = SD_GET_XBUF(bp);
13510 ASSERT(xp != NULL);
13511 un = SD_GET_UN(bp);
13512 ASSERT(un != NULL);
13513 ASSERT(mutex_owned(SD_MUTEX(un)));
13514 ASSERT(bp->b_resid == 0);
13515
13516 SD_TRACE(SD_LOG_IO_CORE, un,
13517 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);
13518
13519 mutex_exit(SD_MUTEX(un));
13520
13521 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13522 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
13523 /*
13524 * Already have a scsi_pkt -- just need DMA resources.
13525 * We must recompute the CDB in case the mapping returns
13526 * a nonzero pkt_resid.
13527 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
13528 * that is being retried, the unmap/remap of the DMA resources
13529 * will result in the entire transfer starting over again
13530 * from the very first block.
13531 */
13532 ASSERT(xp->xb_pktp != NULL);
13533 pktp = xp->xb_pktp;
13534 } else {
13535 pktp = NULL;
13536 }
13537 #endif /* __i386 || __amd64 */
13538
13539 startblock = xp->xb_blkno; /* Absolute block num. */
13540 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
13541
13542 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);
13543
13544 /*
13545 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
13546 * call scsi_init_pkt, and build the CDB.
13547 */
13548 rval = sd_setup_rw_pkt(un, &pktp, bp,
13549 cmd_flags, sdrunout, (caddr_t)un,
13550 startblock, blockcount);
13551
13552 if (rval == 0) {
13553 /*
13554 * Success.
13555 *
13556 * If partial DMA is being used and is required for this transfer,
13557 * set it up here.
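 * For example (illustrative numbers): if b_bcount is 1 MB but the HBA
 * could map only 256 KB, scsi_init_pkt() returns with pkt_resid set to
 * the unmapped 768 KB; we save that in xb_dma_resid for the next chunk
 * (see sd_setup_next_rw_pkt()) and rezero pkt_resid below.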
13558 */ 13559 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 13560 (pktp->pkt_resid != 0)) { 13561 13562 /* 13563 * Save the CDB length and pkt_resid for the 13564 * next xfer 13565 */ 13566 xp->xb_dma_resid = pktp->pkt_resid; 13567 13568 /* rezero resid */ 13569 pktp->pkt_resid = 0; 13570 13571 } else { 13572 xp->xb_dma_resid = 0; 13573 } 13574 13575 pktp->pkt_flags = un->un_tagflags; 13576 pktp->pkt_time = un->un_cmd_timeout; 13577 pktp->pkt_comp = sdintr; 13578 13579 pktp->pkt_private = bp; 13580 *pktpp = pktp; 13581 13582 SD_TRACE(SD_LOG_IO_CORE, un, 13583 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 13584 13585 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13586 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 13587 #endif 13588 13589 mutex_enter(SD_MUTEX(un)); 13590 return (SD_PKT_ALLOC_SUCCESS); 13591 13592 } 13593 13594 /* 13595 * SD_PKT_ALLOC_FAILURE is the only expected failure code 13596 * from sd_setup_rw_pkt. 13597 */ 13598 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 13599 13600 if (rval == SD_PKT_ALLOC_FAILURE) { 13601 *pktpp = NULL; 13602 /* 13603 * Set the driver state to RWAIT to indicate the driver 13604 * is waiting on resource allocations. The driver will not 13605 * suspend, pm_suspend, or detatch while the state is RWAIT. 13606 */ 13607 mutex_enter(SD_MUTEX(un)); 13608 New_state(un, SD_STATE_RWAIT); 13609 13610 SD_ERROR(SD_LOG_IO_CORE, un, 13611 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp); 13612 13613 if ((bp->b_flags & B_ERROR) != 0) { 13614 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13615 } 13616 return (SD_PKT_ALLOC_FAILURE); 13617 } else { 13618 /* 13619 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13620 * 13621 * This should never happen. Maybe someone messed with the 13622 * kernel's minphys? 13623 */ 13624 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13625 "Request rejected: too large for CDB: " 13626 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 13627 SD_ERROR(SD_LOG_IO_CORE, un, 13628 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 13629 mutex_enter(SD_MUTEX(un)); 13630 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13631 13632 } 13633 } 13634 13635 13636 /* 13637 * Function: sd_destroypkt_for_buf 13638 * 13639 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 13640 * 13641 * Context: Kernel thread or interrupt context 13642 */ 13643 13644 static void 13645 sd_destroypkt_for_buf(struct buf *bp) 13646 { 13647 ASSERT(bp != NULL); 13648 ASSERT(SD_GET_UN(bp) != NULL); 13649 13650 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13651 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 13652 13653 ASSERT(SD_GET_PKTP(bp) != NULL); 13654 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13655 13656 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13657 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 13658 } 13659 13660 /* 13661 * Function: sd_setup_rw_pkt 13662 * 13663 * Description: Determines appropriate CDB group for the requested LBA 13664 * and transfer length, calls scsi_init_pkt, and builds 13665 * the CDB. Do not use for partial DMA transfers except 13666 * for the initial transfer since the CDB size must 13667 * remain constant. 13668 * 13669 * Context: Kernel thread and may be called from software interrupt 13670 * context as part of a sdrunout callback. 
This function may not 13671 * block or call routines that block 13672 */ 13673 13674 13675 int 13676 sd_setup_rw_pkt(struct sd_lun *un, 13677 struct scsi_pkt **pktpp, struct buf *bp, int flags, 13678 int (*callback)(caddr_t), caddr_t callback_arg, 13679 diskaddr_t lba, uint32_t blockcount) 13680 { 13681 struct scsi_pkt *return_pktp; 13682 union scsi_cdb *cdbp; 13683 struct sd_cdbinfo *cp = NULL; 13684 int i; 13685 13686 /* 13687 * See which size CDB to use, based upon the request. 13688 */ 13689 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 13690 13691 /* 13692 * Check lba and block count against sd_cdbtab limits. 13693 * In the partial DMA case, we have to use the same size 13694 * CDB for all the transfers. Check lba + blockcount 13695 * against the max LBA so we know that segment of the 13696 * transfer can use the CDB we select. 13697 */ 13698 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 13699 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 13700 13701 /* 13702 * The command will fit into the CDB type 13703 * specified by sd_cdbtab[i]. 13704 */ 13705 cp = sd_cdbtab + i; 13706 13707 /* 13708 * Call scsi_init_pkt so we can fill in the 13709 * CDB. 13710 */ 13711 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 13712 bp, cp->sc_grpcode, un->un_status_len, 0, 13713 flags, callback, callback_arg); 13714 13715 if (return_pktp != NULL) { 13716 13717 /* 13718 * Return new value of pkt 13719 */ 13720 *pktpp = return_pktp; 13721 13722 /* 13723 * To be safe, zero the CDB insuring there is 13724 * no leftover data from a previous command. 13725 */ 13726 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 13727 13728 /* 13729 * Handle partial DMA mapping 13730 */ 13731 if (return_pktp->pkt_resid != 0) { 13732 13733 /* 13734 * Not going to xfer as many blocks as 13735 * originally expected 13736 */ 13737 blockcount -= 13738 SD_BYTES2TGTBLOCKS(un, 13739 return_pktp->pkt_resid); 13740 } 13741 13742 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 13743 13744 /* 13745 * Set command byte based on the CDB 13746 * type we matched. 13747 */ 13748 cdbp->scc_cmd = cp->sc_grpmask | 13749 ((bp->b_flags & B_READ) ? 13750 SCMD_READ : SCMD_WRITE); 13751 13752 SD_FILL_SCSI1_LUN(un, return_pktp); 13753 13754 /* 13755 * Fill in LBA and length 13756 */ 13757 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 13758 (cp->sc_grpcode == CDB_GROUP4) || 13759 (cp->sc_grpcode == CDB_GROUP0) || 13760 (cp->sc_grpcode == CDB_GROUP5)); 13761 13762 if (cp->sc_grpcode == CDB_GROUP1) { 13763 FORMG1ADDR(cdbp, lba); 13764 FORMG1COUNT(cdbp, blockcount); 13765 return (0); 13766 } else if (cp->sc_grpcode == CDB_GROUP4) { 13767 FORMG4LONGADDR(cdbp, lba); 13768 FORMG4COUNT(cdbp, blockcount); 13769 return (0); 13770 } else if (cp->sc_grpcode == CDB_GROUP0) { 13771 FORMG0ADDR(cdbp, lba); 13772 FORMG0COUNT(cdbp, blockcount); 13773 return (0); 13774 } else if (cp->sc_grpcode == CDB_GROUP5) { 13775 FORMG5ADDR(cdbp, lba); 13776 FORMG5COUNT(cdbp, blockcount); 13777 return (0); 13778 } 13779 13780 /* 13781 * It should be impossible to not match one 13782 * of the CDB types above, so we should never 13783 * reach this point. Set the CDB command byte 13784 * to test-unit-ready to avoid writing 13785 * to somewhere we don't intend. 13786 */ 13787 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 13788 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13789 } else { 13790 /* 13791 * Couldn't get scsi_pkt 13792 */ 13793 return (SD_PKT_ALLOC_FAILURE); 13794 } 13795 } 13796 } 13797 13798 /* 13799 * None of the available CDB types were suitable. 
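 * (For reference: a Group 0 CDB carries a 21-bit LBA and an 8-bit
 * count, Group 1 a 32-bit LBA and a 16-bit count, Group 5 a 32-bit LBA
 * and a 32-bit count, and Group 4 a 64-bit LBA and a 32-bit count;
 * these are the limits encoded in sd_cdbtab's sc_maxlba/sc_maxlen.)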
This really 13800 * should never happen: on a 64 bit system we support 13801 * READ16/WRITE16 which will hold an entire 64 bit disk address 13802 * and on a 32 bit system we will refuse to bind to a device 13803 * larger than 2TB so addresses will never be larger than 32 bits. 13804 */ 13805 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13806 } 13807 13808 /* 13809 * Function: sd_setup_next_rw_pkt 13810 * 13811 * Description: Setup packet for partial DMA transfers, except for the 13812 * initial transfer. sd_setup_rw_pkt should be used for 13813 * the initial transfer. 13814 * 13815 * Context: Kernel thread and may be called from interrupt context. 13816 */ 13817 13818 int 13819 sd_setup_next_rw_pkt(struct sd_lun *un, 13820 struct scsi_pkt *pktp, struct buf *bp, 13821 diskaddr_t lba, uint32_t blockcount) 13822 { 13823 uchar_t com; 13824 union scsi_cdb *cdbp; 13825 uchar_t cdb_group_id; 13826 13827 ASSERT(pktp != NULL); 13828 ASSERT(pktp->pkt_cdbp != NULL); 13829 13830 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 13831 com = cdbp->scc_cmd; 13832 cdb_group_id = CDB_GROUPID(com); 13833 13834 ASSERT((cdb_group_id == CDB_GROUPID_0) || 13835 (cdb_group_id == CDB_GROUPID_1) || 13836 (cdb_group_id == CDB_GROUPID_4) || 13837 (cdb_group_id == CDB_GROUPID_5)); 13838 13839 /* 13840 * Move pkt to the next portion of the xfer. 13841 * func is NULL_FUNC so we do not have to release 13842 * the disk mutex here. 13843 */ 13844 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 13845 NULL_FUNC, NULL) == pktp) { 13846 /* Success. Handle partial DMA */ 13847 if (pktp->pkt_resid != 0) { 13848 blockcount -= 13849 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 13850 } 13851 13852 cdbp->scc_cmd = com; 13853 SD_FILL_SCSI1_LUN(un, pktp); 13854 if (cdb_group_id == CDB_GROUPID_1) { 13855 FORMG1ADDR(cdbp, lba); 13856 FORMG1COUNT(cdbp, blockcount); 13857 return (0); 13858 } else if (cdb_group_id == CDB_GROUPID_4) { 13859 FORMG4LONGADDR(cdbp, lba); 13860 FORMG4COUNT(cdbp, blockcount); 13861 return (0); 13862 } else if (cdb_group_id == CDB_GROUPID_0) { 13863 FORMG0ADDR(cdbp, lba); 13864 FORMG0COUNT(cdbp, blockcount); 13865 return (0); 13866 } else if (cdb_group_id == CDB_GROUPID_5) { 13867 FORMG5ADDR(cdbp, lba); 13868 FORMG5COUNT(cdbp, blockcount); 13869 return (0); 13870 } 13871 13872 /* Unreachable */ 13873 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13874 } 13875 13876 /* 13877 * Error setting up next portion of cmd transfer. 13878 * Something is definitely very wrong and this 13879 * should not happen. 13880 */ 13881 return (SD_PKT_ALLOC_FAILURE); 13882 } 13883 13884 /* 13885 * Function: sd_initpkt_for_uscsi 13886 * 13887 * Description: Allocate and initialize for transport a scsi_pkt struct, 13888 * based upon the info specified in the given uscsi_cmd struct. 13889 * 13890 * Return Code: SD_PKT_ALLOC_SUCCESS 13891 * SD_PKT_ALLOC_FAILURE 13892 * SD_PKT_ALLOC_FAILURE_NO_DMA 13893 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13894 * 13895 * Context: Kernel thread and may be called from software interrupt context 13896 * as part of a sdrunout callback. 
This function may not block or
13897 * call routines that block.
13898 */
13899
13900 static int
13901 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
13902 {
13903 struct uscsi_cmd *uscmd;
13904 struct sd_xbuf *xp;
13905 struct scsi_pkt *pktp;
13906 struct sd_lun *un;
13907 uint32_t flags = 0;
13908
13909 ASSERT(bp != NULL);
13910 ASSERT(pktpp != NULL);
13911 xp = SD_GET_XBUF(bp);
13912 ASSERT(xp != NULL);
13913 un = SD_GET_UN(bp);
13914 ASSERT(un != NULL);
13915 ASSERT(mutex_owned(SD_MUTEX(un)));
13916
13917 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
13918 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
13919 ASSERT(uscmd != NULL);
13920
13921 SD_TRACE(SD_LOG_IO_CORE, un,
13922 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);
13923
13924 /*
13925 * Allocate the scsi_pkt for the command.
13926 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
13927 * during scsi_init_pkt time and will continue to use the
13928 * same path as long as the same scsi_pkt is used without
13929 * intervening scsi_dmafree(). Since a uscsi command does
13930 * not call scsi_dmafree() before retrying a failed command,
13931 * it is necessary to make sure the PKT_DMA_PARTIAL flag is
13932 * NOT set, so that scsi_vhci can use another available path
13933 * for the retry. Besides, a uscsi command does not allow DMA
13934 * breakup, so there is no need to set the PKT_DMA_PARTIAL flag.
13935 */
13936 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
13937 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
13938 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
13939 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
13940 - sizeof (struct scsi_extended_sense)), 0,
13941 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
13942 sdrunout, (caddr_t)un);
13943 } else {
13944 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
13945 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
13946 sizeof (struct scsi_arq_status), 0,
13947 (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
13948 sdrunout, (caddr_t)un);
13949 }
13950
13951 if (pktp == NULL) {
13952 *pktpp = NULL;
13953 /*
13954 * Set the driver state to RWAIT to indicate the driver
13955 * is waiting on resource allocations. The driver will not
13956 * suspend, pm_suspend, or detach while the state is RWAIT.
13957 */
13958 New_state(un, SD_STATE_RWAIT);
13959
13960 SD_ERROR(SD_LOG_IO_CORE, un,
13961 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);
13962
13963 if ((bp->b_flags & B_ERROR) != 0) {
13964 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
13965 }
13966 return (SD_PKT_ALLOC_FAILURE);
13967 }
13968
13969 /*
13970 * We do not do DMA breakup for USCSI commands, so return failure
13971 * here if all the needed DMA resources were not allocated.
13972 */
13973 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
13974 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
13975 scsi_destroy_pkt(pktp);
13976 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
13977 "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
13978 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
13979 }
13980
13981 /* Init the cdb from the given uscsi struct */
13982 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
13983 uscmd->uscsi_cdb[0], 0, 0, 0);
13984
13985 SD_FILL_SCSI1_LUN(un, pktp);
13986
13987 /*
13988 * Set up the optional USCSI flags. See the uscsi (7I) man page
13989 * for a listing of the supported flags.
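 * For example, a caller that sets USCSI_SILENT | USCSI_DIAGNOSE ends
 * up with FLAG_SILENT | FLAG_DIAGNOSE in pkt_flags: warning messages
 * for the command are suppressed, and failures are returned without
 * the driver's usual retry/recovery processing.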
13990 */ 13991 13992 if (uscmd->uscsi_flags & USCSI_SILENT) { 13993 flags |= FLAG_SILENT; 13994 } 13995 13996 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 13997 flags |= FLAG_DIAGNOSE; 13998 } 13999 14000 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 14001 flags |= FLAG_ISOLATE; 14002 } 14003 14004 if (un->un_f_is_fibre == FALSE) { 14005 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 14006 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 14007 } 14008 } 14009 14010 /* 14011 * Set the pkt flags here so we save time later. 14012 * Note: These flags are NOT in the uscsi man page!!! 14013 */ 14014 if (uscmd->uscsi_flags & USCSI_HEAD) { 14015 flags |= FLAG_HEAD; 14016 } 14017 14018 if (uscmd->uscsi_flags & USCSI_NOINTR) { 14019 flags |= FLAG_NOINTR; 14020 } 14021 14022 /* 14023 * For tagged queueing, things get a bit complicated. 14024 * Check first for head of queue and last for ordered queue. 14025 * If neither head nor order, use the default driver tag flags. 14026 */ 14027 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 14028 if (uscmd->uscsi_flags & USCSI_HTAG) { 14029 flags |= FLAG_HTAG; 14030 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 14031 flags |= FLAG_OTAG; 14032 } else { 14033 flags |= un->un_tagflags & FLAG_TAGMASK; 14034 } 14035 } 14036 14037 if (uscmd->uscsi_flags & USCSI_NODISCON) { 14038 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 14039 } 14040 14041 pktp->pkt_flags = flags; 14042 14043 /* Transfer uscsi information to scsi_pkt */ 14044 (void) scsi_uscsi_pktinit(uscmd, pktp); 14045 14046 /* Copy the caller's CDB into the pkt... */ 14047 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 14048 14049 if (uscmd->uscsi_timeout == 0) { 14050 pktp->pkt_time = un->un_uscsi_timeout; 14051 } else { 14052 pktp->pkt_time = uscmd->uscsi_timeout; 14053 } 14054 14055 /* need it later to identify USCSI request in sdintr */ 14056 xp->xb_pkt_flags |= SD_XB_USCSICMD; 14057 14058 xp->xb_sense_resid = uscmd->uscsi_rqresid; 14059 14060 pktp->pkt_private = bp; 14061 pktp->pkt_comp = sdintr; 14062 *pktpp = pktp; 14063 14064 SD_TRACE(SD_LOG_IO_CORE, un, 14065 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 14066 14067 return (SD_PKT_ALLOC_SUCCESS); 14068 } 14069 14070 14071 /* 14072 * Function: sd_destroypkt_for_uscsi 14073 * 14074 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 14075 * IOs.. Also saves relevant info into the associated uscsi_cmd 14076 * struct. 
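 * (For example, after a CHECK CONDITION the caller's uscsi_rqbuf
 * receives SENSE_LENGTH bytes of sense data -- or up to
 * MAX_SENSE_LENGTH when uscsi_rqlen asked for more -- along with
 * uscsi_rqstatus and uscsi_rqresid; see the copy-back code below.)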
14077 *
14078 * Context: May be called under interrupt context
14079 */
14080
14081 static void
14082 sd_destroypkt_for_uscsi(struct buf *bp)
14083 {
14084 struct uscsi_cmd *uscmd;
14085 struct sd_xbuf *xp;
14086 struct scsi_pkt *pktp;
14087 struct sd_lun *un;
14088 struct sd_uscsi_info *suip;
14089
14090 ASSERT(bp != NULL);
14091 xp = SD_GET_XBUF(bp);
14092 ASSERT(xp != NULL);
14093 un = SD_GET_UN(bp);
14094 ASSERT(un != NULL);
14095 ASSERT(!mutex_owned(SD_MUTEX(un)));
14096 pktp = SD_GET_PKTP(bp);
14097 ASSERT(pktp != NULL);
14098
14099 SD_TRACE(SD_LOG_IO_CORE, un,
14100 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);
14101
14102 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14103 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14104 ASSERT(uscmd != NULL);
14105
14106 /* Save the status and the residual into the uscsi_cmd struct */
14107 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
14108 uscmd->uscsi_resid = bp->b_resid;
14109
14110 /* Transfer scsi_pkt information to uscsi */
14111 (void) scsi_uscsi_pktfini(pktp, uscmd);
14112
14113 /*
14114 * If enabled, copy any saved sense data into the area specified
14115 * by the uscsi command.
14116 */
14117 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
14118 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
14119 /*
14120 * Note: uscmd->uscsi_rqbuf should always point to a buffer
14121 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
14122 */
14123 uscmd->uscsi_rqstatus = xp->xb_sense_status;
14124 uscmd->uscsi_rqresid = xp->xb_sense_resid;
14125 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14126 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14127 MAX_SENSE_LENGTH);
14128 } else {
14129 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14130 SENSE_LENGTH);
14131 }
14132 }
14133 /*
14134 * The following assignments are for SCSI FMA.
14135 */
14136 ASSERT(xp->xb_private != NULL);
14137 suip = (struct sd_uscsi_info *)xp->xb_private;
14138 suip->ui_pkt_reason = pktp->pkt_reason;
14139 suip->ui_pkt_state = pktp->pkt_state;
14140 suip->ui_pkt_statistics = pktp->pkt_statistics;
14141 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
14142
14143 /* We are done with the scsi_pkt; free it now */
14144 ASSERT(SD_GET_PKTP(bp) != NULL);
14145 scsi_destroy_pkt(SD_GET_PKTP(bp));
14146
14147 SD_TRACE(SD_LOG_IO_CORE, un,
14148 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
14149 }
14150
14151
14152 /*
14153 * Function: sd_bioclone_alloc
14154 *
14155 * Description: Allocate a buf(9S) and init it as per the given buf
14156 * and the various arguments. The associated sd_xbuf
14157 * struct is (nearly) duplicated. The struct buf *bp
14158 * argument is saved in new_xp->xb_private.
14159 *
14160 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
14161 * datalen - size of data area for the shadow bp
14162 * blkno - starting LBA
14163 * func - function pointer for b_iodone in the shadow buf. (May
14164 * be NULL if none.)
14165 *
14166 * Return Code: Pointer to the allocated buf(9S) struct
14167 *
14168 * Context: Can sleep.
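 *
 * For example, sd_mapblockaddr_iostart() builds its partition-overrun
 * buf with:
 *
 * bp = sd_bioclone_alloc(bp, count, blocknum,
 *     (int (*)(struct buf *)) sd_mapblockaddr_iodone);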
14169 */ 14170 14171 static struct buf * 14172 sd_bioclone_alloc(struct buf *bp, size_t datalen, 14173 daddr_t blkno, int (*func)(struct buf *)) 14174 { 14175 struct sd_lun *un; 14176 struct sd_xbuf *xp; 14177 struct sd_xbuf *new_xp; 14178 struct buf *new_bp; 14179 14180 ASSERT(bp != NULL); 14181 xp = SD_GET_XBUF(bp); 14182 ASSERT(xp != NULL); 14183 un = SD_GET_UN(bp); 14184 ASSERT(un != NULL); 14185 ASSERT(!mutex_owned(SD_MUTEX(un))); 14186 14187 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 14188 NULL, KM_SLEEP); 14189 14190 new_bp->b_lblkno = blkno; 14191 14192 /* 14193 * Allocate an xbuf for the shadow bp and copy the contents of the 14194 * original xbuf into it. 14195 */ 14196 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14197 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 14198 14199 /* 14200 * The given bp is automatically saved in the xb_private member 14201 * of the new xbuf. Callers are allowed to depend on this. 14202 */ 14203 new_xp->xb_private = bp; 14204 14205 new_bp->b_private = new_xp; 14206 14207 return (new_bp); 14208 } 14209 14210 /* 14211 * Function: sd_shadow_buf_alloc 14212 * 14213 * Description: Allocate a buf(9S) and init it as per the given buf 14214 * and the various arguments. The associated sd_xbuf 14215 * struct is (nearly) duplicated. The struct buf *bp 14216 * argument is saved in new_xp->xb_private. 14217 * 14218 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 14219 * datalen - size of data area for the shadow bp 14220 * bflags - B_READ or B_WRITE (pseudo flag) 14221 * blkno - starting LBA 14222 * func - function pointer for b_iodone in the shadow buf. (May 14223 * be NULL if none.) 14224 * 14225 * Return Code: Pointer to allocates buf(9S) struct 14226 * 14227 * Context: Can sleep. 14228 */ 14229 14230 static struct buf * 14231 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 14232 daddr_t blkno, int (*func)(struct buf *)) 14233 { 14234 struct sd_lun *un; 14235 struct sd_xbuf *xp; 14236 struct sd_xbuf *new_xp; 14237 struct buf *new_bp; 14238 14239 ASSERT(bp != NULL); 14240 xp = SD_GET_XBUF(bp); 14241 ASSERT(xp != NULL); 14242 un = SD_GET_UN(bp); 14243 ASSERT(un != NULL); 14244 ASSERT(!mutex_owned(SD_MUTEX(un))); 14245 14246 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 14247 bp_mapin(bp); 14248 } 14249 14250 bflags &= (B_READ | B_WRITE); 14251 #if defined(__i386) || defined(__amd64) 14252 new_bp = getrbuf(KM_SLEEP); 14253 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 14254 new_bp->b_bcount = datalen; 14255 new_bp->b_flags = bflags | 14256 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 14257 #else 14258 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 14259 datalen, bflags, SLEEP_FUNC, NULL); 14260 #endif 14261 new_bp->av_forw = NULL; 14262 new_bp->av_back = NULL; 14263 new_bp->b_dev = bp->b_dev; 14264 new_bp->b_blkno = blkno; 14265 new_bp->b_iodone = func; 14266 new_bp->b_edev = bp->b_edev; 14267 new_bp->b_resid = 0; 14268 14269 /* We need to preserve the B_FAILFAST flag */ 14270 if (bp->b_flags & B_FAILFAST) { 14271 new_bp->b_flags |= B_FAILFAST; 14272 } 14273 14274 /* 14275 * Allocate an xbuf for the shadow bp and copy the contents of the 14276 * original xbuf into it. 14277 */ 14278 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14279 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 14280 14281 /* Need later to copy data between the shadow buf & original buf! 
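 * (This flag is picked up later by sd_initpkt_for_buf(), which folds
 * the SD_XB_INITPKT_MASK portion of xb_pkt_flags into the cmd_flags it
 * passes to scsi_init_pkt(), so the shadow data is mapped for
 * consistent access.)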
*/
14282 new_xp->xb_pkt_flags |= PKT_CONSISTENT;
14283
14284 /*
14285 * The given bp is automatically saved in the xb_private member
14286 * of the new xbuf. Callers are allowed to depend on this.
14287 */
14288 new_xp->xb_private = bp;
14289
14290 new_bp->b_private = new_xp;
14291
14292 return (new_bp);
14293 }
14294
14295 /*
14296 * Function: sd_bioclone_free
14297 *
14298 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
14299 * when the request extended beyond the partition.
14300 *
14301 * Context: May be called under interrupt context
14302 */
14303
14304 static void
14305 sd_bioclone_free(struct buf *bp)
14306 {
14307 struct sd_xbuf *xp;
14308
14309 ASSERT(bp != NULL);
14310 xp = SD_GET_XBUF(bp);
14311 ASSERT(xp != NULL);
14312
14313 /*
14314 * Call bp_mapout() before freeing the buf, in case a lower
14315 * layer or HBA had done a bp_mapin(). We must do this here
14316 * as we are the "originator" of the shadow buf.
14317 */
14318 bp_mapout(bp);
14319
14320 /*
14321 * Null out b_iodone before freeing the bp, to ensure that the driver
14322 * never gets confused by a stale value in this field. (Just a little
14323 * extra defensiveness here.)
14324 */
14325 bp->b_iodone = NULL;
14326
14327 freerbuf(bp);
14328
14329 kmem_free(xp, sizeof (struct sd_xbuf));
14330 }
14331
14332 /*
14333 * Function: sd_shadow_buf_free
14334 *
14335 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
14336 *
14337 * Context: May be called under interrupt context
14338 */
14339
14340 static void
14341 sd_shadow_buf_free(struct buf *bp)
14342 {
14343 struct sd_xbuf *xp;
14344
14345 ASSERT(bp != NULL);
14346 xp = SD_GET_XBUF(bp);
14347 ASSERT(xp != NULL);
14348
14349 #if defined(__sparc)
14350 /*
14351 * Call bp_mapout() before freeing the buf, in case a lower
14352 * layer or HBA had done a bp_mapin(). We must do this here
14353 * as we are the "originator" of the shadow buf.
14354 */
14355 bp_mapout(bp);
14356 #endif
14357
14358 /*
14359 * Null out b_iodone before freeing the bp, to ensure that the driver
14360 * never gets confused by a stale value in this field. (Just a little
14361 * extra defensiveness here.)
14362 */
14363 bp->b_iodone = NULL;
14364
14365 #if defined(__i386) || defined(__amd64)
14366 kmem_free(bp->b_un.b_addr, bp->b_bcount);
14367 freerbuf(bp);
14368 #else
14369 scsi_free_consistent_buf(bp);
14370 #endif
14371
14372 kmem_free(xp, sizeof (struct sd_xbuf));
14373 }
14374
14375
14376 /*
14377 * Function: sd_print_transport_rejected_message
14378 *
14379 * Description: This implements the ludicrously complex rules for printing
14380 * a "transport rejected" message. This is to address the
14381 * specific problem of having a flood of this error message
14382 * produced when a failover occurs.
14383 *
14384 * Context: Any.
14385 */
14386
14387 static void
14388 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
14389 int code)
14390 {
14391 ASSERT(un != NULL);
14392 ASSERT(mutex_owned(SD_MUTEX(un)));
14393 ASSERT(xp != NULL);
14394
14395 /*
14396 * Print the "transport rejected" message under the following
14397 * conditions:
14398 *
14399 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
14400 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
14401 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
14402 * printed the FIRST time a TRAN_FATAL_ERROR is returned from
14403 * scsi_transport(9F) (which indicates that the target might have
14404 * gone off-line).

/*
 * Function: sd_print_transport_rejected_message
 *
 * Description: This implements the ludicrously complex rules for printing
 *		a "transport rejected" message.  This is to address the
 *		specific problem of having a flood of this error message
 *		produced when a failover occurs.
 *
 *     Context: Any.
 */

static void
sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
	int code)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(xp != NULL);

	/*
	 * Print the "transport rejected" message under the following
	 * conditions:
	 *
	 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
	 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
	 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
	 *   printed the FIRST time a TRAN_FATAL_ERROR is returned from
	 *   scsi_transport(9F) (which indicates that the target might have
	 *   gone off-line).  This uses un->un_tran_fatal_count, which is
	 *   incremented whenever a TRAN_FATAL_ERROR is received, and reset
	 *   to zero whenever a TRAN_ACCEPT is returned from
	 *   scsi_transport().
	 *
	 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
	 * the preceding cases in order for the message to be printed.
	 */
	if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) &&
	    (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) {
		if ((sd_level_mask & SD_LOGMASK_DIAG) ||
		    (code != TRAN_FATAL_ERROR) ||
		    (un->un_tran_fatal_count == 1)) {
			switch (code) {
			case TRAN_BADPKT:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected bad packet\n");
				break;
			case TRAN_FATAL_ERROR:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected fatal error\n");
				break;
			default:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected (%d)\n", code);
				break;
			}
		}
	}
}


/*
 * Function: sd_add_buf_to_waitq
 *
 * Description: Add the given buf(9S) struct to the wait queue for the
 *		instance.  If sorting is enabled, then the buf is added
 *		to the queue via an elevator sort algorithm (a la
 *		disksort(9F)).  The SD_GET_BLKNO(bp) is used as the sort key.
 *		If sorting is not enabled, then the buf is just added
 *		to the end of the wait queue.
 *
 * Return Code: void
 *
 *     Context: Does not sleep/block, therefore technically can be called
 *		from any context.  However if sorting is enabled then the
 *		execution time is indeterminate, and may take a long time
 *		if the wait queue grows large.
 */

static void
sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
{
	struct buf *ap;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* If the queue is empty, add the buf as the only entry & return. */
	if (un->un_waitq_headp == NULL) {
		ASSERT(un->un_waitq_tailp == NULL);
		un->un_waitq_headp = un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	ASSERT(un->un_waitq_tailp != NULL);

	/*
	 * If sorting is disabled, just add the buf to the tail end of
	 * the wait queue and return.
	 */
	if (un->un_f_disksort_disabled) {
		un->un_waitq_tailp->av_forw = bp;
		un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	/*
	 * Sort through the list of requests currently on the wait queue
	 * and add the new buf request at the appropriate position.
	 *
	 * The un->un_waitq_headp is an activity chain pointer on which
	 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
	 * first queue holds those requests which are positioned after
	 * the current SD_GET_BLKNO() (in the first request); the second holds
	 * requests which came in after their SD_GET_BLKNO() number was passed.
	 * Thus we implement a one-way scan, retracting after reaching
	 * the end of the drive to the first request on the second
	 * queue, at which time it becomes the first queue.
	 * A one-way scan is natural because of the way UNIX read-ahead
	 * blocks are allocated.
	 *
	 * If we lie after the first request, then we must locate the
	 * second request list and add ourselves to it.
	 */
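	/*
	 * Illustrative example (not part of the original algorithm
	 * description): suppose the waitq currently holds requests with
	 * block numbers
	 *
	 *	40 -> 50 -> 70 -> 10 -> 20
	 *
	 * i.e. the first (ascending) list is 40,50,70 and the second list
	 * is 10,20.  A new request for block 60 sorts into the first list
	 * between 50 and 70, while a new request for block 15 triggers the
	 * inversion scan below (at 70 -> 10) and then sorts into the
	 * second list between 10 and 20.
	 */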
	ap = un->un_waitq_headp;
	if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
		while (ap->av_forw != NULL) {
			/*
			 * Look for an "inversion" in the (normally
			 * ascending) block numbers. This indicates
			 * the start of the second request list.
			 */
			if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
				/*
				 * Search the second request list for the
				 * first request at a larger block number.
				 * We go before that; however if there is
				 * no such request, we go at the end.
				 */
				do {
					if (SD_GET_BLKNO(bp) <
					    SD_GET_BLKNO(ap->av_forw)) {
						goto insert;
					}
					ap = ap->av_forw;
				} while (ap->av_forw != NULL);
				goto insert;		/* after last */
			}
			ap = ap->av_forw;
		}

		/*
		 * No inversions... we will go after the last, and
		 * be the first request in the second request list.
		 */
		goto insert;
	}

	/*
	 * Request is at/after the current request...
	 * sort in the first request list.
	 */
	while (ap->av_forw != NULL) {
		/*
		 * We want to go after the current request (1) if
		 * there is an inversion after it (i.e. it is the end
		 * of the first request list), or (2) if the next
		 * request is a larger block no. than our request.
		 */
		if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
		    (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
			goto insert;
		}
		ap = ap->av_forw;
	}

	/*
	 * Neither a second list nor a larger request, therefore
	 * we go at the end of the first list (which is the same
	 * as the end of the whole shebang).
	 */
insert:
	bp->av_forw = ap->av_forw;
	ap->av_forw = bp;

	/*
	 * If we inserted onto the tail end of the waitq, make sure the
	 * tail pointer is updated.
	 */
	if (ap == un->un_waitq_tailp) {
		un->un_waitq_tailp = bp;
	}
}
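
/*
 * A typical enqueue path elsewhere in this driver (a sketch based on the
 * locking contracts asserted in sd_add_buf_to_waitq() above and in
 * sd_start_cmds() below, not a verbatim excerpt):
 *
 *	mutex_enter(SD_MUTEX(un));
 *	sd_add_buf_to_waitq(un, bp);
 *	sd_start_cmds(un, NULL);
 *	mutex_exit(SD_MUTEX(un));
 */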

/*
 * Function: sd_start_cmds
 *
 * Description: Remove and transport cmds from the driver queues.
 *
 *   Arguments: un - pointer to the unit (soft state) struct for the target.
 *
 *		immed_bp - ptr to a buf to be transported immediately. Only
 *		the immed_bp is transported; bufs on the waitq are not
 *		processed and the un_retry_bp is not checked.  If immed_bp is
 *		NULL, then normal queue processing is performed.
 *
 *     Context: May be called from kernel thread context, interrupt context,
 *		or runout callback context. This function may not block or
 *		call routines that block.
 */

static void
sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
{
	struct	sd_xbuf	*xp;
	struct	buf	*bp;
	void	(*statp)(kstat_io_t *);
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
	void	(*saved_statp)(kstat_io_t *);
#endif
	int	rval;
	struct sd_fm_internal *sfip = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_ncmds_in_transport >= 0);
	ASSERT(un->un_throttle >= 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");

	do {
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		saved_statp = NULL;
#endif

		/*
		 * If we are syncing or dumping, fail the command to
		 * avoid recursively calling back into scsi_transport().
		 * The dump I/O itself uses a separate code path so this
		 * only prevents non-dump I/O from being sent while dumping.
		 * File system sync takes place before dumping begins.
		 * During panic, filesystem I/O is allowed provided
		 * un_in_callback is <= 1. This is to prevent recursion
		 * such as sd_start_cmds -> scsi_transport -> sdintr ->
		 * sd_start_cmds and so on. See panic.c for more information
		 * about the states the system can be in during panic.
		 */
		if ((un->un_state == SD_STATE_DUMPING) ||
		    (ddi_in_panic() && (un->un_in_callback > 1))) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_start_cmds: panicking\n");
			goto exit;
		}

		if ((bp = immed_bp) != NULL) {
			/*
			 * We have a bp that must be transported immediately.
			 * It's OK to transport the immed_bp here without doing
			 * the throttle limit check because the immed_bp is
			 * always used in a retry/recovery case. This means
			 * that we know we are not at the throttle limit by
			 * virtue of the fact that to get here we must have
			 * already gotten a command back via sdintr(). This also
			 * relies on (1) the command on un_retry_bp preventing
			 * further commands from the waitq from being issued;
			 * and (2) the code in sd_retry_command checking the
			 * throttle limit before issuing a delayed or immediate
			 * retry. This holds even if the throttle limit is
			 * currently ratcheted down from its maximum value.
			 */
			statp = kstat_runq_enter;
			if (bp == un->un_retry_bp) {
				ASSERT((un->un_retry_statp == NULL) ||
				    (un->un_retry_statp == kstat_waitq_enter) ||
				    (un->un_retry_statp ==
				    kstat_runq_back_to_waitq));
				/*
				 * If the waitq kstat was incremented when
				 * sd_set_retry_bp() queued this bp for a retry,
				 * then we must set up statp so that the waitq
				 * count will get decremented correctly below.
				 * Also we must clear un->un_retry_statp to
				 * ensure that we do not act on a stale value
				 * in this field.
				 */
				if ((un->un_retry_statp == kstat_waitq_enter) ||
				    (un->un_retry_statp ==
				    kstat_runq_back_to_waitq)) {
					statp = kstat_waitq_to_runq;
				}
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
				saved_statp = un->un_retry_statp;
#endif
				un->un_retry_statp = NULL;

				SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
				    "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
				    "un_throttle:%d un_ncmds_in_transport:%d\n",
				    un, un->un_retry_bp, un->un_throttle,
				    un->un_ncmds_in_transport);
			} else {
				SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
				    "processing priority bp:0x%p\n", bp);
			}

		} else if ((bp = un->un_waitq_headp) != NULL) {
			/*
			 * A command on the waitq is ready to go, but do not
			 * send it if:
			 *
			 * (1) the throttle limit has been reached, or
			 * (2) a retry is pending, or
			 * (3) a START_STOP_UNIT callback is pending, or
			 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
			 *     command is pending.
			 *
			 * For all of these conditions, IO processing will
			 * restart after the condition is cleared.
			 */
			if (un->un_ncmds_in_transport >= un->un_throttle) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "throttle limit reached!\n");
				goto exit;
			}
			if (un->un_retry_bp != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, retry pending!\n");
				goto exit;
			}
			if (un->un_startstop_timeid != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "START_STOP pending!\n");
				goto exit;
			}
			if (un->un_direct_priority_timeid != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
				goto exit;
			}

			/* Dequeue the command */
			un->un_waitq_headp = bp->av_forw;
			if (un->un_waitq_headp == NULL) {
				un->un_waitq_tailp = NULL;
			}
			bp->av_forw = NULL;
			statp = kstat_waitq_to_runq;
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_start_cmds: processing waitq bp:0x%p\n", bp);

		} else {
			/* No work to do so bail out now */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_start_cmds: no more work, exiting!\n");
			goto exit;
		}

		/*
		 * Reset the state to normal. This is the mechanism by which
		 * the state transitions from either SD_STATE_RWAIT or
		 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
		 * If state is SD_STATE_PM_CHANGING then this command is
		 * part of the device power control and the state must
		 * not be put back to normal. Doing so would allow new
		 * commands to proceed when they shouldn't, since the
		 * device may be going off.
		 */
		if ((un->un_state != SD_STATE_SUSPENDED) &&
		    (un->un_state != SD_STATE_PM_CHANGING)) {
			New_state(un, SD_STATE_NORMAL);
		}

		xp = SD_GET_XBUF(bp);
		ASSERT(xp != NULL);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		/*
		 * Allocate the scsi_pkt if we need one, or attach DMA
		 * resources if we have a scsi_pkt that needs them. The
		 * latter should only occur for commands that are being
		 * retried.
		 */
		if ((xp->xb_pktp == NULL) ||
		    ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
#else
		if (xp->xb_pktp == NULL) {
#endif
			/*
			 * There is no scsi_pkt allocated for this buf. Call
			 * the initpkt function to allocate & init one.
			 *
			 * The scsi_init_pkt runout callback functionality is
			 * implemented as follows:
			 *
			 * 1) The initpkt function always calls
			 *    scsi_init_pkt(9F) with sdrunout specified as the
			 *    callback routine.
			 * 2) A successful packet allocation is initialized and
			 *    the I/O is transported.
			 * 3) The I/O associated with an allocation resource
			 *    failure is left on its queue to be retried via
			 *    runout or the next I/O.
			 * 4) The I/O associated with a DMA error is removed
			 *    from the queue and failed with EIO. Processing of
			 *    the transport queues is also halted to be
			 *    restarted via runout or the next I/O.
			 * 5) The I/O associated with a CDB size or packet
			 *    size error is removed from the queue and failed
			 *    with EIO. Processing of the transport queues is
			 *    continued.
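			 *
			 * (In terms of the return codes handled in the
			 * switch below: case 2 corresponds to
			 * SD_PKT_ALLOC_SUCCESS, case 3 to
			 * SD_PKT_ALLOC_FAILURE, case 4 to
			 * SD_PKT_ALLOC_FAILURE_NO_DMA, and case 5 to
			 * SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL and
			 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL.)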
			 *
			 * Note: there is no interface for canceling a runout
			 * callback.  To prevent the driver from detaching or
			 * suspending while a runout is pending, the driver
			 * state is set to SD_STATE_RWAIT.
			 *
			 * Note: using the scsi_init_pkt callback facility can
			 * result in an I/O request persisting at the head of
			 * the list which cannot be satisfied even after
			 * multiple retries. In the future the driver may
			 * implement some kind of maximum runout count before
			 * failing an I/O.
			 *
			 * Note: the use of funcp below may seem superfluous,
			 * but it helps warlock figure out the correct
			 * initpkt function calls (see [s]sd.wlcmd).
			 */
			struct scsi_pkt	*pktp;
			int (*funcp)(struct buf *bp, struct scsi_pkt **pktp);

			ASSERT(bp != un->un_rqs_bp);

			funcp = sd_initpkt_map[xp->xb_chain_iostart];
			switch ((*funcp)(bp, &pktp)) {
			case SD_PKT_ALLOC_SUCCESS:
				xp->xb_pktp = pktp;
				SD_TRACE(SD_LOG_IO_CORE, un,
				    "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n",
				    pktp);
				goto got_pkt;

			case SD_PKT_ALLOC_FAILURE:
				/*
				 * Temporary (hopefully) resource depletion.
				 * Since retries and RQS commands always have a
				 * scsi_pkt allocated, these cases should never
				 * get here. So the only cases this needs to
				 * handle are a bp from the waitq (which we put
				 * back onto the waitq for sdrunout), or a bp
				 * sent as an immed_bp (which we just fail).
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n");

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */

				if (bp == immed_bp) {
					/*
					 * If SD_XB_DMA_FREED is clear, then
					 * this is a failure to allocate a
					 * scsi_pkt, and we must fail the
					 * command.
					 */
					if ((xp->xb_pkt_flags &
					    SD_XB_DMA_FREED) == 0) {
						break;
					}

					/*
					 * If this immediate command is NOT our
					 * un_retry_bp, then we must fail it.
					 */
					if (bp != un->un_retry_bp) {
						break;
					}

					/*
					 * We get here if this cmd is our
					 * un_retry_bp that was DMAFREED, but
					 * scsi_init_pkt() failed to reallocate
					 * DMA resources when we attempted to
					 * retry it. This can happen when an
					 * mpxio failover is in progress, but
					 * we don't want to just fail the
					 * command in this case.
					 *
					 * Use timeout(9F) to restart it after
					 * a 100ms delay.  We don't want to
					 * let sdrunout() restart it, because
					 * sdrunout() is just supposed to start
					 * commands that are sitting on the
					 * wait queue.  The un_retry_bp stays
					 * set until the command completes, but
					 * sdrunout can be called many times
					 * before that happens.  Since sdrunout
					 * cannot tell if the un_retry_bp is
					 * already in the transport, it could
					 * end up calling scsi_transport() for
					 * the un_retry_bp multiple times.
					 *
					 * Also: don't schedule the callback
					 * if some other callback is already
					 * pending.
					 */
					if (un->un_retry_statp == NULL) {
						/*
						 * restore the kstat pointer to
						 * keep kstat counts coherent
						 * when we do retry the command.
						 */
						un->un_retry_statp =
						    saved_statp;
					}

					if ((un->un_startstop_timeid == NULL) &&
					    (un->un_retry_timeid == NULL) &&
					    (un->un_direct_priority_timeid ==
					    NULL)) {

						un->un_retry_timeid =
						    timeout(
						    sd_start_retry_command,
						    un, SD_RESTART_TIMEOUT);
					}
					goto exit;
				}

#else
				if (bp == immed_bp) {
					break;	/* Just fail the command */
				}
#endif

				/* Add the buf back to the head of the waitq */
				bp->av_forw = un->un_waitq_headp;
				un->un_waitq_headp = bp;
				if (un->un_waitq_tailp == NULL) {
					un->un_waitq_tailp = bp;
				}
				goto exit;

			case SD_PKT_ALLOC_FAILURE_NO_DMA:
				/*
				 * HBA DMA resource failure. Fail the command
				 * and continue processing of the queues.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: "
				    "SD_PKT_ALLOC_FAILURE_NO_DMA\n");
				break;

			case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL:
				/*
				 * Note:x86: Partial DMA mapping not supported
				 * for USCSI commands, and all the needed DMA
				 * resources were not allocated.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: "
				    "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n");
				break;

			case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL:
				/*
				 * Note:x86: Request cannot fit into CDB based
				 * on lba and len.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: "
				    "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n");
				break;

			default:
				/* Should NEVER get here! */
				panic("scsi_initpkt error");
				/*NOTREACHED*/
			}

			/*
			 * Fatal error in allocating a scsi_pkt for this buf.
			 * Update kstats & return the buf with an error code.
			 * We must use sd_return_failed_command_no_restart() to
			 * avoid a recursive call back into sd_start_cmds().
			 * However this also means that we must keep processing
			 * the waitq here in order to avoid stalling.
			 */
			if (statp == kstat_waitq_to_runq) {
				SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
			}
			sd_return_failed_command_no_restart(un, bp, EIO);
			if (bp == immed_bp) {
				/* immed_bp is gone by now, so clear this */
				immed_bp = NULL;
			}
			continue;
		}
got_pkt:
		if (bp == immed_bp) {
			/* goto the head of the class.... */
			xp->xb_pktp->pkt_flags |= FLAG_HEAD;
		}

		un->un_ncmds_in_transport++;
		SD_UPDATE_KSTATS(un, statp, bp);

		/*
		 * Call scsi_transport() to send the command to the target.
		 * According to SCSA architecture, we must drop the mutex here
		 * before calling scsi_transport() in order to avoid deadlock.
		 * Note that the scsi_pkt's completion routine can be executed
		 * (from interrupt context) even before the call to
		 * scsi_transport() returns.
		 */
		SD_TRACE(SD_LOG_IO_CORE, un,
		    "sd_start_cmds: calling scsi_transport()\n");
		DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp);

		mutex_exit(SD_MUTEX(un));
		rval = scsi_transport(xp->xb_pktp);
		mutex_enter(SD_MUTEX(un));

		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_start_cmds: scsi_transport() returned %d\n", rval);

		switch (rval) {
		case TRAN_ACCEPT:
			/* Clear this with every pkt accepted by the HBA */
			un->un_tran_fatal_count = 0;
			break;	/* Success; try the next cmd (if any) */

		case TRAN_BUSY:
			un->un_ncmds_in_transport--;
			ASSERT(un->un_ncmds_in_transport >= 0);

			/*
			 * Don't retry request sense, the sense data
			 * is lost when another request is sent.
			 * Free up the rqs buf and retry
			 * the original failed cmd.  Update kstat.
			 */
			if (bp == un->un_rqs_bp) {
				SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
				bp = sd_mark_rqs_idle(un, xp);
				sd_retry_command(un, bp, SD_RETRIES_STANDARD,
				    NULL, NULL, EIO, un->un_busy_timeout / 500,
				    kstat_waitq_enter);
				goto exit;
			}

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
			/*
			 * Free the DMA resources for the scsi_pkt. This will
			 * allow mpxio to select another path the next time
			 * we call scsi_transport() with this scsi_pkt.
			 * See sdintr() for the rationale behind this.
			 */
			if ((un->un_f_is_fibre == TRUE) &&
			    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
			    ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) {
				scsi_dmafree(xp->xb_pktp);
				xp->xb_pkt_flags |= SD_XB_DMA_FREED;
			}
#endif

			if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) {
				/*
				 * Commands that are SD_PATH_DIRECT_PRIORITY
				 * are for error recovery situations. These do
				 * not use the normal command waitq, so if they
				 * get a TRAN_BUSY we cannot put them back onto
				 * the waitq for later retry. One possible
				 * problem is that there could already be some
				 * other command on un_retry_bp that is waiting
				 * for this one to complete, so we would be
				 * deadlocked if we put this command back onto
				 * the waitq for later retry (since un_retry_bp
				 * must complete before the driver gets back to
				 * commands on the waitq).
				 *
				 * To avoid deadlock we must schedule a callback
				 * that will restart this command after a set
				 * interval.  This should keep retrying for as
				 * long as the underlying transport keeps
				 * returning TRAN_BUSY (just like for other
				 * commands).  Use the same timeout interval as
				 * for the ordinary TRAN_BUSY retry.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: scsi_transport() returned "
				    "TRAN_BUSY for DIRECT_PRIORITY cmd!\n");

				SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
				un->un_direct_priority_timeid =
				    timeout(sd_start_direct_priority_command,
				    bp, un->un_busy_timeout / 500);

				goto exit;
			}

			/*
			 * For TRAN_BUSY, we want to reduce the throttle value,
			 * unless we are retrying a command.
			 */
			if (bp != un->un_retry_bp) {
				sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
			}

			/*
			 * Set up the bp to be tried again 10 ms later.
			 * Note:x86: Is there a timeout value in the sd_lun
			 * for this condition?
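			 * (Assuming the default 5 second un_busy_timeout,
			 * the un_busy_timeout / 500 interval used below is
			 * where the 10 ms figure above comes from.)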
			 */
			sd_set_retry_bp(un, bp, un->un_busy_timeout / 500,
			    kstat_runq_back_to_waitq);
			goto exit;

		case TRAN_FATAL_ERROR:
			un->un_tran_fatal_count++;
			/* FALLTHRU */

		case TRAN_BADPKT:
		default:
			un->un_ncmds_in_transport--;
			ASSERT(un->un_ncmds_in_transport >= 0);

			/*
			 * If this is our REQUEST SENSE command with a
			 * transport error, we must get back the pointers
			 * to the original buf, and mark the REQUEST
			 * SENSE command as "available".
			 */
			if (bp == un->un_rqs_bp) {
				bp = sd_mark_rqs_idle(un, xp);
				xp = SD_GET_XBUF(bp);
			} else {
				/*
				 * Legacy behavior: do not update transport
				 * error count for request sense commands.
				 */
				SD_UPDATE_ERRSTATS(un, sd_transerrs);
			}

			SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
			sd_print_transport_rejected_message(un, xp, rval);

			/*
			 * This command will be terminated by the SD driver
			 * due to a fatal transport error. We should post
			 * ereport.io.scsi.cmd.disk.tran with driver-assessment
			 * of "fail" for any command to indicate this
			 * situation.
			 */
			if (xp->xb_ena > 0) {
				ASSERT(un->un_fm_private != NULL);
				sfip = un->un_fm_private;
				sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT;
				sd_ssc_extract_info(&sfip->fm_ssc, un,
				    xp->xb_pktp, bp, xp);
				sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
			}

			/*
			 * We must use sd_return_failed_command_no_restart() to
			 * avoid a recursive call back into sd_start_cmds().
			 * However this also means that we must keep processing
			 * the waitq here in order to avoid stalling.
			 */
			sd_return_failed_command_no_restart(un, bp, EIO);

			/*
			 * Notify any threads waiting in sd_ddi_suspend() that
			 * a command completion has occurred.
			 */
			if (un->un_state == SD_STATE_SUSPENDED) {
				cv_broadcast(&un->un_disk_busy_cv);
			}

			if (bp == immed_bp) {
				/* immed_bp is gone by now, so clear this */
				immed_bp = NULL;
			}
			break;
		}

	} while (immed_bp == NULL);

exit:
	ASSERT(mutex_owned(SD_MUTEX(un)));
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n");
}


/*
 * Function: sd_return_command
 *
 * Description: Returns a command to its originator (with or without an
 *		error).  Also starts commands waiting to be transported
 *		to the target.
 *
 *     Context: May be called from interrupt, kernel, or timeout context
 */

static void
sd_return_command(struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_fm_internal *sfip;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != un->un_rqs_bp);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	pktp = SD_GET_PKTP(bp);
	sfip = (struct sd_fm_internal *)un->un_fm_private;
	ASSERT(sfip != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");

	/*
	 * Note: check for the "sdrestart failed" case.
	 */
	if ((un->un_partial_dma_supported == 1) &&
	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
	    (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
	    (xp->xb_pktp->pkt_resid == 0)) {

		if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
			/*
			 * Successfully set up next portion of cmd
			 * transfer, try sending it
			 */
			sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
			    NULL, NULL, 0, (clock_t)0, NULL);
			sd_start_cmds(un, NULL);
			return;	/* Note:x86: need a return here? */
		}
	}

	/*
	 * If this is the failfast bp, clear it from un_failfast_bp. This
	 * can happen if upon being re-tried the failfast bp either
	 * succeeded or encountered another error (possibly even a different
	 * error than the one that precipitated the failfast state, but in
	 * that case it would have had to exhaust retries as well). Regardless,
	 * this should never occur while the instance is in the active
	 * failfast state.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	/*
	 * Clear the failfast state upon successful completion of ANY cmd.
	 */
	if (bp->b_error == 0) {
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
		/*
		 * If this is a successful command, but one that was
		 * previously retried, we will take it as a recovered
		 * command and post an ereport with driver-assessment of
		 * "recovered".
		 */
		if (xp->xb_ena > 0) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY);
		}
	} else {
		/*
		 * If this is a failed non-USCSI command we will post an
		 * ereport with driver-assessment set accordingly ("fail" or
		 * "fatal").
		 */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
		}
	}

	/*
	 * This is used if the command was retried one or more times. Show that
	 * we are done with it, and allow processing of the waitq to resume.
	 */
	if (bp == un->un_retry_bp) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_command: un:0x%p: "
		    "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
		/*
		 * Notify any threads waiting in sd_ddi_suspend() that
		 * a command completion has occurred.
		 */
		cv_broadcast(&un->un_disk_busy_cv);
		break;
	default:
		sd_start_cmds(un, NULL);
		break;
	}

	/* Return this command up the iodone chain to its originator. */
	mutex_exit(SD_MUTEX(un));

	(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
	xp->xb_pktp = NULL;

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
}


/*
 * Function: sd_return_failed_command
 *
 * Description: Command completion when an error occurred.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	sd_return_command(un, bp);
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: exit\n");
}


/*
 * Function: sd_return_failed_command_no_restart
 *
 * Description: Same as sd_return_failed_command, but ensures that no
 *		call back into sd_start_cmds will be issued.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
	int errcode)
{
	struct sd_xbuf *xp;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(errcode != 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	/*
	 * If this is the failfast bp, clear it. This can happen if the
	 * failfast bp encountered a fatal error when we attempted to
	 * re-try it (such as a scsi_transport(9F) failure).  However
	 * we should NOT be in an active failfast state if the failfast
	 * bp is not NULL.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	if (bp == un->un_retry_bp) {
		/*
		 * This command was retried one or more times. Show that we are
		 * done with it, and allow processing of the waitq to resume.
		 */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_failed_command_no_restart: "
		    " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	mutex_exit(SD_MUTEX(un));

	if (xp->xb_pktp != NULL) {
		(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
		xp->xb_pktp = NULL;
	}

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: exit\n");
}


/*
 * Function: sd_retry_command
 *
 * Description: Queue up a command for retry, or (optionally) fail it
 *		if retry counts are exhausted.
 *
 *   Arguments: un - Pointer to the sd_lun struct for the target.
 *
 *		bp - Pointer to the buf for the command to be retried.
 *
 *		retry_check_flag - Flag to see which (if any) of the retry
 *		   counts should be decremented/checked. If the indicated
 *		   retry count is exhausted, then the command will not be
 *		   retried; it will be failed instead.
 *		   This should use a value equal to one of the following:
 *
 *			SD_RETRIES_NOCHECK
 *			SD_RETRIES_STANDARD
 *			SD_RETRIES_VICTIM
 *
 *		   Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
 *		   if the check should be made to see if FLAG_ISOLATE is set
 *		   in the pkt. If FLAG_ISOLATE is set, then the command is
 *		   not retried, it is simply failed.
 *
 *		user_funcp - Ptr to function to call before dispatching the
 *		   command. May be NULL if no action needs to be performed.
 *		   (Primarily intended for printing messages.)
 *
 *		user_arg - Optional argument to be passed along to
 *		   the user_funcp call.
 *
 *		failure_code - errno return code to set in the bp if the
 *		   command is going to be failed.
 *
 *		retry_delay - Retry delay interval in (clock_t) units. May
 *		   be zero which indicates that the command should be retried
 *		   immediately (i.e., without an intervening delay).
 *
 *		statp - Ptr to kstat function to be updated if the command
 *		   is queued for a delayed retry. May be NULL if no kstat
 *		   update is desired.
 *
 *     Context: May be called from interrupt context.
 */
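
/*
 * Example invocations, both taken verbatim from this file: a delayed
 * retry with kstat bookkeeping,
 *
 *	sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
 *	    un->un_busy_timeout / 500, kstat_waitq_enter);
 *
 * and an immediate, uncounted retry after a successful partial-DMA setup,
 *
 *	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0,
 *	    (clock_t)0, NULL);
 */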

static void
sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
	void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
	code), void *user_arg, int failure_code, clock_t retry_delay,
	void (*statp)(kstat_io_t *))
{
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_fm_internal *sfip;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	sfip = (struct sd_fm_internal *)un->un_fm_private;
	ASSERT(sfip != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If we are syncing or dumping, fail the command to avoid
	 * recursively calling back into scsi_transport().
	 */
	if (ddi_in_panic()) {
		goto fail_command_no_log;
	}

	/*
	 * We should never be retrying a command with FLAG_DIAGNOSE set, so
	 * log an error and fail the command.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "ERROR, retrying FLAG_DIAGNOSE command.\n");
		sd_dump_memory(un, SD_LOG_IO, "CDB",
		    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		sd_dump_memory(un, SD_LOG_IO, "Sense Data",
		    (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		goto fail_command;
	}

	/*
	 * If we are suspended, then put the command onto the head of the
	 * wait queue since we don't want to start more commands, and
	 * clear the un_retry_bp. The next time we are resumed, the
	 * command will be handled from the wait queue.
	 */
	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
	case SD_STATE_DUMPING:
		bp->av_forw = un->un_waitq_headp;
		un->un_waitq_headp = bp;
		if (un->un_waitq_tailp == NULL) {
			un->un_waitq_tailp = bp;
		}
		if (bp == un->un_retry_bp) {
			un->un_retry_bp = NULL;
			un->un_retry_statp = NULL;
		}
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
		    "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
		return;
	default:
		break;
	}

	/*
	 * If the caller wants us to check FLAG_ISOLATE, then see if that
	 * is set; if it is then we do not want to retry the command.
	 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
	 */
	if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
		if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
			goto fail_command;
		}
	}


	/*
	 * If SD_RETRIES_FAILFAST is set, it indicates that either a
	 * command timeout or a selection timeout has occurred. This means
	 * that we were unable to establish any kind of communication with
	 * the target, and subsequent retries and/or commands are likely
	 * to encounter similar results and take a long time to complete.
	 *
	 * If this is a failfast error condition, we need to update the
	 * failfast state, even if this bp does not have B_FAILFAST set.
	 */
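	/*
	 * Informal summary of the failfast transitions implemented below
	 * (the inline comments are authoritative):
	 *
	 *	INACTIVE --(1st failfast error; bp saved in
	 *	    un_failfast_bp)--> "pending"
	 *	"pending" --(2nd failfast error on that same bp)--> ACTIVE,
	 *	    and the queues are flushed
	 *	any state --(retry without SD_RETRIES_FAILFAST)--> INACTIVE
	 *	    (the "pending" un_failfast_bp is NOT cleared)
	 *
	 * "pending" is not a distinct un_failfast_state value; it is
	 * represented by un_failfast_bp being non-NULL while the state
	 * is still SD_FAILFAST_INACTIVE.
	 */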
	if (retry_check_flag & SD_RETRIES_FAILFAST) {
		if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
			ASSERT(un->un_failfast_bp == NULL);
			/*
			 * If we are already in the active failfast state, and
			 * another failfast error condition has been detected,
			 * then fail this command if it has B_FAILFAST set.
			 * If B_FAILFAST is clear, then maintain the legacy
			 * behavior of retrying heroically, even though this
			 * will take a lot more time to fail the command.
			 */
			if (bp->b_flags & B_FAILFAST) {
				goto fail_command;
			}
		} else {
			/*
			 * We're not in the active failfast state, but we
			 * have a failfast error condition, so we must begin
			 * transition to the next state. We do this regardless
			 * of whether or not this bp has B_FAILFAST set.
			 */
			if (un->un_failfast_bp == NULL) {
				/*
				 * This is the first bp to meet a failfast
				 * condition so save it on un_failfast_bp &
				 * do normal retry processing. Do not enter
				 * active failfast state yet. This marks
				 * entry into the "failfast pending" state.
				 */
				un->un_failfast_bp = bp;

			} else if (un->un_failfast_bp == bp) {
				/*
				 * This is the second time *this* bp has
				 * encountered a failfast error condition,
				 * so enter active failfast state & flush
				 * queues as appropriate.
				 */
				un->un_failfast_state = SD_FAILFAST_ACTIVE;
				un->un_failfast_bp = NULL;
				sd_failfast_flushq(un);

				/*
				 * Fail this bp now if B_FAILFAST set;
				 * otherwise continue with retries. (It would
				 * be pretty ironic if this bp succeeded on a
				 * subsequent retry after we just flushed all
				 * the queues).
				 */
				if (bp->b_flags & B_FAILFAST) {
					goto fail_command;
				}

#if !defined(lint) && !defined(__lint)
			} else {
				/*
				 * If neither of the preceding conditionals
				 * was true, it means that there is some
				 * *other* bp that has met an initial failfast
				 * condition and is currently either being
				 * retried or is waiting to be retried. In
				 * that case we should perform normal retry
				 * processing on *this* bp, since there is a
				 * chance that the current failfast condition
				 * is transient and recoverable. If that does
				 * not turn out to be the case, then retries
				 * will be cleared when the wait queue is
				 * flushed anyway.
				 */
#endif
			}
		}
	} else {
		/*
		 * SD_RETRIES_FAILFAST is clear, which indicates that we
		 * likely were able to at least establish some level of
		 * communication with the target and subsequent commands
		 * and/or retries are likely to get through to the target.
		 * In this case we want to be aggressive about clearing
		 * the failfast state. Note that this does not affect
		 * the "failfast pending" condition.
		 */
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
	}


	/*
	 * Check the specified retry count to see if we can still do
	 * any retries with this pkt before we should fail it.
	 */
	switch (retry_check_flag & SD_RETRIES_MASK) {
	case SD_RETRIES_VICTIM:
		/*
		 * Check the victim retry count. If exhausted, then fall
		 * thru & check against the standard retry count.
		 */
		if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
			/* Increment count & proceed with the retry */
			xp->xb_victim_retry_count++;
			break;
		}
		/* Victim retries exhausted, fall back to std. retries... */
		/* FALLTHRU */

	case SD_RETRIES_STANDARD:
		if (xp->xb_retry_count >= un->un_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			/*
			 * update b_resid for failed SCMD_READ & SCMD_WRITE
			 * commands with nonzero pkt_resid.
			 */
			if ((pktp->pkt_reason == CMD_CMPLT) &&
			    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
			    (pktp->pkt_resid != 0)) {
				uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
				if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
					SD_UPDATE_B_RESID(bp, pktp);
				}
			}
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_UA:
		if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
			/* Retries exhausted, fail the command */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Unit Attention retries exhausted. "
			    "Check the target.\n");
" 15684 "Check the target.\n"); 15685 goto fail_command; 15686 } 15687 xp->xb_ua_retry_count++; 15688 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15689 "sd_retry_command: retry count:%d\n", 15690 xp->xb_ua_retry_count); 15691 break; 15692 15693 case SD_RETRIES_BUSY: 15694 if (xp->xb_retry_count >= un->un_busy_retry_count) { 15695 /* Retries exhausted, fail the command */ 15696 SD_TRACE(SD_LOG_IO_CORE, un, 15697 "sd_retry_command: retries exhausted!\n"); 15698 goto fail_command; 15699 } 15700 xp->xb_retry_count++; 15701 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15702 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15703 break; 15704 15705 case SD_RETRIES_NOCHECK: 15706 default: 15707 /* No retry count to check. Just proceed with the retry */ 15708 break; 15709 } 15710 15711 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15712 15713 /* 15714 * If this is a non-USCSI command being retried 15715 * during execution last time, we should post an ereport with 15716 * driver-assessment of the value "retry". 15717 * For partial DMA, request sense and STATUS_QFULL, there are no 15718 * hardware errors, we bypass ereport posting. 15719 */ 15720 if (failure_code != 0) { 15721 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15722 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15723 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY); 15724 } 15725 } 15726 15727 /* 15728 * If we were given a zero timeout, we must attempt to retry the 15729 * command immediately (ie, without a delay). 15730 */ 15731 if (retry_delay == 0) { 15732 /* 15733 * Check some limiting conditions to see if we can actually 15734 * do the immediate retry. If we cannot, then we must 15735 * fall back to queueing up a delayed retry. 15736 */ 15737 if (un->un_ncmds_in_transport >= un->un_throttle) { 15738 /* 15739 * We are at the throttle limit for the target, 15740 * fall back to delayed retry. 15741 */ 15742 retry_delay = un->un_busy_timeout; 15743 statp = kstat_waitq_enter; 15744 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15745 "sd_retry_command: immed. retry hit " 15746 "throttle!\n"); 15747 } else { 15748 /* 15749 * We're clear to proceed with the immediate retry. 15750 * First call the user-provided function (if any) 15751 */ 15752 if (user_funcp != NULL) { 15753 (*user_funcp)(un, bp, user_arg, 15754 SD_IMMEDIATE_RETRY_ISSUED); 15755 #ifdef __lock_lint 15756 sd_print_incomplete_msg(un, bp, user_arg, 15757 SD_IMMEDIATE_RETRY_ISSUED); 15758 sd_print_cmd_incomplete_msg(un, bp, user_arg, 15759 SD_IMMEDIATE_RETRY_ISSUED); 15760 sd_print_sense_failed_msg(un, bp, user_arg, 15761 SD_IMMEDIATE_RETRY_ISSUED); 15762 #endif 15763 } 15764 15765 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15766 "sd_retry_command: issuing immediate retry\n"); 15767 15768 /* 15769 * Call sd_start_cmds() to transport the command to 15770 * the target. 15771 */ 15772 sd_start_cmds(un, bp); 15773 15774 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15775 "sd_retry_command exit\n"); 15776 return; 15777 } 15778 } 15779 15780 /* 15781 * Set up to retry the command after a delay. 
	/*
	 * Set up to retry the command after a delay.
	 * First call the user-provided function (if any)
	 */
	if (user_funcp != NULL) {
		(*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
	}

	sd_set_retry_bp(un, bp, retry_delay, statp);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
	return;

fail_command:

	if (user_funcp != NULL) {
		(*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
	}

fail_command_no_log:

	SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_retry_command: returning failed command\n");

	sd_return_failed_command(un, bp, failure_code);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
}


/*
 * Function: sd_set_retry_bp
 *
 * Description: Set up the given bp for retry.
 *
 *   Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		retry_delay - time interval before issuing retry (may be 0)
 *		statp - optional pointer to kstat function
 *
 *     Context: May be called under interrupt context
 */

static void
sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
	void (*statp)(kstat_io_t *))
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);

	/*
	 * Indicate that the command is being retried. This will not allow any
	 * other commands on the wait queue to be transported to the target
	 * until this command has been completed (success or failure). The
	 * "retry command" is not transported to the target until the given
	 * time delay expires, unless the user specified a 0 retry_delay.
	 *
	 * Note: the timeout(9F) callback routine is what actually calls
	 * sd_start_cmds() to transport the command, with the exception of a
	 * zero retry_delay. The only current implementor of a zero retry delay
	 * is the case where a START_STOP_UNIT is sent to spin-up a device.
	 */
	if (un->un_retry_bp == NULL) {
		ASSERT(un->un_retry_statp == NULL);
		un->un_retry_bp = bp;

		/*
		 * If the user has not specified a delay the command should
		 * be queued and no timeout should be scheduled.
		 */
		if (retry_delay == 0) {
			/*
			 * Save the kstat pointer that will be used in the
			 * call to SD_UPDATE_KSTATS() below, so that
			 * sd_start_cmds() can correctly decrement the waitq
			 * count when it is time to transport this command.
			 */
			un->un_retry_statp = statp;
			goto done;
		}
	}

	if (un->un_retry_bp == bp) {
		/*
		 * Save the kstat pointer that will be used in the call to
		 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
		 * correctly decrement the waitq count when it is time to
		 * transport this command.
		 */
		un->un_retry_statp = statp;

		/*
		 * Schedule a timeout if:
		 *   1) The user has specified a delay.
		 *   2) There is not a START_STOP_UNIT callback pending.
		 *
		 * If no delay has been specified, then it is up to the caller
		 * to ensure that IO processing continues without stalling.
		 * Effectively, this means that the caller will issue the
		 * required call to sd_start_cmds().
		 * The START_STOP_UNIT callback does this after the START STOP
		 * UNIT command has completed. In either of these cases we
		 * should not schedule a timeout callback here.  Also don't
		 * schedule the timeout if an SD_PATH_DIRECT_PRIORITY command
		 * is waiting to restart.
		 */
		if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
		    (un->un_direct_priority_timeid == NULL)) {
			un->un_retry_timeid =
			    timeout(sd_start_retry_command, un, retry_delay);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_set_retry_bp: setting timeout: un: 0x%p"
			    " bp:0x%p un_retry_timeid:0x%p\n",
			    un, bp, un->un_retry_timeid);
		}
	} else {
		/*
		 * We only get in here if there is already another command
		 * waiting to be retried.  In this case, we just put the
		 * given command onto the wait queue, so it can be transported
		 * after the current retry command has completed.
		 *
		 * Also we have to make sure that if the command at the head
		 * of the wait queue is the un_failfast_bp, we do not put any
		 * other commands to be retried ahead of it.
		 */
		if ((un->un_failfast_bp != NULL) &&
		    (un->un_failfast_bp == un->un_waitq_headp)) {
			/*
			 * Enqueue this command AFTER the first command on
			 * the wait queue (which is also un_failfast_bp).
			 */
			bp->av_forw = un->un_waitq_headp->av_forw;
			un->un_waitq_headp->av_forw = bp;
			if (un->un_waitq_headp == un->un_waitq_tailp) {
				un->un_waitq_tailp = bp;
			}
		} else {
			/* Enqueue this command at the head of the waitq. */
			bp->av_forw = un->un_waitq_headp;
			un->un_waitq_headp = bp;
			if (un->un_waitq_tailp == NULL) {
				un->un_waitq_tailp = bp;
			}
		}

		if (statp == NULL) {
			statp = kstat_waitq_enter;
		}
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
	}

done:
	if (statp != NULL) {
		SD_UPDATE_KSTATS(un, statp, bp);
	}

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: exit un:0x%p\n", un);
}


/*
 * Function: sd_start_retry_command
 *
 * Description: Start the command that has been waiting on the target's
 *		retry queue.  Called from timeout(9F) context after the
 *		retry delay interval has expired.
 *
 *   Arguments: arg - pointer to associated softstate for the device.
 *
 *     Context: timeout(9F) thread context.  May not sleep.
 */

static void
sd_start_retry_command(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: entry\n");

	mutex_enter(SD_MUTEX(un));

	un->un_retry_timeid = NULL;

	if (un->un_retry_bp != NULL) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
		    un, un->un_retry_bp);
		sd_start_cmds(un, un->un_retry_bp);
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: exit\n");
}

/*
 * Function: sd_rmw_msg_print_handler
 *
 * Description: If RMW mode is enabled and the warning message is triggered,
 *		print the I/O count during a fixed interval.
 *
 *   Arguments: arg - pointer to associated softstate for the device.
 *
 *     Context: timeout(9F) thread context.  May not sleep.
 */
static void
sd_rmw_msg_print_handler(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_rmw_msg_print_handler: entry\n");

	mutex_enter(SD_MUTEX(un));

	if (un->un_rmw_incre_count > 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "%"PRIu64" I/O requests are not aligned with %d disk "
		    "sector size in %ld seconds. They are handled through "
		    "Read Modify Write but the performance is very low!\n",
		    un->un_rmw_incre_count, un->un_tgt_blocksize,
		    drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000);
		un->un_rmw_incre_count = 0;
		un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler,
		    un, SD_RMW_MSG_PRINT_TIMEOUT);
	} else {
		un->un_rmw_msg_timeid = NULL;
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_rmw_msg_print_handler: exit\n");
}

/*
 * Function: sd_start_direct_priority_command
 *
 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
 *		received TRAN_BUSY when we called scsi_transport() to send it
 *		to the underlying HBA. This function is called from timeout(9F)
 *		context after the delay interval has expired.
 *
 *   Arguments: arg - pointer to associated buf(9S) to be restarted.
 *
 *     Context: timeout(9F) thread context.  May not sleep.
 */

static void
sd_start_direct_priority_command(void *arg)
{
	struct buf	*priority_bp = arg;
	struct sd_lun	*un;

	ASSERT(priority_bp != NULL);
	un = SD_GET_UN(priority_bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_direct_priority_command: entry\n");

	mutex_enter(SD_MUTEX(un));
	un->un_direct_priority_timeid = NULL;
	sd_start_cmds(un, priority_bp);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_direct_priority_command: exit\n");
}


/*
 * Function: sd_send_request_sense_command
 *
 * Description: Sends a REQUEST SENSE command to the target
 *
 *     Context: May be called from interrupt context.
 */

static void
sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
	struct scsi_pkt *pktp)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
	    "entry: buf:0x%p\n", bp);

	/*
	 * If we are syncing or dumping, then fail the command to avoid a
	 * recursive callback into scsi_transport(). Also fail the command
	 * if we are suspended (legacy behavior).
	 */
	if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_DUMPING)) {
		sd_return_failed_command(un, bp, EIO);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_send_request_sense_command: syncing/dumping, exit\n");
		return;
	}

	/*
	 * Retry the failed command and don't issue the request sense if:
	 *   1) the sense buf is busy
	 *   2) we have 1 or more outstanding commands on the target
	 *      (the sense data will be cleared or invalidated anyway)
	 *
	 * Note: There could be an issue with not checking a retry limit here;
	 * the problem is determining which retry limit to check.
	 */
	if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
		/* Don't retry if the command is flagged as non-retryable */
		if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
			sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
			    NULL, NULL, 0, un->un_busy_timeout,
			    kstat_waitq_enter);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_send_request_sense_command: "
			    "at full throttle, retrying exit\n");
		} else {
			sd_return_failed_command(un, bp, EIO);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_send_request_sense_command: "
			    "at full throttle, non-retryable exit\n");
		}
		return;
	}

	sd_mark_rqs_busy(un, bp);
	sd_start_cmds(un, un->un_rqs_bp);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_send_request_sense_command: exit\n");
}


/*
 * Function: sd_mark_rqs_busy
 *
 * Description: Indicate that the request sense bp for this instance is
 *		in use.
 *
 *     Context: May be called under interrupt context
 */

static void
sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf	*sense_xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_sense_isbusy == 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
	    "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);

	sense_xp = SD_GET_XBUF(un->un_rqs_bp);
	ASSERT(sense_xp != NULL);

	SD_INFO(SD_LOG_IO, un,
	    "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);

	ASSERT(sense_xp->xb_pktp != NULL);
	ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
	    == (FLAG_SENSING | FLAG_HEAD));

	un->un_sense_isbusy = 1;
	un->un_rqs_bp->b_resid = 0;
	sense_xp->xb_pktp->pkt_resid = 0;
	sense_xp->xb_pktp->pkt_reason = 0;

	/* So we can get back the bp at interrupt time! */
	sense_xp->xb_sense_bp = bp;

	bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);

	/*
	 * Mark this buf as awaiting sense data. (This is already set in
	 * the pkt_flags for the RQS packet.)
	 */
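	/*
	 * (FLAG_SENSING is the link between the original command and its
	 * sense data: the completion path uses it to recognize that a
	 * returning packet was waiting on sense data, and sd_mark_rqs_idle()
	 * below clears it again once the sense data has been claimed.)
	 */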
16178	 */
16179	((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;
16180	
16181	/* Request sense down same path */
16182	if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) &&
16183	    ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance)
16184		sense_xp->xb_pktp->pkt_path_instance =
16185		    ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance;
16186	
16187	sense_xp->xb_retry_count = 0;
16188	sense_xp->xb_victim_retry_count = 0;
16189	sense_xp->xb_ua_retry_count = 0;
16190	sense_xp->xb_nr_retry_count = 0;
16191	sense_xp->xb_dma_resid = 0;
16192	
16193	/* Clean up the fields for auto-request sense */
16194	sense_xp->xb_sense_status = 0;
16195	sense_xp->xb_sense_state = 0;
16196	sense_xp->xb_sense_resid = 0;
16197	bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));
16198	
16199	SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
16200	}
16201	
16202	
16203	/*
16204	 * Function: sd_mark_rqs_idle
16205	 *
16206	 * Description: SD_MUTEX must be held continuously through this routine
16207	 *		to prevent reuse of the rqs struct before the caller can
16208	 *		complete its processing.
16209	 *
16210	 * Return Code: Pointer to the RQS buf
16211	 *
16212	 * Context: May be called under interrupt context
16213	 */
16214	
16215	static struct buf *
16216	sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
16217	{
16218	struct buf *bp;
16219	ASSERT(un != NULL);
16220	ASSERT(sense_xp != NULL);
16221	ASSERT(mutex_owned(SD_MUTEX(un)));
16222	ASSERT(un->un_sense_isbusy != 0);
16223	
16224	un->un_sense_isbusy = 0;
16225	bp = sense_xp->xb_sense_bp;
16226	sense_xp->xb_sense_bp = NULL;
16227	
16228	/* This pkt is no longer interested in getting sense data */
16229	((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;
16230	
16231	return (bp);
16232	}
16233	
16234	
16235	
16236	/*
16237	 * Function: sd_alloc_rqs
16238	 *
16239	 * Description: Set up the unit to receive auto request sense data
16240	 *
16241	 * Return Code: DDI_SUCCESS or DDI_FAILURE
16242	 *
16243	 * Context: Called under attach(9E) context
16244	 */
16245	
16246	static int
16247	sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
16248	{
16249	struct sd_xbuf *xp;
16250	
16251	ASSERT(un != NULL);
16252	ASSERT(!mutex_owned(SD_MUTEX(un)));
16253	ASSERT(un->un_rqs_bp == NULL);
16254	ASSERT(un->un_rqs_pktp == NULL);
16255	
16256	/*
16257	 * First allocate the required buf and scsi_pkt structs, then set up
16258	 * the CDB in the scsi_pkt for a REQUEST SENSE command.
16259	 */
16260	un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
16261	    MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
16262	if (un->un_rqs_bp == NULL) {
16263		return (DDI_FAILURE);
16264	}
16265	
16266	un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
16267	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);
16268	
16269	if (un->un_rqs_pktp == NULL) {
16270		sd_free_rqs(un);
16271		return (DDI_FAILURE);
16272	}
16273	
16274	/* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
16275	(void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
16276	    SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);
16277	
16278	SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);
16279	
16280	/* Set up the other needed members in the ARQ scsi_pkt. */
16281	un->un_rqs_pktp->pkt_comp = sdintr;
16282	un->un_rqs_pktp->pkt_time = sd_io_time;
16283	un->un_rqs_pktp->pkt_flags |=
16284	    (FLAG_SENSING | FLAG_HEAD);	/* (1222170) */
16285	
16286	/*
16287	 * Allocate & init the sd_xbuf struct for the RQS command.  Do not
16288	 * provide any initpkt, destroypkt routines as we take care of
16289	 * scsi_pkt allocation/freeing here and in sd_free_rqs().
16290	 */
16291	xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
16292	sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
16293	xp->xb_pktp = un->un_rqs_pktp;
16294	SD_INFO(SD_LOG_ATTACH_DETACH, un,
16295	    "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
16296	    un, xp, un->un_rqs_pktp, un->un_rqs_bp);
16297	
16298	/*
16299	 * Save the pointer to the request sense private bp so it can
16300	 * be retrieved in sdintr.
16301	 */
16302	un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
16303	ASSERT(un->un_rqs_bp->b_private == xp);
16304	
16305	/*
16306	 * See if the HBA supports auto-request sense for the specified
16307	 * target/lun. If it does, then try to enable it (if not already
16308	 * enabled).
16309	 *
16310	 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
16311	 * failure, while for other HBAs (pln) scsi_ifsetcap will always
16312	 * return success. However, in both of these cases ARQ is always
16313	 * enabled and scsi_ifgetcap will always return true. The best approach
16314	 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
16315	 *
16316	 * The third case is an HBA (adp) that always returns enabled on
16317	 * scsi_ifgetcap even when ARQ is not enabled; there the best approach
16318	 * is to issue a scsi_ifsetcap first, then a scsi_ifgetcap.
16319	 * Note: this case is to circumvent the Adaptec bug. (x86 only)
16320	 */
16321	
16322	if (un->un_f_is_fibre == TRUE) {
16323		un->un_f_arq_enabled = TRUE;
16324	} else {
16325	#if defined(__i386) || defined(__amd64)
16326		/*
16327		 * Circumvent the Adaptec bug; remove this code when
16328		 * the bug is fixed.
16329		 */
16330		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
16331	#endif
16332		switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
16333		case 0:
16334			SD_INFO(SD_LOG_ATTACH_DETACH, un,
16335			    "sd_alloc_rqs: HBA supports ARQ\n");
16336			/*
16337			 * ARQ is supported by this HBA but currently is not
16338			 * enabled. Attempt to enable it and if successful then
16339			 * mark this instance as ARQ enabled.
16340			 */
16341			if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
16342			    == 1) {
16343				/* Successfully enabled ARQ in the HBA */
16344				SD_INFO(SD_LOG_ATTACH_DETACH, un,
16345				    "sd_alloc_rqs: ARQ enabled\n");
16346				un->un_f_arq_enabled = TRUE;
16347			} else {
16348				/* Could not enable ARQ in the HBA */
16349				SD_INFO(SD_LOG_ATTACH_DETACH, un,
16350				    "sd_alloc_rqs: failed ARQ enable\n");
16351				un->un_f_arq_enabled = FALSE;
16352			}
16353			break;
16354		case 1:
16355			/*
16356			 * ARQ is supported by this HBA and is already enabled.
16357			 * Just mark ARQ as enabled for this instance.
16358			 */
16359			SD_INFO(SD_LOG_ATTACH_DETACH, un,
16360			    "sd_alloc_rqs: ARQ already enabled\n");
16361			un->un_f_arq_enabled = TRUE;
16362			break;
16363		default:
16364			/*
16365			 * ARQ is not supported by this HBA; disable it for this
16366			 * instance.
16367			 */
16368			SD_INFO(SD_LOG_ATTACH_DETACH, un,
16369			    "sd_alloc_rqs: HBA does not support ARQ\n");
16370			un->un_f_arq_enabled = FALSE;
16371			break;
16372		}
16373	}
16374	
16375	return (DDI_SUCCESS);
16376	}
16377	
16378	
16379	/*
16380	 * Function: sd_free_rqs
16381	 *
16382	 * Description: Cleanup for the per-instance RQS command.
16383	 *
16384	 * Context: Kernel thread context
16385	 */
16386	
16387	static void
16388	sd_free_rqs(struct sd_lun *un)
16389	{
16390	ASSERT(un != NULL);
16391	
16392	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");
16393	
16394	/*
16395	 * If consistent memory is bound to a scsi_pkt, the pkt
16396	 * has to be destroyed *before* freeing the consistent memory.
16397	 * Don't change the sequence of these operations.
16398	 * scsi_destroy_pkt() might access memory after it was freed in
16399	 * scsi_free_consistent_buf(), which isn't allowed.
16400	 */
16401	if (un->un_rqs_pktp != NULL) {
16402		scsi_destroy_pkt(un->un_rqs_pktp);
16403		un->un_rqs_pktp = NULL;
16404	}
16405	
16406	if (un->un_rqs_bp != NULL) {
16407		struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp);
16408		if (xp != NULL) {
16409			kmem_free(xp, sizeof (struct sd_xbuf));
16410		}
16411		scsi_free_consistent_buf(un->un_rqs_bp);
16412		un->un_rqs_bp = NULL;
16413	}
16414	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
16415	}
16416	
16417	
16418	
16419	/*
16420	 * Function: sd_reduce_throttle
16421	 *
16422	 * Description: Reduces the maximum # of outstanding commands on a
16423	 *		target to the current number of outstanding commands.
16424	 *		Queues a timeout(9F) callback to restore the limit
16425	 *		after a specified interval has elapsed.
16426	 *		Typically used when we get a TRAN_BUSY return code
16427	 *		back from scsi_transport().
16428	 *
16429	 * Arguments: un - ptr to the sd_lun softstate struct
16430	 *	    throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
16431	 *
16432	 * Context: May be called from interrupt context
16433	 */
16434	
16435	static void
16436	sd_reduce_throttle(struct sd_lun *un, int throttle_type)
16437	{
16438	ASSERT(un != NULL);
16439	ASSERT(mutex_owned(SD_MUTEX(un)));
16440	ASSERT(un->un_ncmds_in_transport >= 0);
16441	
16442	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16443	    "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
16444	    un, un->un_throttle, un->un_ncmds_in_transport);
16445	
16446	if (un->un_throttle > 1) {
16447		if (un->un_f_use_adaptive_throttle == TRUE) {
16448			switch (throttle_type) {
16449			case SD_THROTTLE_TRAN_BUSY:
16450				if (un->un_busy_throttle == 0) {
16451					un->un_busy_throttle = un->un_throttle;
16452				}
16453				break;
16454			case SD_THROTTLE_QFULL:
16455				un->un_busy_throttle = 0;
16456				break;
16457			default:
16458				ASSERT(FALSE);
16459			}
16460	
16461			if (un->un_ncmds_in_transport > 0) {
16462				un->un_throttle = un->un_ncmds_in_transport;
16463			}
16464	
16465		} else {
16466			if (un->un_ncmds_in_transport == 0) {
16467				un->un_throttle = 1;
16468			} else {
16469				un->un_throttle = un->un_ncmds_in_transport;
16470			}
16471		}
16472	}
16473	
16474	/* Reschedule the timeout if none is currently active */
16475	if (un->un_reset_throttle_timeid == NULL) {
16476		un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
16477		    un, SD_THROTTLE_RESET_INTERVAL);
16478		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16479		    "sd_reduce_throttle: timeout scheduled!\n");
16480	}
16481	
16482	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16483	    "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16484	}
16485	
16486	
16487	
16488	/*
16489	 * Function: sd_restore_throttle
16490	 *
16491	 * Description: Callback function for timeout(9F). Resets the current
16492	 *		value of un->un_throttle to its default.
16493	 *
16494	 * Arguments: arg - pointer to associated softstate for the device.
16495	 *
16496	 * Context: May be called from interrupt context
16497	 */
16498	
16499	static void
16500	sd_restore_throttle(void *arg)
16501	{
16502	struct sd_lun *un = arg;
16503	
16504	ASSERT(un != NULL);
16505	ASSERT(!mutex_owned(SD_MUTEX(un)));
16506	
16507	mutex_enter(SD_MUTEX(un));
16508	
16509	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
16510	    "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16511	
16512	un->un_reset_throttle_timeid = NULL;
16513	
16514	if (un->un_f_use_adaptive_throttle == TRUE) {
16515		/*
16516		 * If un_busy_throttle is nonzero, then it contains the
16517		 * value that un_throttle had when we got a TRAN_BUSY back
16518		 * from scsi_transport(). We want to revert back to this
16519		 * value.
16520		 *
16521		 * In the QFULL case, the throttle limit will incrementally
16522		 * increase until it reaches max throttle.
16523		 */
16524		if (un->un_busy_throttle > 0) {
16525			un->un_throttle = un->un_busy_throttle;
16526			un->un_busy_throttle = 0;
16527		} else {
16528			/*
16529			 * Increase the throttle by 10% to open the gate
16530			 * slowly; schedule another restore if the saved
16531			 * throttle has not yet been reached.
16532			 */
16533			short throttle;
16534			if (sd_qfull_throttle_enable) {
16535				throttle = un->un_throttle +
16536				    max((un->un_throttle / 10), 1);
16537				un->un_throttle =
16538				    (throttle < un->un_saved_throttle) ?
16539				    throttle : un->un_saved_throttle;
16540				if (un->un_throttle < un->un_saved_throttle) {
16541					un->un_reset_throttle_timeid =
16542					    timeout(sd_restore_throttle,
16543					    un,
16544					    SD_QFULL_THROTTLE_RESET_INTERVAL);
16545				}
16546			}
16547		}
16548	
16549		/*
16550		 * If un_throttle has fallen below the low-water mark, we
16551		 * restore the maximum value here (and allow it to ratchet
16552		 * down again if necessary).
16553		 */
16554		if (un->un_throttle < un->un_min_throttle) {
16555			un->un_throttle = un->un_saved_throttle;
16556		}
16557	} else {
16558		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
16559		    "restoring limit from 0x%x to 0x%x\n",
16560		    un->un_throttle, un->un_saved_throttle);
16561		un->un_throttle = un->un_saved_throttle;
16562	}
16563	
16564	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16565	    "sd_restore_throttle: calling sd_start_cmds!\n");
16566	
16567	sd_start_cmds(un, NULL);
16568	
16569	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16570	    "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
16571	    un, un->un_throttle);
16572	
16573	mutex_exit(SD_MUTEX(un));
16574	
16575	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
16576	}
16577	
16578	/*
16579	 * Function: sdrunout
16580	 *
16581	 * Description: Callback routine for scsi_init_pkt when a resource
16582	 *		allocation fails.
16583	 *
16584	 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
16585	 *	    soft state instance.
16586	 *
16587	 * Return Code: The scsi_init_pkt routine allows for the callback function
16588	 *		to return a 0 indicating the callback should be rescheduled
16589	 *		or a 1 indicating not to reschedule. This routine always
16590	 *		returns 1 because the driver always provides a callback
16591	 *		function to scsi_init_pkt. This results in a callback always
16592	 *		being scheduled (via the scsi_init_pkt callback
16593	 *		implementation) if a resource failure occurs.
16594	 *
16595	 * Context: This callback function may not block or call routines that
16596	 *	    block.
16597	 *
16598	 * Note: Using the scsi_init_pkt callback facility can result in an I/O
16599	 *	 request persisting at the head of the list which cannot be
16600	 *	 satisfied even after multiple retries.
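 *
 *	 For orientation, the call that arms this callback looks roughly
 *	 like the following sketch (argument names are illustrative; the
 *	 real call sites are in the sd_initpkt_for_* routines):
 *
 *		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, bp, cdb_len,
 *		    stat_len, 0, pkt_flags, sdrunout, (caddr_t)un);
 *		if (pktp == NULL)
 *			return;    sdrunout() fires when resources free up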
 *	 In the future the driver may implement some type of maximum
 *	 runout count before failing an I/O.
16602	 */
16603	
16604	static int
16605	sdrunout(caddr_t arg)
16606	{
16607	struct sd_lun *un = (struct sd_lun *)arg;
16608	
16609	ASSERT(un != NULL);
16610	ASSERT(!mutex_owned(SD_MUTEX(un)));
16611	
16612	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");
16613	
16614	mutex_enter(SD_MUTEX(un));
16615	sd_start_cmds(un, NULL);
16616	mutex_exit(SD_MUTEX(un));
16617	/*
16618	 * This callback routine always returns 1 (i.e. do not reschedule)
16619	 * because we always specify sdrunout as the callback handler for
16620	 * scsi_init_pkt inside the call to sd_start_cmds.
16621	 */
16622	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
16623	return (1);
16624	}
16625	
16626	
16627	/*
16628	 * Function: sdintr
16629	 *
16630	 * Description: Completion callback routine for scsi_pkt(9S) structs
16631	 *		sent to the HBA driver via scsi_transport(9F).
16632	 *
16633	 * Context: Interrupt context
16634	 */
16635	
16636	static void
16637	sdintr(struct scsi_pkt *pktp)
16638	{
16639	struct buf	*bp;
16640	struct sd_xbuf	*xp;
16641	struct sd_lun	*un;
16642	size_t		actual_len;
16643	sd_ssc_t	*sscp;
16644	
16645	ASSERT(pktp != NULL);
16646	bp = (struct buf *)pktp->pkt_private;
16647	ASSERT(bp != NULL);
16648	xp = SD_GET_XBUF(bp);
16649	ASSERT(xp != NULL);
16650	ASSERT(xp->xb_pktp != NULL);
16651	un = SD_GET_UN(bp);
16652	ASSERT(un != NULL);
16653	ASSERT(!mutex_owned(SD_MUTEX(un)));
16654	
16655	#ifdef SD_FAULT_INJECTION
16656	
16657	SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n");
16658	/* SD FaultInjection */
16659	sd_faultinjection(pktp);
16660	
16661	#endif /* SD_FAULT_INJECTION */
16662	
16663	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p,"
16664	    " xp:0x%p, un:0x%p\n", bp, xp, un);
16665	
16666	mutex_enter(SD_MUTEX(un));
16667	
16668	ASSERT(un->un_fm_private != NULL);
16669	sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
16670	ASSERT(sscp != NULL);
16671	
16672	/* Reduce the count of the # of commands currently in transport */
16673	un->un_ncmds_in_transport--;
16674	ASSERT(un->un_ncmds_in_transport >= 0);
16675	
16676	/* Increment counter to indicate that the callback routine is active */
16677	un->un_in_callback++;
16678	
16679	SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
16680	
16681	#ifdef SDDEBUG
16682	if (bp == un->un_retry_bp) {
16683		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: "
16684		    "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n",
16685		    un, un->un_retry_bp, un->un_ncmds_in_transport);
16686	}
16687	#endif
16688	
16689	/*
16690	 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the
16691	 * media state if needed.
16692	 */
16693	if (pktp->pkt_reason == CMD_DEV_GONE) {
16694		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16695		    "Command failed to complete...Device is gone\n");
16696		if (un->un_mediastate != DKIO_DEV_GONE) {
16697			un->un_mediastate = DKIO_DEV_GONE;
16698			cv_broadcast(&un->un_state_cv);
16699		}
16700		/*
16701		 * If the command happens to be the REQUEST SENSE command,
16702		 * free up the rqs buf and fail the original command.
16703		 */
16704		if (bp == un->un_rqs_bp) {
16705			bp = sd_mark_rqs_idle(un, xp);
16706		}
16707		sd_return_failed_command(un, bp, EIO);
16708		goto exit;
16709	}
16710	
16711	if (pktp->pkt_state & STATE_XARQ_DONE) {
16712		SD_TRACE(SD_LOG_COMMON, un,
16713		    "sdintr: extra sense data received.
pkt=%p\n", pktp); 16714 } 16715 16716 /* 16717 * First see if the pkt has auto-request sense data with it.... 16718 * Look at the packet state first so we don't take a performance 16719 * hit looking at the arq enabled flag unless absolutely necessary. 16720 */ 16721 if ((pktp->pkt_state & STATE_ARQ_DONE) && 16722 (un->un_f_arq_enabled == TRUE)) { 16723 /* 16724 * The HBA did an auto request sense for this command so check 16725 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16726 * driver command that should not be retried. 16727 */ 16728 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16729 /* 16730 * Save the relevant sense info into the xp for the 16731 * original cmd. 16732 */ 16733 struct scsi_arq_status *asp; 16734 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16735 xp->xb_sense_status = 16736 *((uchar_t *)(&(asp->sts_rqpkt_status))); 16737 xp->xb_sense_state = asp->sts_rqpkt_state; 16738 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16739 if (pktp->pkt_state & STATE_XARQ_DONE) { 16740 actual_len = MAX_SENSE_LENGTH - 16741 xp->xb_sense_resid; 16742 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16743 MAX_SENSE_LENGTH); 16744 } else { 16745 if (xp->xb_sense_resid > SENSE_LENGTH) { 16746 actual_len = MAX_SENSE_LENGTH - 16747 xp->xb_sense_resid; 16748 } else { 16749 actual_len = SENSE_LENGTH - 16750 xp->xb_sense_resid; 16751 } 16752 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16753 if ((((struct uscsi_cmd *) 16754 (xp->xb_pktinfo))->uscsi_rqlen) > 16755 actual_len) { 16756 xp->xb_sense_resid = 16757 (((struct uscsi_cmd *) 16758 (xp->xb_pktinfo))-> 16759 uscsi_rqlen) - actual_len; 16760 } else { 16761 xp->xb_sense_resid = 0; 16762 } 16763 } 16764 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16765 SENSE_LENGTH); 16766 } 16767 16768 /* fail the command */ 16769 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16770 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 16771 sd_return_failed_command(un, bp, EIO); 16772 goto exit; 16773 } 16774 16775 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16776 /* 16777 * We want to either retry or fail this command, so free 16778 * the DMA resources here. If we retry the command then 16779 * the DMA resources will be reallocated in sd_start_cmds(). 16780 * Note that when PKT_DMA_PARTIAL is used, this reallocation 16781 * causes the *entire* transfer to start over again from the 16782 * beginning of the request, even for PARTIAL chunks that 16783 * have already transferred successfully. 16784 */ 16785 if ((un->un_f_is_fibre == TRUE) && 16786 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16787 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16788 scsi_dmafree(pktp); 16789 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16790 } 16791 #endif 16792 16793 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16794 "sdintr: arq done, sd_handle_auto_request_sense\n"); 16795 16796 sd_handle_auto_request_sense(un, bp, xp, pktp); 16797 goto exit; 16798 } 16799 16800 /* Next see if this is the REQUEST SENSE pkt for the instance */ 16801 if (pktp->pkt_flags & FLAG_SENSING) { 16802 /* This pktp is from the unit's REQUEST_SENSE command */ 16803 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16804 "sdintr: sd_handle_request_sense\n"); 16805 sd_handle_request_sense(un, bp, xp, pktp); 16806 goto exit; 16807 } 16808 16809 /* 16810 * Check to see if the command successfully completed as requested; 16811 * this is the most common case (and also the hot performance path). 
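 * In predicate form the test below amounts to (sketch only; the
 * authoritative rules are itemized next):
 *
 *	(pkt_reason == CMD_CMPLT) && (status == STATUS_GOOD) &&
 *	    ((pkt_resid == 0) || (cmd is not a read/write) ||
 *	    (cmd is a USCSI request))
 *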
16812	 *
16813	 * Requirements for successful completion are:
16814	 *	pkt_reason is CMD_CMPLT and packet status is status good.
16815	 * In addition:
16816	 *	- A residual of zero indicates successful completion no matter what
16817	 *	  the command is.
16818	 *	- If the residual is not zero and the command is not a read or
16819	 *	  write, then it's still defined as successful completion. In other
16820	 *	  words, if the command is a read or write the residual must be
16821	 *	  zero for successful completion (except USCSI, per the next rule).
16822	 *	- If the residual is not zero and the command is a read or
16823	 *	  write, and it's a USCSICMD, then it's still defined as
16824	 *	  successful completion.
16825	 */
16826	if ((pktp->pkt_reason == CMD_CMPLT) &&
16827	    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) {
16828	
16829		/*
16830		 * Since this command is returned with a good status, we
16831		 * can reset the count for Sonoma failover.
16832		 */
16833		un->un_sonoma_failure_count = 0;
16834	
16835		/*
16836		 * Return all USCSI commands on good status
16837		 */
16838		if (pktp->pkt_resid == 0) {
16839			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16840			    "sdintr: returning command for resid == 0\n");
16841		} else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) &&
16842		    ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) {
16843			SD_UPDATE_B_RESID(bp, pktp);
16844			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16845			    "sdintr: returning command for resid != 0\n");
16846		} else if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
16847			SD_UPDATE_B_RESID(bp, pktp);
16848			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16849			    "sdintr: returning uscsi command\n");
16850		} else {
16851			goto not_successful;
16852		}
16853		sd_return_command(un, bp);
16854	
16855		/*
16856		 * Decrement counter to indicate that the callback routine
16857		 * is done.
16858		 */
16859		un->un_in_callback--;
16860		ASSERT(un->un_in_callback >= 0);
16861		mutex_exit(SD_MUTEX(un));
16862	
16863		return;
16864	}
16865	
16866	not_successful:
16867	
16868	#if (defined(__i386) || defined(__amd64))	/* DMAFREE for x86 only */
16869	/*
16870	 * The following is based upon knowledge of the underlying transport
16871	 * and its use of DMA resources. This code should be removed when
16872	 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor
16873	 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf()
16874	 * and sd_start_cmds().
16875	 *
16876	 * Free any DMA resources associated with this command if there
16877	 * is a chance it could be retried or enqueued for later retry.
16878	 * If we keep the DMA binding then mpxio cannot reissue the
16879	 * command on another path whenever a path failure occurs.
16880	 *
16881	 * Note that when PKT_DMA_PARTIAL is used, free/reallocation
16882	 * causes the *entire* transfer to start over again from the
16883	 * beginning of the request, even for PARTIAL chunks that
16884	 * have already transferred successfully.
16885	 *
16886	 * This is only done for non-uscsi commands (and also skipped for the
16887	 * driver's internal RQS command). Also just do this for Fibre Channel
16888	 * devices as these are the only ones that support mpxio.
16889	 */
16890	if ((un->un_f_is_fibre == TRUE) &&
16891	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
16892	    ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
16893		scsi_dmafree(pktp);
16894		xp->xb_pkt_flags |= SD_XB_DMA_FREED;
16895	}
16896	#endif
16897	
16898	/*
16899	 * The command did not successfully complete as requested so check
16900	 * for FLAG_DIAGNOSE.  If set, this indicates a uscsi or internal
16901	 * driver command that should not be retried, so just return. If
16902	 * FLAG_DIAGNOSE is not set the error will be processed below.
16903	 */
16904	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
16905		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16906		    "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n");
16907		/*
16908		 * Issue a request sense if a check condition caused the error
16909		 * (we handle the auto request sense case above), otherwise
16910		 * just fail the command.
16911		 */
16912		if ((pktp->pkt_reason == CMD_CMPLT) &&
16913		    (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
16914			sd_send_request_sense_command(un, bp, pktp);
16915		} else {
16916			sd_return_failed_command(un, bp, EIO);
16917		}
16918		goto exit;
16919	}
16920	
16921	/*
16922	 * The command did not successfully complete as requested so process
16923	 * the error, retry, and/or attempt recovery.
16924	 */
16925	switch (pktp->pkt_reason) {
16926	case CMD_CMPLT:
16927		switch (SD_GET_PKT_STATUS(pktp)) {
16928		case STATUS_GOOD:
16929			/*
16930			 * The command completed successfully with a non-zero
16931			 * residual
16932			 */
16933			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16934			    "sdintr: STATUS_GOOD \n");
16935			sd_pkt_status_good(un, bp, xp, pktp);
16936			break;
16937	
16938		case STATUS_CHECK:
16939		case STATUS_TERMINATED:
16940			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16941			    "sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
16942			sd_pkt_status_check_condition(un, bp, xp, pktp);
16943			break;
16944	
16945		case STATUS_BUSY:
16946			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16947			    "sdintr: STATUS_BUSY\n");
16948			sd_pkt_status_busy(un, bp, xp, pktp);
16949			break;
16950	
16951		case STATUS_RESERVATION_CONFLICT:
16952			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16953			    "sdintr: STATUS_RESERVATION_CONFLICT\n");
16954			sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
16955			break;
16956	
16957		case STATUS_QFULL:
16958			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16959			    "sdintr: STATUS_QFULL\n");
16960			sd_pkt_status_qfull(un, bp, xp, pktp);
16961			break;
16962	
16963		case STATUS_MET:
16964		case STATUS_INTERMEDIATE:
16965		case STATUS_SCSI2:
16966		case STATUS_INTERMEDIATE_MET:
16967		case STATUS_ACA_ACTIVE:
16968			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16969			    "Unexpected SCSI status received: 0x%x\n",
16970			    SD_GET_PKT_STATUS(pktp));
16971			/*
16972			 * Mark the ssc_flags when an invalid status code
16973			 * is detected for a non-USCSI command.
16974			 */
16975			if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
16976				sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
16977				    0, "stat-code");
16978			}
16979			sd_return_failed_command(un, bp, EIO);
16980			break;
16981	
16982		default:
16983			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16984			    "Invalid SCSI status received: 0x%x\n",
16985			    SD_GET_PKT_STATUS(pktp));
16986			if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
16987				sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
16988				    0, "stat-code");
16989			}
16990			sd_return_failed_command(un, bp, EIO);
16991			break;
16992	
16993		}
16994		break;
16995	
16996	case CMD_INCOMPLETE:
16997		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16998		    "sdintr: CMD_INCOMPLETE\n");
16999		sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
17000		break;
17001	case CMD_TRAN_ERR:
17002		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17003		    "sdintr: CMD_TRAN_ERR\n");
17004		sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
17005		break;
17006	case CMD_RESET:
17007		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17008		    "sdintr: CMD_RESET \n");
17009		sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
17010		break;
17011	case CMD_ABORTED:
17012		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17013		    "sdintr: CMD_ABORTED \n");
17014		sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
17015		break;
17016	case CMD_TIMEOUT:
17017		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17018		    "sdintr: CMD_TIMEOUT\n");
17019		sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
17020		break;
17021	case CMD_UNX_BUS_FREE:
17022		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17023		    "sdintr: CMD_UNX_BUS_FREE \n");
17024		sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
17025		break;
17026	case CMD_TAG_REJECT:
17027		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17028		    "sdintr: CMD_TAG_REJECT\n");
17029		sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
17030		break;
17031	default:
17032		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17033		    "sdintr: default\n");
17034		/*
17035		 * Mark the ssc_flags for detecting an invalid pkt_reason.
17036		 */
17037		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17038			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON,
17039			    0, "pkt-reason");
17040		}
17041		sd_pkt_reason_default(un, bp, xp, pktp);
17042		break;
17043	}
17044	
17045	exit:
17046	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");
17047	
17048	/* Decrement counter to indicate that the callback routine is done. */
17049	un->un_in_callback--;
17050	ASSERT(un->un_in_callback >= 0);
17051	
17052	/*
17053	 * At this point, the pkt has been dispatched, i.e., it is either
17054	 * being retried or has been returned to its caller and should
17055	 * not be referenced.
17056	 */
17057	
17058	mutex_exit(SD_MUTEX(un));
17059	}
17060	
17061	
17062	/*
17063	 * Function: sd_print_incomplete_msg
17064	 *
17065	 * Description: Prints the error message for a CMD_INCOMPLETE error.
17066	 *
17067	 * Arguments: un - ptr to associated softstate for the device.
17068	 *	    bp - ptr to the buf(9S) for the command.
17069	 *	    arg - message string ptr
17070	 *	    code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
17071	 *		   or SD_NO_RETRY_ISSUED.
17072	 *
17073	 * Context: May be called under interrupt context
17074	 */
17075	
17076	static void
17077	sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
17078	{
17079	struct scsi_pkt	*pktp;
17080	char	*msgp;
17081	char	*cmdp = arg;
17082	
17083	ASSERT(un != NULL);
17084	ASSERT(mutex_owned(SD_MUTEX(un)));
17085	ASSERT(bp != NULL);
17086	ASSERT(arg != NULL);
17087	pktp = SD_GET_PKTP(bp);
17088	ASSERT(pktp != NULL);
17089	
17090	switch (code) {
17091	case SD_DELAYED_RETRY_ISSUED:
17092	case SD_IMMEDIATE_RETRY_ISSUED:
17093		msgp = "retrying";
17094		break;
17095	case SD_NO_RETRY_ISSUED:
17096	default:
17097		msgp = "giving up";
17098		break;
17099	}
17100	
17101	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
17102		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17103		    "incomplete %s- %s\n", cmdp, msgp);
17104	}
17105	}
17106	
17107	
17108	
17109	/*
17110	 * Function: sd_pkt_status_good
17111	 *
17112	 * Description: Processing for a STATUS_GOOD code in pkt_status.
17113	 *
17114	 * Context: May be called under interrupt context
17115	 */
17116	
17117	static void
17118	sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
17119	    struct sd_xbuf *xp, struct scsi_pkt *pktp)
17120	{
17121	char	*cmdp;
17122	
17123	ASSERT(un != NULL);
17124	ASSERT(mutex_owned(SD_MUTEX(un)));
17125	ASSERT(bp != NULL);
17126	ASSERT(xp != NULL);
17127	ASSERT(pktp != NULL);
17128	ASSERT(pktp->pkt_reason == CMD_CMPLT);
17129	ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
17130	ASSERT(pktp->pkt_resid != 0);
17131	
17132	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");
17133	
17134	SD_UPDATE_ERRSTATS(un, sd_harderrs);
17135	switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
17136	case SCMD_READ:
17137		cmdp = "read";
17138		break;
17139	case SCMD_WRITE:
17140		cmdp = "write";
17141		break;
17142	default:
17143		SD_UPDATE_B_RESID(bp, pktp);
17144		sd_return_command(un, bp);
17145		SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
17146		return;
17147	}
17148	
17149	/*
17150	 * See if we can retry the read/write, preferably immediately.
17151	 * If retries are exhausted, then sd_retry_command() will update
17152	 * the b_resid count.
17153	 */
17154	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
17155	    cmdp, EIO, (clock_t)0, NULL);
17156	
17157	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
17158	}
17159	
17160	
17161	
17162	
17163	
17164	/*
17165	 * Function: sd_handle_request_sense
17166	 *
17167	 * Description: Processing for non-auto Request Sense command.
17168	 *
17169	 * Arguments: un - ptr to associated softstate
17170	 *	    sense_bp - ptr to buf(9S) for the RQS command
17171	 *	    sense_xp - ptr to the sd_xbuf for the RQS command
17172	 *	    sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
17173	 *
17174	 * Context: May be called under interrupt context
17175	 */
17176	
17177	static void
17178	sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
17179	    struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
17180	{
17181	struct buf	*cmd_bp;	/* buf for the original command */
17182	struct sd_xbuf	*cmd_xp;	/* sd_xbuf for the original command */
17183	struct scsi_pkt *cmd_pktp;	/* pkt for the original command */
17184	size_t		actual_len;	/* actual sense data length */
17185	
17186	ASSERT(un != NULL);
17187	ASSERT(mutex_owned(SD_MUTEX(un)));
17188	ASSERT(sense_bp != NULL);
17189	ASSERT(sense_xp != NULL);
17190	ASSERT(sense_pktp != NULL);
17191	
17192	/*
17193	 * Note the sense_bp, sense_xp, and sense_pktp here are for the
17194	 * RQS command and not the original command.
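 *
 * Two request layers are in play here (sketch, for orientation only;
 * the assertions and assignments below are the authoritative version):
 *
 *	sense_pktp == un->un_rqs_pktp	the RQS pkt, with FLAG_SENSING
 *					and FLAG_HEAD set
 *	cmd_bp = sense_xp->xb_sense_bp	back-pointer to the original
 *					command, whose own pkt carries
 *					FLAG_SENSING while sensing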
17195 */ 17196 ASSERT(sense_pktp == un->un_rqs_pktp); 17197 ASSERT(sense_bp == un->un_rqs_bp); 17198 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 17199 (FLAG_SENSING | FLAG_HEAD)); 17200 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 17201 FLAG_SENSING) == FLAG_SENSING); 17202 17203 /* These are the bp, xp, and pktp for the original command */ 17204 cmd_bp = sense_xp->xb_sense_bp; 17205 cmd_xp = SD_GET_XBUF(cmd_bp); 17206 cmd_pktp = SD_GET_PKTP(cmd_bp); 17207 17208 if (sense_pktp->pkt_reason != CMD_CMPLT) { 17209 /* 17210 * The REQUEST SENSE command failed. Release the REQUEST 17211 * SENSE command for re-use, get back the bp for the original 17212 * command, and attempt to re-try the original command if 17213 * FLAG_DIAGNOSE is not set in the original packet. 17214 */ 17215 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17216 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 17217 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 17218 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 17219 NULL, NULL, EIO, (clock_t)0, NULL); 17220 return; 17221 } 17222 } 17223 17224 /* 17225 * Save the relevant sense info into the xp for the original cmd. 17226 * 17227 * Note: if the request sense failed the state info will be zero 17228 * as set in sd_mark_rqs_busy() 17229 */ 17230 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 17231 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 17232 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 17233 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 17234 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 17235 SENSE_LENGTH)) { 17236 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 17237 MAX_SENSE_LENGTH); 17238 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 17239 } else { 17240 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 17241 SENSE_LENGTH); 17242 if (actual_len < SENSE_LENGTH) { 17243 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 17244 } else { 17245 cmd_xp->xb_sense_resid = 0; 17246 } 17247 } 17248 17249 /* 17250 * Free up the RQS command.... 17251 * NOTE: 17252 * Must do this BEFORE calling sd_validate_sense_data! 17253 * sd_validate_sense_data may return the original command in 17254 * which case the pkt will be freed and the flags can no 17255 * longer be touched. 17256 * SD_MUTEX is held through this process until the command 17257 * is dispatched based upon the sense data, so there are 17258 * no race conditions. 17259 */ 17260 (void) sd_mark_rqs_idle(un, sense_xp); 17261 17262 /* 17263 * For a retryable command see if we have valid sense data, if so then 17264 * turn it over to sd_decode_sense() to figure out the right course of 17265 * action. Just fail a non-retryable command. 17266 */ 17267 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 17268 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 17269 SD_SENSE_DATA_IS_VALID) { 17270 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 17271 } 17272 } else { 17273 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 17274 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17275 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 17276 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 17277 sd_return_failed_command(un, cmd_bp, EIO); 17278 } 17279 } 17280 17281 17282 17283 17284 /* 17285 * Function: sd_handle_auto_request_sense 17286 * 17287 * Description: Processing for auto-request sense information. 
17288 * 17289 * Arguments: un - ptr to associated softstate 17290 * bp - ptr to buf(9S) for the command 17291 * xp - ptr to the sd_xbuf for the command 17292 * pktp - ptr to the scsi_pkt(9S) for the command 17293 * 17294 * Context: May be called under interrupt context 17295 */ 17296 17297 static void 17298 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 17299 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17300 { 17301 struct scsi_arq_status *asp; 17302 size_t actual_len; 17303 17304 ASSERT(un != NULL); 17305 ASSERT(mutex_owned(SD_MUTEX(un))); 17306 ASSERT(bp != NULL); 17307 ASSERT(xp != NULL); 17308 ASSERT(pktp != NULL); 17309 ASSERT(pktp != un->un_rqs_pktp); 17310 ASSERT(bp != un->un_rqs_bp); 17311 17312 /* 17313 * For auto-request sense, we get a scsi_arq_status back from 17314 * the HBA, with the sense data in the sts_sensedata member. 17315 * The pkt_scbp of the packet points to this scsi_arq_status. 17316 */ 17317 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 17318 17319 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 17320 /* 17321 * The auto REQUEST SENSE failed; see if we can re-try 17322 * the original command. 17323 */ 17324 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17325 "auto request sense failed (reason=%s)\n", 17326 scsi_rname(asp->sts_rqpkt_reason)); 17327 17328 sd_reset_target(un, pktp); 17329 17330 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17331 NULL, NULL, EIO, (clock_t)0, NULL); 17332 return; 17333 } 17334 17335 /* Save the relevant sense info into the xp for the original cmd. */ 17336 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 17337 xp->xb_sense_state = asp->sts_rqpkt_state; 17338 xp->xb_sense_resid = asp->sts_rqpkt_resid; 17339 if (xp->xb_sense_state & STATE_XARQ_DONE) { 17340 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 17341 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 17342 MAX_SENSE_LENGTH); 17343 } else { 17344 if (xp->xb_sense_resid > SENSE_LENGTH) { 17345 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 17346 } else { 17347 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 17348 } 17349 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 17350 if ((((struct uscsi_cmd *) 17351 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 17352 xp->xb_sense_resid = (((struct uscsi_cmd *) 17353 (xp->xb_pktinfo))->uscsi_rqlen) - 17354 actual_len; 17355 } else { 17356 xp->xb_sense_resid = 0; 17357 } 17358 } 17359 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 17360 } 17361 17362 /* 17363 * See if we have valid sense data, if so then turn it over to 17364 * sd_decode_sense() to figure out the right course of action. 17365 */ 17366 if (sd_validate_sense_data(un, bp, xp, actual_len) == 17367 SD_SENSE_DATA_IS_VALID) { 17368 sd_decode_sense(un, bp, xp, pktp); 17369 } 17370 } 17371 17372 17373 /* 17374 * Function: sd_print_sense_failed_msg 17375 * 17376 * Description: Print log message when RQS has failed. 
17377 * 17378 * Arguments: un - ptr to associated softstate 17379 * bp - ptr to buf(9S) for the command 17380 * arg - generic message string ptr 17381 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17382 * or SD_NO_RETRY_ISSUED 17383 * 17384 * Context: May be called from interrupt context 17385 */ 17386 17387 static void 17388 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 17389 int code) 17390 { 17391 char *msgp = arg; 17392 17393 ASSERT(un != NULL); 17394 ASSERT(mutex_owned(SD_MUTEX(un))); 17395 ASSERT(bp != NULL); 17396 17397 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 17398 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 17399 } 17400 } 17401 17402 17403 /* 17404 * Function: sd_validate_sense_data 17405 * 17406 * Description: Check the given sense data for validity. 17407 * If the sense data is not valid, the command will 17408 * be either failed or retried! 17409 * 17410 * Return Code: SD_SENSE_DATA_IS_INVALID 17411 * SD_SENSE_DATA_IS_VALID 17412 * 17413 * Context: May be called from interrupt context 17414 */ 17415 17416 static int 17417 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17418 size_t actual_len) 17419 { 17420 struct scsi_extended_sense *esp; 17421 struct scsi_pkt *pktp; 17422 char *msgp = NULL; 17423 sd_ssc_t *sscp; 17424 17425 ASSERT(un != NULL); 17426 ASSERT(mutex_owned(SD_MUTEX(un))); 17427 ASSERT(bp != NULL); 17428 ASSERT(bp != un->un_rqs_bp); 17429 ASSERT(xp != NULL); 17430 ASSERT(un->un_fm_private != NULL); 17431 17432 pktp = SD_GET_PKTP(bp); 17433 ASSERT(pktp != NULL); 17434 17435 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 17436 ASSERT(sscp != NULL); 17437 17438 /* 17439 * Check the status of the RQS command (auto or manual). 17440 */ 17441 switch (xp->xb_sense_status & STATUS_MASK) { 17442 case STATUS_GOOD: 17443 break; 17444 17445 case STATUS_RESERVATION_CONFLICT: 17446 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 17447 return (SD_SENSE_DATA_IS_INVALID); 17448 17449 case STATUS_BUSY: 17450 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17451 "Busy Status on REQUEST SENSE\n"); 17452 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 17453 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 17454 return (SD_SENSE_DATA_IS_INVALID); 17455 17456 case STATUS_QFULL: 17457 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17458 "QFULL Status on REQUEST SENSE\n"); 17459 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 17460 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 17461 return (SD_SENSE_DATA_IS_INVALID); 17462 17463 case STATUS_CHECK: 17464 case STATUS_TERMINATED: 17465 msgp = "Check Condition on REQUEST SENSE\n"; 17466 goto sense_failed; 17467 17468 default: 17469 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 17470 goto sense_failed; 17471 } 17472 17473 /* 17474 * See if we got the minimum required amount of sense data. 17475 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 17476 * or less. 
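 *
 * Worked example (illustrative numbers only): if the target moved 18
 * bytes of sense data, actual_len is 18, STATE_XFERRED_DATA is set,
 * and 18 >= SUN_MIN_SENSE_LENGTH, so the checks below pass and the
 * sense bytes are examined further. A transfer of 0 bytes, or of
 * fewer than SUN_MIN_SENSE_LENGTH bytes, takes the corresponding
 * failure path instead.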
17477	 */
17478	if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
17479	    (actual_len == 0)) {
17480		msgp = "Request Sense couldn't get sense data\n";
17481		goto sense_failed;
17482	}
17483	
17484	if (actual_len < SUN_MIN_SENSE_LENGTH) {
17485		msgp = "Not enough sense information\n";
17486		/* Mark the ssc_flags for detecting invalid sense data */
17487		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17488			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17489			    "sense-data");
17490		}
17491		goto sense_failed;
17492	}
17493	
17494	/*
17495	 * We require the extended sense data
17496	 */
17497	esp = (struct scsi_extended_sense *)xp->xb_sense_data;
17498	if (esp->es_class != CLASS_EXTENDED_SENSE) {
17499		if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
17500			static char tmp[8];
17501			static char buf[148];
17502			char *p = (char *)(xp->xb_sense_data);
17503			int i;
17504	
17505			mutex_enter(&sd_sense_mutex);
17506			(void) strcpy(buf, "undecodable sense information:");
17507			for (i = 0; i < actual_len; i++) {
17508				(void) sprintf(tmp, " 0x%x", *(p++)&0xff);
17509				(void) strcpy(&buf[strlen(buf)], tmp);
17510			}
17511			i = strlen(buf);
17512			(void) strcpy(&buf[i], "-(assumed fatal)\n");
17513	
17514			if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
17515				scsi_log(SD_DEVINFO(un), sd_label,
17516				    CE_WARN, buf);
17517			}
17518			mutex_exit(&sd_sense_mutex);
17519		}
17520	
17521		/* Mark the ssc_flags for detecting invalid sense data */
17522		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17523			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17524			    "sense-data");
17525		}
17526	
17527		/* Note: Legacy behavior, fail the command with no retry */
17528		sd_return_failed_command(un, bp, EIO);
17529		return (SD_SENSE_DATA_IS_INVALID);
17530	}
17531	
17532	/*
17533	 * Check that es_code is valid (es_class concatenated with es_code
17534	 * makes up the "response code" field). es_class will always be 7, so
17535	 * make sure es_code is 0, 1, 2, 3, or 0xf; es_code indicates the
17536	 * format.
17537	 */
17538	if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
17539	    (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
17540	    (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
17541	    (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
17542	    (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
17543		/* Mark the ssc_flags for detecting invalid sense data */
17544		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17545			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17546			    "sense-data");
17547		}
17548		goto sense_failed;
17549	}
17550	
17551	return (SD_SENSE_DATA_IS_VALID);
17552	
17553	sense_failed:
17554	/*
17555	 * If the request sense failed (for whatever reason), attempt
17556	 * to retry the original command.
17557	 */
17558	#if defined(__i386) || defined(__amd64)
17559	/*
17560	 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in
17561	 * sddef.h for the SPARC platform, while x86 uses one binary
17562	 * for both SCSI and FC.
17563	 * The SD_RETRY_DELAY value needs to be adjusted here
17564	 * whenever SD_RETRY_DELAY changes in sddef.h.
17565	 */
17566	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17567	    sd_print_sense_failed_msg, msgp, EIO,
17568	    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);
17569	#else
17570	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17571	    sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
17572	#endif
17573	
17574	return (SD_SENSE_DATA_IS_INVALID);
17575	}
17576	
17577	/*
17578	 * Function: sd_decode_sense
17579	 *
17580	 * Description: Take recovery action(s) when SCSI Sense Data is received.
17581	 *
17582	 * Context: Interrupt context.
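 *
 * For example (values per the SCSI spec, shown for orientation only):
 * given fixed-format sense data beginning
 *
 *	0x70 0x00 0x05 0x00 0x00 0x00 0x00 0x0a ...
 *
 * the response code is 0x70 (current error, fixed format) and the
 * sense key is (byte 2 & 0xF) == 0x5, i.e. KEY_ILLEGAL_REQUEST, so the
 * switch below dispatches to sd_sense_key_illegal_request().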
17583	 */
17584	
17585	static void
17586	sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17587	    struct scsi_pkt *pktp)
17588	{
17589	uint8_t sense_key;
17590	
17591	ASSERT(un != NULL);
17592	ASSERT(mutex_owned(SD_MUTEX(un)));
17593	ASSERT(bp != NULL);
17594	ASSERT(bp != un->un_rqs_bp);
17595	ASSERT(xp != NULL);
17596	ASSERT(pktp != NULL);
17597	
17598	sense_key = scsi_sense_key(xp->xb_sense_data);
17599	
17600	switch (sense_key) {
17601	case KEY_NO_SENSE:
17602		sd_sense_key_no_sense(un, bp, xp, pktp);
17603		break;
17604	case KEY_RECOVERABLE_ERROR:
17605		sd_sense_key_recoverable_error(un, xp->xb_sense_data,
17606		    bp, xp, pktp);
17607		break;
17608	case KEY_NOT_READY:
17609		sd_sense_key_not_ready(un, xp->xb_sense_data,
17610		    bp, xp, pktp);
17611		break;
17612	case KEY_MEDIUM_ERROR:
17613	case KEY_HARDWARE_ERROR:
17614		sd_sense_key_medium_or_hardware_error(un,
17615		    xp->xb_sense_data, bp, xp, pktp);
17616		break;
17617	case KEY_ILLEGAL_REQUEST:
17618		sd_sense_key_illegal_request(un, bp, xp, pktp);
17619		break;
17620	case KEY_UNIT_ATTENTION:
17621		sd_sense_key_unit_attention(un, xp->xb_sense_data,
17622		    bp, xp, pktp);
17623		break;
17624	case KEY_WRITE_PROTECT:
17625	case KEY_VOLUME_OVERFLOW:
17626	case KEY_MISCOMPARE:
17627		sd_sense_key_fail_command(un, bp, xp, pktp);
17628		break;
17629	case KEY_BLANK_CHECK:
17630		sd_sense_key_blank_check(un, bp, xp, pktp);
17631		break;
17632	case KEY_ABORTED_COMMAND:
17633		sd_sense_key_aborted_command(un, bp, xp, pktp);
17634		break;
17635	case KEY_VENDOR_UNIQUE:
17636	case KEY_COPY_ABORTED:
17637	case KEY_EQUAL:
17638	case KEY_RESERVED:
17639	default:
17640		sd_sense_key_default(un, xp->xb_sense_data,
17641		    bp, xp, pktp);
17642		break;
17643	}
17644	}
17645	
17646	
17647	/*
17648	 * Function: sd_dump_memory
17649	 *
17650	 * Description: Debug logging routine to print the contents of a user
17651	 *		provided buffer. The output of the buffer is broken up
17652	 *		into 256-byte segments due to a size constraint of the
17653	 *		scsi_log implementation.
17654	 *
17655	 * Arguments: un - ptr to softstate
17656	 *	    comp - component mask
17657	 *	    title - "title" string to precede data when printed
17658	 *	    data - ptr to data block to be printed
17659	 *	    len - size of data block to be printed
17660	 *	    fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
17661	 *
17662	 * Context: May be called from interrupt context
17663	 */
17664	
17665	#define	SD_DUMP_MEMORY_BUF_SIZE	256
17666	
17667	static char *sd_dump_format_string[] = {
17668		" 0x%02x",
17669		" %c"
17670	};
17671	
17672	static void
17673	sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
17674	    int len, int fmt)
17675	{
17676	int	i, j;
17677	int	avail_count;
17678	int	start_offset;
17679	int	end_offset;
17680	size_t	entry_len;
17681	char	*bufp;
17682	char	*local_buf;
17683	char	*format_string;
17684	
17685	ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));
17686	
17687	/*
17688	 * In the debug version of the driver, this function is called from a
17689	 * number of places which are NOPs in the release driver.
17690	 * The debug driver therefore has additional methods of filtering
17691	 * debug output.
17692	 */
17693	#ifdef SDDEBUG
17694	/*
17695	 * In the debug version of the driver we can reduce the amount of debug
17696	 * messages by setting sd_error_level to something other than
17697	 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
17698	 * sd_component_mask.
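 *
 * For example (illustrative only; debug builds): since these are
 * ordinary global variables, they can be set without recompiling,
 * e.g. from /etc/system:
 *
 *	set sd:sd_component_mask = 0x00080000
 *	set sd:sd_level_mask = 0x00000008
 *
 * The numeric values here are placeholders; see the SD_LOG* and
 * SD_LOGMASK_* definitions in sddef.h for the real bit assignments.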
17699	 */
17700	if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
17701	    (sd_error_level != SCSI_ERR_ALL)) {
17702		return;
17703	}
17704	if (((sd_component_mask & comp) == 0) ||
17705	    (sd_error_level != SCSI_ERR_ALL)) {
17706		return;
17707	}
17708	#else
17709	if (sd_error_level != SCSI_ERR_ALL) {
17710		return;
17711	}
17712	#endif
17713	
17714	local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
17715	bufp = local_buf;
17716	/*
17717	 * Available length is the length of local_buf[], minus the
17718	 * length of the title string, minus one for the ":", minus
17719	 * one for the newline, minus one for the NULL terminator.
17720	 * This gives the # of bytes available for holding the printed
17721	 * values from the given data buffer.
17722	 */
17723	if (fmt == SD_LOG_HEX) {
17724		format_string = sd_dump_format_string[0];
17725	} else /* SD_LOG_CHAR */ {
17726		format_string = sd_dump_format_string[1];
17727	}
17728	/*
17729	 * Available count is the number of elements from the given
17730	 * data buffer that we can fit into the available length.
17731	 * This is based upon the size of the format string used.
17732	 * Make one entry and find its size.
17733	 */
17734	(void) sprintf(bufp, format_string, data[0]);
17735	entry_len = strlen(bufp);
17736	avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;
17737	
17738	j = 0;
17739	while (j < len) {
17740		bufp = local_buf;
17741		bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
17742		start_offset = j;
17743	
17744		end_offset = start_offset + avail_count;
17745	
17746		(void) sprintf(bufp, "%s:", title);
17747		bufp += strlen(bufp);
17748		for (i = start_offset; ((i < end_offset) && (j < len));
17749		    i++, j++) {
17750			(void) sprintf(bufp, format_string, data[i]);
17751			bufp += entry_len;
17752		}
17753		(void) sprintf(bufp, "\n");
17754	
17755		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
17756	}
17757	kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
17758	}
17759	
17760	/*
17761	 * Function: sd_print_sense_msg
17762	 *
17763	 * Description: Log a message based upon the given sense data.
17764	 *
17765	 * Arguments: un - ptr to associated softstate
17766	 *	    bp - ptr to buf(9S) for the command
17767	 *	    arg - ptr to associated sd_sense_info struct
17768	 *	    code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
17769	 *		   or SD_NO_RETRY_ISSUED
17770	 *
17771	 * Context: May be called from interrupt context
17772	 */
17773	
17774	static void
17775	sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
17776	{
17777	struct sd_xbuf	*xp;
17778	struct scsi_pkt	*pktp;
17779	uint8_t *sensep;
17780	daddr_t request_blkno;
17781	diskaddr_t err_blkno;
17782	int severity;
17783	int pfa_flag;
17784	extern struct scsi_key_strings scsi_cmds[];
17785	
17786	ASSERT(un != NULL);
17787	ASSERT(mutex_owned(SD_MUTEX(un)));
17788	ASSERT(bp != NULL);
17789	xp = SD_GET_XBUF(bp);
17790	ASSERT(xp != NULL);
17791	pktp = SD_GET_PKTP(bp);
17792	ASSERT(pktp != NULL);
17793	ASSERT(arg != NULL);
17794	
17795	severity = ((struct sd_sense_info *)(arg))->ssi_severity;
17796	pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;
17797	
17798	if ((code == SD_DELAYED_RETRY_ISSUED) ||
17799	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
17800		severity = SCSI_ERR_RETRYABLE;
17801	}
17802	
17803	/* Use absolute block number for the request block number */
17804	request_blkno = xp->xb_blkno;
17805	
17806	/*
17807	 * Now try to get the error block number from the sense data
17808	 */
17809	sensep = xp->xb_sense_data;
17810	
17811	if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
17812	    (uint64_t *)&err_blkno)) {
17813		/*
17814		 * We retrieved the error block number from the information
17815		 * portion of the sense data.
17816		 *
17817		 * For USCSI commands we are better off using the error
17818		 * block no. as the requested block no. (This is the best
17819		 * we can estimate.)
17820		 */
17821		if ((SD_IS_BUFIO(xp) == FALSE) &&
17822		    ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
17823			request_blkno = err_blkno;
17824		}
17825	} else {
17826		/*
17827		 * Without the es_valid bit set (for fixed format) or an
17828		 * information descriptor (for descriptor format) we cannot
17829		 * be certain of the error blkno, so just use the
17830		 * request_blkno.
17831		 */
17832		err_blkno = (diskaddr_t)request_blkno;
17833	}
17834	
17835	/*
17836	 * The following will log the buffer contents for the release driver
17837	 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
17838	 * level is set to verbose.
17839 */ 17840 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 17841 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17842 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 17843 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 17844 17845 if (pfa_flag == FALSE) { 17846 /* This is normally only set for USCSI */ 17847 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 17848 return; 17849 } 17850 17851 if ((SD_IS_BUFIO(xp) == TRUE) && 17852 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 17853 (severity < sd_error_level))) { 17854 return; 17855 } 17856 } 17857 /* 17858 * Check for Sonoma Failover and keep a count of how many failed I/O's 17859 */ 17860 if ((SD_IS_LSI(un)) && 17861 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 17862 (scsi_sense_asc(sensep) == 0x94) && 17863 (scsi_sense_ascq(sensep) == 0x01)) { 17864 un->un_sonoma_failure_count++; 17865 if (un->un_sonoma_failure_count > 1) { 17866 return; 17867 } 17868 } 17869 17870 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP || 17871 ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) && 17872 (pktp->pkt_resid == 0))) { 17873 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 17874 request_blkno, err_blkno, scsi_cmds, 17875 (struct scsi_extended_sense *)sensep, 17876 un->un_additional_codes, NULL); 17877 } 17878 } 17879 17880 /* 17881 * Function: sd_sense_key_no_sense 17882 * 17883 * Description: Recovery action when sense data was not received. 17884 * 17885 * Context: May be called from interrupt context 17886 */ 17887 17888 static void 17889 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 17890 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17891 { 17892 struct sd_sense_info si; 17893 17894 ASSERT(un != NULL); 17895 ASSERT(mutex_owned(SD_MUTEX(un))); 17896 ASSERT(bp != NULL); 17897 ASSERT(xp != NULL); 17898 ASSERT(pktp != NULL); 17899 17900 si.ssi_severity = SCSI_ERR_FATAL; 17901 si.ssi_pfa_flag = FALSE; 17902 17903 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17904 17905 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17906 &si, EIO, (clock_t)0, NULL); 17907 } 17908 17909 17910 /* 17911 * Function: sd_sense_key_recoverable_error 17912 * 17913 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 17914 * 17915 * Context: May be called from interrupt context 17916 */ 17917 17918 static void 17919 sd_sense_key_recoverable_error(struct sd_lun *un, 17920 uint8_t *sense_datap, 17921 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17922 { 17923 struct sd_sense_info si; 17924 uint8_t asc = scsi_sense_asc(sense_datap); 17925 17926 ASSERT(un != NULL); 17927 ASSERT(mutex_owned(SD_MUTEX(un))); 17928 ASSERT(bp != NULL); 17929 ASSERT(xp != NULL); 17930 ASSERT(pktp != NULL); 17931 17932 /* 17933 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 17934 */ 17935 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 17936 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17937 si.ssi_severity = SCSI_ERR_INFO; 17938 si.ssi_pfa_flag = TRUE; 17939 } else { 17940 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17941 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 17942 si.ssi_severity = SCSI_ERR_RECOVERED; 17943 si.ssi_pfa_flag = FALSE; 17944 } 17945 17946 if (pktp->pkt_resid == 0) { 17947 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17948 sd_return_command(un, bp); 17949 return; 17950 } 17951 17952 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17953 &si, EIO, (clock_t)0, NULL); 17954 } 17955 17956 17957 17958 17959 /* 17960 * Function: sd_sense_key_not_ready 17961 * 17962 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
17963 * 17964 * Context: May be called from interrupt context 17965 */ 17966 17967 static void 17968 sd_sense_key_not_ready(struct sd_lun *un, 17969 uint8_t *sense_datap, 17970 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17971 { 17972 struct sd_sense_info si; 17973 uint8_t asc = scsi_sense_asc(sense_datap); 17974 uint8_t ascq = scsi_sense_ascq(sense_datap); 17975 17976 ASSERT(un != NULL); 17977 ASSERT(mutex_owned(SD_MUTEX(un))); 17978 ASSERT(bp != NULL); 17979 ASSERT(xp != NULL); 17980 ASSERT(pktp != NULL); 17981 17982 si.ssi_severity = SCSI_ERR_FATAL; 17983 si.ssi_pfa_flag = FALSE; 17984 17985 /* 17986 * Update error stats after first NOT READY error. Disks may have 17987 * been powered down and may need to be restarted. For CDROMs, 17988 * report NOT READY errors only if media is present. 17989 */ 17990 if ((ISCD(un) && (asc == 0x3A)) || 17991 (xp->xb_nr_retry_count > 0)) { 17992 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17993 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 17994 } 17995 17996 /* 17997 * Just fail if the "not ready" retry limit has been reached. 17998 */ 17999 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 18000 /* Special check for error message printing for removables. */ 18001 if (un->un_f_has_removable_media && (asc == 0x04) && 18002 (ascq >= 0x04)) { 18003 si.ssi_severity = SCSI_ERR_ALL; 18004 } 18005 goto fail_command; 18006 } 18007 18008 /* 18009 * Check the ASC and ASCQ in the sense data as needed, to determine 18010 * what to do. 18011 */ 18012 switch (asc) { 18013 case 0x04: /* LOGICAL UNIT NOT READY */ 18014 /* 18015 * Disk drives that don't spin up result in a very long delay 18016 * in format without warning messages. We will log a message 18017 * if the error level is set to verbose. 18018 */ 18019 if (sd_error_level < SCSI_ERR_RETRYABLE) { 18020 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18021 "logical unit not ready, resetting disk\n"); 18022 } 18023 18024 /* 18025 * There are different requirements for CDROMs and disks for 18026 * the number of retries. If a CD-ROM is reporting this, it is 18027 * probably reading the TOC and is in the process of getting 18028 * ready, so we should keep on trying for a long time to make 18029 * sure that all types of media are taken into account (for 18030 * some media the drive takes a long time to read the TOC). For 18031 * disks we do not want to retry this too many times as this 18032 * can cause a long hang in format when the drive refuses to 18033 * spin up (a very common failure). 18034 */ 18035 switch (ascq) { 18036 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 18037 /* 18038 * Disk drives frequently refuse to spin up which 18039 * results in a very long hang in format without 18040 * warning messages. 18041 * 18042 * Note: This code preserves the legacy behavior of 18043 * comparing xb_nr_retry_count against zero for fibre 18044 * channel targets instead of comparing against the 18045 * un_reset_retry_count value. The reason for this 18046 * discrepancy has been so utterly lost beneath the 18047 * Sands of Time that even Indiana Jones could not 18048 * find it.
18049 */ 18050 if (un->un_f_is_fibre == TRUE) { 18051 if (((sd_level_mask & SD_LOGMASK_DIAG) || 18052 (xp->xb_nr_retry_count > 0)) && 18053 (un->un_startstop_timeid == NULL)) { 18054 scsi_log(SD_DEVINFO(un), sd_label, 18055 CE_WARN, "logical unit not ready, " 18056 "resetting disk\n"); 18057 sd_reset_target(un, pktp); 18058 } 18059 } else { 18060 if (((sd_level_mask & SD_LOGMASK_DIAG) || 18061 (xp->xb_nr_retry_count > 18062 un->un_reset_retry_count)) && 18063 (un->un_startstop_timeid == NULL)) { 18064 scsi_log(SD_DEVINFO(un), sd_label, 18065 CE_WARN, "logical unit not ready, " 18066 "resetting disk\n"); 18067 sd_reset_target(un, pktp); 18068 } 18069 } 18070 break; 18071 18072 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 18073 /* 18074 * If the target is in the process of becoming 18075 * ready, just proceed with the retry. This can 18076 * happen with CD-ROMs that take a long time to 18077 * read TOC after a power cycle or reset. 18078 */ 18079 goto do_retry; 18080 18081 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 18082 break; 18083 18084 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 18085 /* 18086 * Retries cannot help here so just fail right away. 18087 */ 18088 goto fail_command; 18089 18090 case 0x88: 18091 /* 18092 * Vendor-unique code for T3/T4: it indicates a 18093 * path problem in a multipathed config, but as far as 18094 * the target driver is concerned it equates to a fatal 18095 * error, so we should just fail the command right away 18096 * (without printing anything to the console). If this 18097 * is not a T3/T4, fall thru to the default recovery 18098 * action. 18099 * T3/T4 is FC only, don't need to check is_fibre 18100 */ 18101 if (SD_IS_T3(un) || SD_IS_T4(un)) { 18102 sd_return_failed_command(un, bp, EIO); 18103 return; 18104 } 18105 /* FALLTHRU */ 18106 18107 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 18108 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 18109 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 18110 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 18111 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 18112 default: /* Possible future codes in SCSI spec? */ 18113 /* 18114 * For removable-media devices, do not retry if 18115 * ASCQ > 2 as these result mostly from USCSI commands 18116 * on MMC devices issued to check status of an 18117 * operation initiated in immediate mode. Also for 18118 * ASCQ >= 4 do not print console messages as these 18119 * mainly represent a user-initiated operation 18120 * instead of a system failure. 18121 */ 18122 if (un->un_f_has_removable_media) { 18123 si.ssi_severity = SCSI_ERR_ALL; 18124 goto fail_command; 18125 } 18126 break; 18127 } 18128 18129 /* 18130 * As part of our recovery attempt for the NOT READY 18131 * condition, we issue a START STOP UNIT command. However 18132 * we want to wait for a short delay before attempting this 18133 * as there may still be more commands coming back from the 18134 * target with the check condition. To do this we use 18135 * timeout(9F) to call sd_start_stop_unit_callback() after 18136 * the delay interval expires. (sd_start_stop_unit_callback() 18137 * dispatches sd_start_stop_unit_task(), which will issue 18138 * the actual START STOP UNIT command.) The delay interval 18139 * is one-half of the delay that we will use to retry the 18140 * command that generated the NOT READY condition.
18141 * 18142 * Note that we could just dispatch sd_start_stop_unit_task() 18143 * from here and allow it to sleep for the delay interval, 18144 * but then we would be tying up the taskq thread 18145 * unnecessarily for the duration of the delay. 18146 * 18147 * Do not issue the START STOP UNIT if the current command 18148 * is already a START STOP UNIT. 18149 */ 18150 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 18151 break; 18152 } 18153 18154 /* 18155 * Do not schedule the timeout if one is already pending. 18156 */ 18157 if (un->un_startstop_timeid != NULL) { 18158 SD_INFO(SD_LOG_ERROR, un, 18159 "sd_sense_key_not_ready: restart already issued to" 18160 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 18161 ddi_get_instance(SD_DEVINFO(un))); 18162 break; 18163 } 18164 18165 /* 18166 * Schedule the START STOP UNIT command, then queue the command 18167 * for a retry. 18168 * 18169 * Note: A timeout is not scheduled for this retry because we 18170 * want the retry to be serial with the START_STOP_UNIT. The 18171 * retry will be started when the START_STOP_UNIT is completed 18172 * in sd_start_stop_unit_task. 18173 */ 18174 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 18175 un, un->un_busy_timeout / 2); 18176 xp->xb_nr_retry_count++; 18177 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 18178 return; 18179 18180 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 18181 if (sd_error_level < SCSI_ERR_RETRYABLE) { 18182 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18183 "unit does not respond to selection\n"); 18184 } 18185 break; 18186 18187 case 0x3A: /* MEDIUM NOT PRESENT */ 18188 if (sd_error_level >= SCSI_ERR_FATAL) { 18189 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18190 "Caddy not inserted in drive\n"); 18191 } 18192 18193 sr_ejected(un); 18194 un->un_mediastate = DKIO_EJECTED; 18195 /* The state has changed, inform the media watch routines */ 18196 cv_broadcast(&un->un_state_cv); 18197 /* Just fail if no media is present in the drive. */ 18198 goto fail_command; 18199 18200 default: 18201 if (sd_error_level < SCSI_ERR_RETRYABLE) { 18202 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 18203 "Unit not Ready. Additional sense code 0x%x\n", 18204 asc); 18205 } 18206 break; 18207 } 18208 18209 do_retry: 18210 18211 /* 18212 * Retry the command, as some targets may report NOT READY for 18213 * several seconds after being reset. 18214 */ 18215 xp->xb_nr_retry_count++; 18216 si.ssi_severity = SCSI_ERR_RETRYABLE; 18217 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 18218 &si, EIO, un->un_busy_timeout, NULL); 18219 18220 return; 18221 18222 fail_command: 18223 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18224 sd_return_failed_command(un, bp, EIO); 18225 } 18226 18227 18228 18229 /* 18230 * Function: sd_sense_key_medium_or_hardware_error 18231 * 18232 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 18233 * sense key.
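 * Both sense keys are counted as hard errors and the command is retried. As the code below shows, once xb_retry_count reaches un_reset_retry_count a LUN reset (falling back to a target reset) is also attempted, provided bus/device resets are allowed; the exception is an LSI array reporting ASC 0x84 (LUN Dead), where a reset would only victimize the other LUNs on the target.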
18234 * 18235 * Context: May be called from interrupt context 18236 */ 18237 18238 static void 18239 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 18240 uint8_t *sense_datap, 18241 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18242 { 18243 struct sd_sense_info si; 18244 uint8_t sense_key = scsi_sense_key(sense_datap); 18245 uint8_t asc = scsi_sense_asc(sense_datap); 18246 18247 ASSERT(un != NULL); 18248 ASSERT(mutex_owned(SD_MUTEX(un))); 18249 ASSERT(bp != NULL); 18250 ASSERT(xp != NULL); 18251 ASSERT(pktp != NULL); 18252 18253 si.ssi_severity = SCSI_ERR_FATAL; 18254 si.ssi_pfa_flag = FALSE; 18255 18256 if (sense_key == KEY_MEDIUM_ERROR) { 18257 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 18258 } 18259 18260 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18261 18262 if ((un->un_reset_retry_count != 0) && 18263 (xp->xb_retry_count == un->un_reset_retry_count)) { 18264 mutex_exit(SD_MUTEX(un)); 18265 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 18266 if (un->un_f_allow_bus_device_reset == TRUE) { 18267 18268 boolean_t try_resetting_target = B_TRUE; 18269 18270 /* 18271 * We need to be able to handle specific ASCs when we are 18272 * handling a KEY_HARDWARE_ERROR. In particular 18273 * taking the default action of resetting the target may 18274 * not be the appropriate way to attempt recovery. 18275 * Resetting a target because of a single LUN failure 18276 * victimizes all LUNs on that target. 18277 * 18278 * This is true for LSI arrays: if an LSI 18279 * array controller returns an ASC of 0x84 (LUN Dead), we 18280 * should trust it. 18281 */ 18282 18283 if (sense_key == KEY_HARDWARE_ERROR) { 18284 switch (asc) { 18285 case 0x84: 18286 if (SD_IS_LSI(un)) { 18287 try_resetting_target = B_FALSE; 18288 } 18289 break; 18290 default: 18291 break; 18292 } 18293 } 18294 18295 if (try_resetting_target == B_TRUE) { 18296 int reset_retval = 0; 18297 if (un->un_f_lun_reset_enabled == TRUE) { 18298 SD_TRACE(SD_LOG_IO_CORE, un, 18299 "sd_sense_key_medium_or_hardware_" 18300 "error: issuing RESET_LUN\n"); 18301 reset_retval = 18302 scsi_reset(SD_ADDRESS(un), 18303 RESET_LUN); 18304 } 18305 if (reset_retval == 0) { 18306 SD_TRACE(SD_LOG_IO_CORE, un, 18307 "sd_sense_key_medium_or_hardware_" 18308 "error: issuing RESET_TARGET\n"); 18309 (void) scsi_reset(SD_ADDRESS(un), 18310 RESET_TARGET); 18311 } 18312 } 18313 } 18314 mutex_enter(SD_MUTEX(un)); 18315 } 18316 18317 /* 18318 * This really ought to be a fatal error, but we will retry anyway 18319 * as some drives report this as a spurious error. 18320 */ 18321 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18322 &si, EIO, (clock_t)0, NULL); 18323 } 18324 18325 18326 18327 /* 18328 * Function: sd_sense_key_illegal_request 18329 * 18330 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
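 * An illegal request is never retried, since resending the same CDB would simply be rejected again; the sense data is logged at SCSI_ERR_INFO severity and the command is failed with EIO.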
18331 * 18332 * Context: May be called from interrupt context 18333 */ 18334 18335 static void 18336 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 18337 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18338 { 18339 struct sd_sense_info si; 18340 18341 ASSERT(un != NULL); 18342 ASSERT(mutex_owned(SD_MUTEX(un))); 18343 ASSERT(bp != NULL); 18344 ASSERT(xp != NULL); 18345 ASSERT(pktp != NULL); 18346 18347 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 18348 18349 si.ssi_severity = SCSI_ERR_INFO; 18350 si.ssi_pfa_flag = FALSE; 18351 18352 /* Pointless to retry if the target thinks it's an illegal request */ 18353 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18354 sd_return_failed_command(un, bp, EIO); 18355 } 18356 18357 18358 18359 18360 /* 18361 * Function: sd_sense_key_unit_attention 18362 * 18363 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 18364 * 18365 * Context: May be called from interrupt context 18366 */ 18367 18368 static void 18369 sd_sense_key_unit_attention(struct sd_lun *un, 18370 uint8_t *sense_datap, 18371 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18372 { 18373 /* 18374 * For UNIT ATTENTION we allow retries for one minute. Devices 18375 * like Sonoma can return UNIT ATTENTION close to a minute 18376 * under certain conditions. 18377 */ 18378 int retry_check_flag = SD_RETRIES_UA; 18379 boolean_t kstat_updated = B_FALSE; 18380 struct sd_sense_info si; 18381 uint8_t asc = scsi_sense_asc(sense_datap); 18382 uint8_t ascq = scsi_sense_ascq(sense_datap); 18383 18384 ASSERT(un != NULL); 18385 ASSERT(mutex_owned(SD_MUTEX(un))); 18386 ASSERT(bp != NULL); 18387 ASSERT(xp != NULL); 18388 ASSERT(pktp != NULL); 18389 18390 si.ssi_severity = SCSI_ERR_INFO; 18391 si.ssi_pfa_flag = FALSE; 18392 18393 18394 switch (asc) { 18395 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 18396 if (sd_report_pfa != 0) { 18397 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 18398 si.ssi_pfa_flag = TRUE; 18399 retry_check_flag = SD_RETRIES_STANDARD; 18400 goto do_retry; 18401 } 18402 18403 break; 18404 18405 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 18406 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 18407 un->un_resvd_status |= 18408 (SD_LOST_RESERVE | SD_WANT_RESERVE); 18409 } 18410 #ifdef _LP64 18411 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 18412 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 18413 un, KM_NOSLEEP) == 0) { 18414 /* 18415 * If we can't dispatch the task we'll just 18416 * live without descriptor sense. We can 18417 * try again on the next "unit attention" 18418 */ 18419 SD_ERROR(SD_LOG_ERROR, un, 18420 "sd_sense_key_unit_attention: " 18421 "Could not dispatch " 18422 "sd_reenable_dsense_task\n"); 18423 } 18424 } 18425 #endif /* _LP64 */ 18426 /* FALLTHRU */ 18427 18428 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 18429 if (!un->un_f_has_removable_media) { 18430 break; 18431 } 18432 18433 /* 18434 * When we get a unit attention from a removable-media device, 18435 * it may be in a state that will take a long time to recover 18436 * (e.g., from a reset). Since we are executing in interrupt 18437 * context here, we cannot wait around for the device to come 18438 * back. So hand this command off to sd_media_change_task() 18439 * for deferred processing under taskq thread context. (Note 18440 * that the command still may be failed if a problem is 18441 * encountered at a later time.) 
18442 */ 18443 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 18444 KM_NOSLEEP) == 0) { 18445 /* 18446 * Cannot dispatch the request so fail the command. 18447 */ 18448 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18449 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18450 si.ssi_severity = SCSI_ERR_FATAL; 18451 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18452 sd_return_failed_command(un, bp, EIO); 18453 } 18454 18455 /* 18456 * If we failed to dispatch sd_media_change_task(), the kstats were 18457 * already updated above. If the dispatch succeeded, the kstats will 18458 * be updated later if an error is encountered. Either way, set the 18459 * kstat_updated flag here. 18460 */ 18461 kstat_updated = B_TRUE; 18462 18463 /* 18464 * Either the command has been successfully dispatched to a 18465 * task Q for retrying, or the dispatch failed. In either case 18466 * do NOT retry again by calling sd_retry_command. That would set up 18467 * two retries of the same command, and when one completes and 18468 * frees the resources the other would access freed memory, 18469 * a bad thing. 18470 */ 18471 return; 18472 18473 default: 18474 break; 18475 } 18476 18477 /* 18478 * ASC ASCQ 18479 * 2A 09 Capacity data has changed 18480 * 2A 01 Mode parameters changed 18481 * 3F 0E Reported luns data has changed 18482 * Arrays that support logical unit expansion should report 18483 * capacity changes (2Ah/09). "Mode parameters changed" and 18484 * "reported luns data has changed" serve as approximations. 18485 */ 18486 if (((asc == 0x2a) && (ascq == 0x09)) || 18487 ((asc == 0x2a) && (ascq == 0x01)) || 18488 ((asc == 0x3f) && (ascq == 0x0e))) { 18489 if (taskq_dispatch(sd_tq, sd_target_change_task, un, 18490 KM_NOSLEEP) == 0) { 18491 SD_ERROR(SD_LOG_ERROR, un, 18492 "sd_sense_key_unit_attention: " 18493 "Could not dispatch sd_target_change_task\n"); 18494 } 18495 } 18496 18497 /* 18498 * Update kstat if we haven't done that. 18499 */ 18500 if (!kstat_updated) { 18501 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18502 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18503 } 18504 18505 do_retry: 18506 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 18507 EIO, SD_UA_RETRY_DELAY, NULL); 18508 } 18509 18510 18511 18512 /* 18513 * Function: sd_sense_key_fail_command 18514 * 18515 * Description: Used to fail a command when we don't like the sense key that 18516 * was returned. 18517 * 18518 * Context: May be called from interrupt context 18519 */ 18520 18521 static void 18522 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 18523 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18524 { 18525 struct sd_sense_info si; 18526 18527 ASSERT(un != NULL); 18528 ASSERT(mutex_owned(SD_MUTEX(un))); 18529 ASSERT(bp != NULL); 18530 ASSERT(xp != NULL); 18531 ASSERT(pktp != NULL); 18532 18533 si.ssi_severity = SCSI_ERR_FATAL; 18534 si.ssi_pfa_flag = FALSE; 18535 18536 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18537 sd_return_failed_command(un, bp, EIO); 18538 } 18539 18540 18541 18542 /* 18543 * Function: sd_sense_key_blank_check 18544 * 18545 * Description: Recovery actions for a SCSI "Blank Check" sense key. 18546 * Has no monetary connotation.
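 * (A blank check typically indicates that a read hit blank or unrecorded media, which is why it is treated as non-fatal for removable-media devices below.)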
18547 * 18548 * Context: May be called from interrupt context 18549 */ 18550 18551 static void 18552 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 18553 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18554 { 18555 struct sd_sense_info si; 18556 18557 ASSERT(un != NULL); 18558 ASSERT(mutex_owned(SD_MUTEX(un))); 18559 ASSERT(bp != NULL); 18560 ASSERT(xp != NULL); 18561 ASSERT(pktp != NULL); 18562 18563 /* 18564 * Blank check is not fatal for removable devices, therefore 18565 * it does not require a console message. 18566 */ 18567 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 18568 SCSI_ERR_FATAL; 18569 si.ssi_pfa_flag = FALSE; 18570 18571 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18572 sd_return_failed_command(un, bp, EIO); 18573 } 18574 18575 18576 18577 18578 /* 18579 * Function: sd_sense_key_aborted_command 18580 * 18581 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 18582 * 18583 * Context: May be called from interrupt context 18584 */ 18585 18586 static void 18587 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 18588 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18589 { 18590 struct sd_sense_info si; 18591 18592 ASSERT(un != NULL); 18593 ASSERT(mutex_owned(SD_MUTEX(un))); 18594 ASSERT(bp != NULL); 18595 ASSERT(xp != NULL); 18596 ASSERT(pktp != NULL); 18597 18598 si.ssi_severity = SCSI_ERR_FATAL; 18599 si.ssi_pfa_flag = FALSE; 18600 18601 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18602 18603 /* 18604 * This really ought to be a fatal error, but we will retry anyway 18605 * as some drives report this as a spurious error. 18606 */ 18607 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18608 &si, EIO, drv_usectohz(100000), NULL); 18609 } 18610 18611 18612 18613 /* 18614 * Function: sd_sense_key_default 18615 * 18616 * Description: Default recovery action for several SCSI sense keys (basically 18617 * attempts a retry). 18618 * 18619 * Context: May be called from interrupt context 18620 */ 18621 18622 static void 18623 sd_sense_key_default(struct sd_lun *un, 18624 uint8_t *sense_datap, 18625 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18626 { 18627 struct sd_sense_info si; 18628 uint8_t sense_key = scsi_sense_key(sense_datap); 18629 18630 ASSERT(un != NULL); 18631 ASSERT(mutex_owned(SD_MUTEX(un))); 18632 ASSERT(bp != NULL); 18633 ASSERT(xp != NULL); 18634 ASSERT(pktp != NULL); 18635 18636 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18637 18638 /* 18639 * Undecoded sense key. Attempt retries and hope that will fix 18640 * the problem. Otherwise, we're dead. 18641 */ 18642 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 18643 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18644 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 18645 } 18646 18647 si.ssi_severity = SCSI_ERR_FATAL; 18648 si.ssi_pfa_flag = FALSE; 18649 18650 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18651 &si, EIO, (clock_t)0, NULL); 18652 } 18653 18654 18655 18656 /* 18657 * Function: sd_print_retry_msg 18658 * 18659 * Description: Print a message indicating the retry action being taken. 18660 * 18661 * Arguments: un - ptr to associated softstate 18662 * bp - ptr to buf(9S) for the command 18663 * arg - not used. 
18664 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18665 * or SD_NO_RETRY_ISSUED 18666 * 18667 * Context: May be called from interrupt context 18668 */ 18669 /* ARGSUSED */ 18670 static void 18671 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 18672 { 18673 struct sd_xbuf *xp; 18674 struct scsi_pkt *pktp; 18675 char *reasonp; 18676 char *msgp; 18677 18678 ASSERT(un != NULL); 18679 ASSERT(mutex_owned(SD_MUTEX(un))); 18680 ASSERT(bp != NULL); 18681 pktp = SD_GET_PKTP(bp); 18682 ASSERT(pktp != NULL); 18683 xp = SD_GET_XBUF(bp); 18684 ASSERT(xp != NULL); 18685 18686 ASSERT(!mutex_owned(&un->un_pm_mutex)); 18687 mutex_enter(&un->un_pm_mutex); 18688 if ((un->un_state == SD_STATE_SUSPENDED) || 18689 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 18690 (pktp->pkt_flags & FLAG_SILENT)) { 18691 mutex_exit(&un->un_pm_mutex); 18692 goto update_pkt_reason; 18693 } 18694 mutex_exit(&un->un_pm_mutex); 18695 18696 /* 18697 * Suppress messages if they are all the same pkt_reason; with 18698 * TQ, many (up to 256) are returned with the same pkt_reason. 18699 * If we are in panic, then suppress the retry messages. 18700 */ 18701 switch (flag) { 18702 case SD_NO_RETRY_ISSUED: 18703 msgp = "giving up"; 18704 break; 18705 case SD_IMMEDIATE_RETRY_ISSUED: 18706 case SD_DELAYED_RETRY_ISSUED: 18707 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 18708 ((pktp->pkt_reason == un->un_last_pkt_reason) && 18709 (sd_error_level != SCSI_ERR_ALL))) { 18710 return; 18711 } 18712 msgp = "retrying command"; 18713 break; 18714 default: 18715 goto update_pkt_reason; 18716 } 18717 18718 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 18719 scsi_rname(pktp->pkt_reason)); 18720 18721 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 18722 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18723 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 18724 } 18725 18726 update_pkt_reason: 18727 /* 18728 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 18729 * This is to prevent multiple console messages for the same failure 18730 * condition. Note that un->un_last_pkt_reason is NOT restored if & 18731 * when the command is retried successfully because there still may be 18732 * more commands coming back with the same value of pktp->pkt_reason. 18733 */ 18734 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 18735 un->un_last_pkt_reason = pktp->pkt_reason; 18736 } 18737 } 18738 18739 18740 /* 18741 * Function: sd_print_cmd_incomplete_msg 18742 * 18743 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 18744 * 18745 * Arguments: un - ptr to associated softstate 18746 * bp - ptr to buf(9S) for the command 18747 * arg - passed to sd_print_retry_msg() 18748 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18749 * or SD_NO_RETRY_ISSUED 18750 * 18751 * Context: May be called from interrupt context 18752 */ 18753 18754 static void 18755 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 18756 int code) 18757 { 18758 dev_info_t *dip; 18759 18760 ASSERT(un != NULL); 18761 ASSERT(mutex_owned(SD_MUTEX(un))); 18762 ASSERT(bp != NULL); 18763 18764 switch (code) { 18765 case SD_NO_RETRY_ISSUED: 18766 /* Command was failed. Someone turned off this target? 
*/ 18767 if (un->un_state != SD_STATE_OFFLINE) { 18768 /* 18769 * Suppress message if we are detaching and 18770 * device has been disconnected 18771 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 18772 * private interface and not part of the DDI 18773 */ 18774 dip = un->un_sd->sd_dev; 18775 if (!(DEVI_IS_DETACHING(dip) && 18776 DEVI_IS_DEVICE_REMOVED(dip))) { 18777 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18778 "disk not responding to selection\n"); 18779 } 18780 New_state(un, SD_STATE_OFFLINE); 18781 } 18782 break; 18783 18784 case SD_DELAYED_RETRY_ISSUED: 18785 case SD_IMMEDIATE_RETRY_ISSUED: 18786 default: 18787 /* Command was successfully queued for retry */ 18788 sd_print_retry_msg(un, bp, arg, code); 18789 break; 18790 } 18791 } 18792 18793 18794 /* 18795 * Function: sd_pkt_reason_cmd_incomplete 18796 * 18797 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 18798 * 18799 * Context: May be called from interrupt context 18800 */ 18801 18802 static void 18803 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 18804 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18805 { 18806 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 18807 18808 ASSERT(un != NULL); 18809 ASSERT(mutex_owned(SD_MUTEX(un))); 18810 ASSERT(bp != NULL); 18811 ASSERT(xp != NULL); 18812 ASSERT(pktp != NULL); 18813 18814 /* Do not do a reset if selection did not complete */ 18815 /* Note: Should this not just check the bit? */ 18816 if (pktp->pkt_state != STATE_GOT_BUS) { 18817 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18818 sd_reset_target(un, pktp); 18819 } 18820 18821 /* 18822 * If the target was not successfully selected, then set 18823 * SD_RETRIES_FAILFAST to indicate that we lost communication 18824 * with the target, and further retries and/or commands are 18825 * likely to take a long time. 18826 */ 18827 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 18828 flag |= SD_RETRIES_FAILFAST; 18829 } 18830 18831 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18832 18833 sd_retry_command(un, bp, flag, 18834 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18835 } 18836 18837 18838 18839 /* 18840 * Function: sd_pkt_reason_cmd_tran_err 18841 * 18842 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 18843 * 18844 * Context: May be called from interrupt context 18845 */ 18846 18847 static void 18848 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 18849 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18850 { 18851 ASSERT(un != NULL); 18852 ASSERT(mutex_owned(SD_MUTEX(un))); 18853 ASSERT(bp != NULL); 18854 ASSERT(xp != NULL); 18855 ASSERT(pktp != NULL); 18856 18857 /* 18858 * Do not reset if we got a parity error, or if 18859 * selection did not complete. 18860 */ 18861 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18862 /* Note: Should this not just check the bit for pkt_state? */ 18863 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 18864 (pktp->pkt_state != STATE_GOT_BUS)) { 18865 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18866 sd_reset_target(un, pktp); 18867 } 18868 18869 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18870 18871 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18872 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18873 } 18874 18875 18876 18877 /* 18878 * Function: sd_pkt_reason_cmd_reset 18879 * 18880 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
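 * The target may still be running the command, so a target reset is attempted before the command is requeued. The retry below uses SD_RETRIES_VICTIM because this command was most likely a victim of a reset caused by another target on the bus.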
18881 * 18882 * Context: May be called from interrupt context 18883 */ 18884 18885 static void 18886 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 18887 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18888 { 18889 ASSERT(un != NULL); 18890 ASSERT(mutex_owned(SD_MUTEX(un))); 18891 ASSERT(bp != NULL); 18892 ASSERT(xp != NULL); 18893 ASSERT(pktp != NULL); 18894 18895 /* The target may still be running the command, so try to reset. */ 18896 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18897 sd_reset_target(un, pktp); 18898 18899 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18900 18901 /* 18902 * If pkt_reason is CMD_RESET chances are that this pkt got 18903 * reset because another target on this bus caused it. The target 18904 * that caused it should get CMD_TIMEOUT with pkt_statistics 18905 * of STAT_TIMEOUT/STAT_DEV_RESET. 18906 */ 18907 18908 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18909 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18910 } 18911 18912 18913 18914 18915 /* 18916 * Function: sd_pkt_reason_cmd_aborted 18917 * 18918 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 18919 * 18920 * Context: May be called from interrupt context 18921 */ 18922 18923 static void 18924 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 18925 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18926 { 18927 ASSERT(un != NULL); 18928 ASSERT(mutex_owned(SD_MUTEX(un))); 18929 ASSERT(bp != NULL); 18930 ASSERT(xp != NULL); 18931 ASSERT(pktp != NULL); 18932 18933 /* The target may still be running the command, so try to reset. */ 18934 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18935 sd_reset_target(un, pktp); 18936 18937 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18938 18939 /* 18940 * If pkt_reason is CMD_ABORTED chances are that this pkt got 18941 * aborted because another target on this bus caused it. The target 18942 * that caused it should get CMD_TIMEOUT with pkt_statistics 18943 * of STAT_TIMEOUT/STAT_DEV_RESET. 18944 */ 18945 18946 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18947 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18948 } 18949 18950 18951 18952 /* 18953 * Function: sd_pkt_reason_cmd_timeout 18954 * 18955 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 18956 * 18957 * Context: May be called from interrupt context 18958 */ 18959 18960 static void 18961 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 18962 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18963 { 18964 ASSERT(un != NULL); 18965 ASSERT(mutex_owned(SD_MUTEX(un))); 18966 ASSERT(bp != NULL); 18967 ASSERT(xp != NULL); 18968 ASSERT(pktp != NULL); 18969 18970 18971 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18972 sd_reset_target(un, pktp); 18973 18974 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18975 18976 /* 18977 * A command timeout indicates that we could not establish 18978 * communication with the target, so set SD_RETRIES_FAILFAST 18979 * as further retries/commands are likely to take a long time. 18980 */ 18981 sd_retry_command(un, bp, 18982 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 18983 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18984 } 18985 18986 18987 18988 /* 18989 * Function: sd_pkt_reason_cmd_unx_bus_free 18990 * 18991 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
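 * An unexpected bus free is counted as a hard error and the command is retried. Note that when STAT_PERR is set no retry message is printed (funcp is left NULL below).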
18992 * 18993 * Context: May be called from interrupt context 18994 */ 18995 18996 static void 18997 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 18998 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18999 { 19000 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 19001 19002 ASSERT(un != NULL); 19003 ASSERT(mutex_owned(SD_MUTEX(un))); 19004 ASSERT(bp != NULL); 19005 ASSERT(xp != NULL); 19006 ASSERT(pktp != NULL); 19007 19008 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19009 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19010 19011 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 19012 sd_print_retry_msg : NULL; 19013 19014 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 19015 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19016 } 19017 19018 19019 /* 19020 * Function: sd_pkt_reason_cmd_tag_reject 19021 * 19022 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 19023 * 19024 * Context: May be called from interrupt context 19025 */ 19026 19027 static void 19028 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 19029 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19030 { 19031 ASSERT(un != NULL); 19032 ASSERT(mutex_owned(SD_MUTEX(un))); 19033 ASSERT(bp != NULL); 19034 ASSERT(xp != NULL); 19035 ASSERT(pktp != NULL); 19036 19037 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19038 pktp->pkt_flags = 0; 19039 un->un_tagflags = 0; 19040 if (un->un_f_opt_queueing == TRUE) { 19041 un->un_throttle = min(un->un_throttle, 3); 19042 } else { 19043 un->un_throttle = 1; 19044 } 19045 mutex_exit(SD_MUTEX(un)); 19046 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 19047 mutex_enter(SD_MUTEX(un)); 19048 19049 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19050 19051 /* Legacy behavior not to check retry counts here. */ 19052 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 19053 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19054 } 19055 19056 19057 /* 19058 * Function: sd_pkt_reason_default 19059 * 19060 * Description: Default recovery actions for SCSA pkt_reason values that 19061 * do not have more explicit recovery actions. 19062 * 19063 * Context: May be called from interrupt context 19064 */ 19065 19066 static void 19067 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 19068 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19069 { 19070 ASSERT(un != NULL); 19071 ASSERT(mutex_owned(SD_MUTEX(un))); 19072 ASSERT(bp != NULL); 19073 ASSERT(xp != NULL); 19074 ASSERT(pktp != NULL); 19075 19076 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19077 sd_reset_target(un, pktp); 19078 19079 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19080 19081 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 19082 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19083 } 19084 19085 19086 19087 /* 19088 * Function: sd_pkt_status_check_condition 19089 * 19090 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
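 * If auto request sense (ARQ) is disabled, a REQUEST SENSE command is issued to fetch the sense data and the failed command is retried afterwards; with ARQ enabled the sense data has already arrived with the packet, so the command is simply retried after a short delay.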
19091 * 19092 * Context: May be called from interrupt context 19093 */ 19094 19095 static void 19096 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 19097 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19098 { 19099 ASSERT(un != NULL); 19100 ASSERT(mutex_owned(SD_MUTEX(un))); 19101 ASSERT(bp != NULL); 19102 ASSERT(xp != NULL); 19103 ASSERT(pktp != NULL); 19104 19105 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 19106 "entry: buf:0x%p xp:0x%p\n", bp, xp); 19107 19108 /* 19109 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 19110 * command will be retried after the request sense). Otherwise, retry 19111 * the command. Note: we are issuing the request sense even though the 19112 * retry limit may have been reached for the failed command. 19113 */ 19114 if (un->un_f_arq_enabled == FALSE) { 19115 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 19116 "no ARQ, sending request sense command\n"); 19117 sd_send_request_sense_command(un, bp, pktp); 19118 } else { 19119 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 19120 "ARQ, retrying request sense command\n"); 19121 #if defined(__i386) || defined(__amd64) 19122 /* 19123 * The SD_RETRY_DELAY value used here needs to be adjusted 19124 * if SD_RETRY_DELAY changes in sddef.h 19125 */ 19126 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 19127 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, 19128 NULL); 19129 #else 19130 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 19131 EIO, SD_RETRY_DELAY, NULL); 19132 #endif 19133 } 19134 19135 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 19136 } 19137 19138 19139 /* 19140 * Function: sd_pkt_status_busy 19141 * 19142 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 19143 * 19144 * Context: May be called from interrupt context 19145 */ 19146 19147 static void 19148 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 19149 struct scsi_pkt *pktp) 19150 { 19151 ASSERT(un != NULL); 19152 ASSERT(mutex_owned(SD_MUTEX(un))); 19153 ASSERT(bp != NULL); 19154 ASSERT(xp != NULL); 19155 ASSERT(pktp != NULL); 19156 19157 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19158 "sd_pkt_status_busy: entry\n"); 19159 19160 /* If retries are exhausted, just fail the command. */ 19161 if (xp->xb_retry_count >= un->un_busy_retry_count) { 19162 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 19163 "device busy too long\n"); 19164 sd_return_failed_command(un, bp, EIO); 19165 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19166 "sd_pkt_status_busy: exit\n"); 19167 return; 19168 } 19169 xp->xb_retry_count++; 19170 19171 /* 19172 * Try to reset the target. However, we do not want to perform 19173 * more than one reset if the device continues to fail. The reset 19174 * will be performed when the retry count reaches the reset 19175 * threshold. This threshold should be set such that at least 19176 * one retry is issued before the reset is performed. 19177 */ 19178 if (xp->xb_retry_count == 19179 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 19180 int rval = 0; 19181 mutex_exit(SD_MUTEX(un)); 19182 if (un->un_f_allow_bus_device_reset == TRUE) { 19183 /* 19184 * First try to reset the LUN; if we cannot then 19185 * try to reset the target.
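 * If both of those fail, RESET_ALL is attempted below as a last resort before the command is finally failed.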
19186 */ 19187 if (un->un_f_lun_reset_enabled == TRUE) { 19188 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19189 "sd_pkt_status_busy: RESET_LUN\n"); 19190 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 19191 } 19192 if (rval == 0) { 19193 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19194 "sd_pkt_status_busy: RESET_TARGET\n"); 19195 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 19196 } 19197 } 19198 if (rval == 0) { 19199 /* 19200 * If the RESET_LUN and/or RESET_TARGET failed, 19201 * try RESET_ALL 19202 */ 19203 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19204 "sd_pkt_status_busy: RESET_ALL\n"); 19205 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 19206 } 19207 mutex_enter(SD_MUTEX(un)); 19208 if (rval == 0) { 19209 /* 19210 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 19211 * At this point we give up & fail the command. 19212 */ 19213 sd_return_failed_command(un, bp, EIO); 19214 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19215 "sd_pkt_status_busy: exit (failed cmd)\n"); 19216 return; 19217 } 19218 } 19219 19220 /* 19221 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 19222 * we have already checked the retry counts above. 19223 */ 19224 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 19225 EIO, un->un_busy_timeout, NULL); 19226 19227 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19228 "sd_pkt_status_busy: exit\n"); 19229 } 19230 19231 19232 /* 19233 * Function: sd_pkt_status_reservation_conflict 19234 * 19235 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 19236 * command status. 19237 * 19238 * Context: May be called from interrupt context 19239 */ 19240 19241 static void 19242 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 19243 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19244 { 19245 ASSERT(un != NULL); 19246 ASSERT(mutex_owned(SD_MUTEX(un))); 19247 ASSERT(bp != NULL); 19248 ASSERT(xp != NULL); 19249 ASSERT(pktp != NULL); 19250 19251 /* 19252 * If the command was PERSISTENT_RESERVATION_[IN|OUT], the reservation 19253 * conflict could be due to various reasons, such as incorrect keys or 19254 * the initiator not being registered or reserved, so we return EACCES 19255 * to the caller. 19255 */ 19256 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 19257 int cmd = SD_GET_PKT_OPCODE(pktp); 19258 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 19259 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 19260 sd_return_failed_command(un, bp, EACCES); 19261 return; 19262 } 19263 } 19264 19265 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 19266 19267 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 19268 if (sd_failfast_enable != 0) { 19269 /* By definition, we must panic here.... */ 19270 sd_panic_for_res_conflict(un); 19271 /*NOTREACHED*/ 19272 } 19273 SD_ERROR(SD_LOG_IO, un, 19274 "sd_handle_resv_conflict: Disk Reserved\n"); 19275 sd_return_failed_command(un, bp, EACCES); 19276 return; 19277 } 19278 19279 /* 19280 * 1147670: retry only if sd_retry_on_reservation_conflict 19281 * property is set (default is 1). Retries will not succeed 19282 * on a disk reserved by another initiator. HA systems 19283 * may reset this via sd.conf to avoid these retries. 19284 * 19285 * Note: The legacy return code for this failure is EIO, however EACCES 19286 * seems more appropriate for a reservation conflict.
19287 */ 19288 if (sd_retry_on_reservation_conflict == 0) { 19289 SD_ERROR(SD_LOG_IO, un, 19290 "sd_handle_resv_conflict: Device Reserved\n"); 19291 sd_return_failed_command(un, bp, EIO); 19292 return; 19293 } 19294 19295 /* 19296 * Retry the command if we can. 19297 * 19298 * Note: The legacy return code for this failure is EIO, however EACCES 19299 * seems more appropriate for a reservation conflict. 19300 */ 19301 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 19302 (clock_t)2, NULL); 19303 } 19304 19305 19306 19307 /* 19308 * Function: sd_pkt_status_qfull 19309 * 19310 * Description: Handle a QUEUE FULL condition from the target. This can 19311 * occur if the HBA does not handle the queue full condition. 19312 * (Basically this means third-party HBAs as Sun HBAs will 19313 * handle the queue full condition.) Note that if there are 19314 * some commands already in the transport, then the queue full 19315 * has occurred because the queue for this nexus is actually 19316 * full. If there are no commands in the transport, then the 19317 * queue full is resulting from some other initiator or lun 19318 * consuming all the resources at the target. 19319 * 19320 * Context: May be called from interrupt context 19321 */ 19322 19323 static void 19324 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 19325 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19326 { 19327 ASSERT(un != NULL); 19328 ASSERT(mutex_owned(SD_MUTEX(un))); 19329 ASSERT(bp != NULL); 19330 ASSERT(xp != NULL); 19331 ASSERT(pktp != NULL); 19332 19333 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19334 "sd_pkt_status_qfull: entry\n"); 19335 19336 /* 19337 * Just lower the QFULL throttle and retry the command. Note that 19338 * we do not limit the number of retries here. 19339 */ 19340 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 19341 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 19342 SD_RESTART_TIMEOUT, NULL); 19343 19344 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19345 "sd_pkt_status_qfull: exit\n"); 19346 } 19347 19348 19349 /* 19350 * Function: sd_reset_target 19351 * 19352 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 19353 * RESET_TARGET, or RESET_ALL. 19354 * 19355 * Context: May be called under interrupt context. 19356 */ 19357 19358 static void 19359 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 19360 { 19361 int rval = 0; 19362 19363 ASSERT(un != NULL); 19364 ASSERT(mutex_owned(SD_MUTEX(un))); 19365 ASSERT(pktp != NULL); 19366 19367 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 19368 19369 /* 19370 * No need to reset if the transport layer has already done so. 
19371 */ 19372 if ((pktp->pkt_statistics & 19373 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 19374 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19375 "sd_reset_target: no reset\n"); 19376 return; 19377 } 19378 19379 mutex_exit(SD_MUTEX(un)); 19380 19381 if (un->un_f_allow_bus_device_reset == TRUE) { 19382 if (un->un_f_lun_reset_enabled == TRUE) { 19383 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19384 "sd_reset_target: RESET_LUN\n"); 19385 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 19386 } 19387 if (rval == 0) { 19388 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19389 "sd_reset_target: RESET_TARGET\n"); 19390 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 19391 } 19392 } 19393 19394 if (rval == 0) { 19395 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19396 "sd_reset_target: RESET_ALL\n"); 19397 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 19398 } 19399 19400 mutex_enter(SD_MUTEX(un)); 19401 19402 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 19403 } 19404 19405 /* 19406 * Function: sd_target_change_task 19407 * 19408 * Description: Handle dynamic target change 19409 * 19410 * Context: Executes in a taskq() thread context 19411 */ 19412 static void 19413 sd_target_change_task(void *arg) 19414 { 19415 struct sd_lun *un = arg; 19416 uint64_t capacity; 19417 diskaddr_t label_cap; 19418 uint_t lbasize; 19419 sd_ssc_t *ssc; 19420 19421 ASSERT(un != NULL); 19422 ASSERT(!mutex_owned(SD_MUTEX(un))); 19423 19424 if ((un->un_f_blockcount_is_valid == FALSE) || 19425 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 19426 return; 19427 } 19428 19429 ssc = sd_ssc_init(un); 19430 19431 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity, 19432 &lbasize, SD_PATH_DIRECT) != 0) { 19433 SD_ERROR(SD_LOG_ERROR, un, 19434 "sd_target_change_task: fail to read capacity\n"); 19435 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19436 goto task_exit; 19437 } 19438 19439 mutex_enter(SD_MUTEX(un)); 19440 if (capacity <= un->un_blockcount) { 19441 mutex_exit(SD_MUTEX(un)); 19442 goto task_exit; 19443 } 19444 19445 sd_update_block_info(un, lbasize, capacity); 19446 mutex_exit(SD_MUTEX(un)); 19447 19448 /* 19449 * If lun is EFI labeled and lun capacity is greater than the 19450 * capacity contained in the label, log a sys event. 
19451 */ 19452 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 19453 (void*)SD_PATH_DIRECT) == 0) { 19454 mutex_enter(SD_MUTEX(un)); 19455 if (un->un_f_blockcount_is_valid && 19456 un->un_blockcount > label_cap) { 19457 mutex_exit(SD_MUTEX(un)); 19458 sd_log_lun_expansion_event(un, KM_SLEEP); 19459 } else { 19460 mutex_exit(SD_MUTEX(un)); 19461 } 19462 } 19463 19464 task_exit: 19465 sd_ssc_fini(ssc); 19466 } 19467 19468 /* 19469 * Function: sd_log_lun_expansion_event 19470 * 19471 * Description: Log lun expansion sys event 19472 * 19473 * Context: Never called from interrupt context 19474 */ 19475 static void 19476 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 19477 { 19478 int err; 19479 char *path; 19480 nvlist_t *dle_attr_list; 19481 19482 /* Allocate and build sysevent attribute list */ 19483 err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 19484 if (err != 0) { 19485 SD_ERROR(SD_LOG_ERROR, un, 19486 "sd_log_lun_expansion_event: fail to allocate space\n"); 19487 return; 19488 } 19489 19490 path = kmem_alloc(MAXPATHLEN, km_flag); 19491 if (path == NULL) { 19492 nvlist_free(dle_attr_list); 19493 SD_ERROR(SD_LOG_ERROR, un, 19494 "sd_log_lun_expansion_event: fail to allocate space\n"); 19495 return; 19496 } 19497 /* 19498 * Add path attribute to identify the lun. 19499 * We are using minor node 'a' as the sysevent attribute. 19500 */ 19501 (void) snprintf(path, MAXPATHLEN, "/devices"); 19502 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 19503 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 19504 ":a"); 19505 19506 err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path); 19507 if (err != 0) { 19508 nvlist_free(dle_attr_list); 19509 kmem_free(path, MAXPATHLEN); 19510 SD_ERROR(SD_LOG_ERROR, un, 19511 "sd_log_lun_expansion_event: fail to add attribute\n"); 19512 return; 19513 } 19514 19515 /* Log dynamic lun expansion sysevent */ 19516 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 19517 ESC_DEV_DLE, dle_attr_list, NULL, km_flag); 19518 if (err != DDI_SUCCESS) { 19519 SD_ERROR(SD_LOG_ERROR, un, 19520 "sd_log_lun_expansion_event: fail to log sysevent\n"); 19521 } 19522 19523 nvlist_free(dle_attr_list); 19524 kmem_free(path, MAXPATHLEN); 19525 } 19526 19527 /* 19528 * Function: sd_media_change_task 19529 * 19530 * Description: Recovery action for a CDROM to become available. 19531 * 19532 * Context: Executes in a taskq() thread context 19533 */ 19534 19535 static void 19536 sd_media_change_task(void *arg) 19537 { 19538 struct scsi_pkt *pktp = arg; 19539 struct sd_lun *un; 19540 struct buf *bp; 19541 struct sd_xbuf *xp; 19542 int err = 0; 19543 int retry_count = 0; 19544 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 19545 struct sd_sense_info si; 19546 19547 ASSERT(pktp != NULL); 19548 bp = (struct buf *)pktp->pkt_private; 19549 ASSERT(bp != NULL); 19550 xp = SD_GET_XBUF(bp); 19551 ASSERT(xp != NULL); 19552 un = SD_GET_UN(bp); 19553 ASSERT(un != NULL); 19554 ASSERT(!mutex_owned(SD_MUTEX(un))); 19555 ASSERT(un->un_f_monitor_media_state); 19556 19557 si.ssi_severity = SCSI_ERR_INFO; 19558 si.ssi_pfa_flag = FALSE; 19559 19560 /* 19561 * When a reset is issued on a CDROM, it takes a long time to 19562 * recover. The first few attempts to read capacity and other things 19563 * related to handling the unit attention fail (with an ASC 0x4 and 19564 * ASCQ 0x1). In that case we want to do enough retries, while limiting 19565 * the retries in other cases of genuine failure, such as no media 19566 * in the drive.
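 * The loop below therefore starts with a small retry limit (SD_UNIT_ATTENTION_RETRY/10) and raises it to the full SD_UNIT_ATTENTION_RETRY only when sd_handle_mchange() returns EAGAIN (device becoming ready), sleeping 0.5 sec between attempts.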
19567 */ 19568 while (retry_count++ < retry_limit) { 19569 if ((err = sd_handle_mchange(un)) == 0) { 19570 break; 19571 } 19572 if (err == EAGAIN) { 19573 retry_limit = SD_UNIT_ATTENTION_RETRY; 19574 } 19575 /* Sleep for 0.5 sec. & try again */ 19576 delay(drv_usectohz(500000)); 19577 } 19578 19579 /* 19580 * Dispatch (retry or fail) the original command here, 19581 * along with appropriate console messages.... 19582 * 19583 * Must grab the mutex before calling sd_retry_command, 19584 * sd_print_sense_msg and sd_return_failed_command. 19585 */ 19586 mutex_enter(SD_MUTEX(un)); 19587 if (err != SD_CMD_SUCCESS) { 19588 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19589 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 19590 si.ssi_severity = SCSI_ERR_FATAL; 19591 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 19592 sd_return_failed_command(un, bp, EIO); 19593 } else { 19594 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 19595 &si, EIO, (clock_t)0, NULL); 19596 } 19597 mutex_exit(SD_MUTEX(un)); 19598 } 19599 19600 19601 19602 /* 19603 * Function: sd_handle_mchange 19604 * 19605 * Description: Perform geometry validation & other recovery when CDROM 19606 * has been removed from drive. 19607 * 19608 * Return Code: 0 for success 19609 * errno-type return code of either sd_send_scsi_DOORLOCK() or 19610 * sd_send_scsi_READ_CAPACITY() 19611 * 19612 * Context: Executes in a taskq() thread context 19613 */ 19614 19615 static int 19616 sd_handle_mchange(struct sd_lun *un) 19617 { 19618 uint64_t capacity; 19619 uint32_t lbasize; 19620 int rval; 19621 sd_ssc_t *ssc; 19622 19623 ASSERT(!mutex_owned(SD_MUTEX(un))); 19624 ASSERT(un->un_f_monitor_media_state); 19625 19626 ssc = sd_ssc_init(un); 19627 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 19628 SD_PATH_DIRECT_PRIORITY); 19629 19630 if (rval != 0) 19631 goto failed; 19632 19633 mutex_enter(SD_MUTEX(un)); 19634 sd_update_block_info(un, lbasize, capacity); 19635 19636 if (un->un_errstats != NULL) { 19637 struct sd_errstats *stp = 19638 (struct sd_errstats *)un->un_errstats->ks_data; 19639 stp->sd_capacity.value.ui64 = (uint64_t) 19640 ((uint64_t)un->un_blockcount * 19641 (uint64_t)un->un_tgt_blocksize); 19642 } 19643 19644 /* 19645 * Check if the media in the device is writable or not 19646 */ 19647 if (ISCD(un)) { 19648 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY); 19649 } 19650 19651 /* 19652 * Note: Maybe let the strategy/partitioning chain worry about getting 19653 * valid geometry. 19654 */ 19655 mutex_exit(SD_MUTEX(un)); 19656 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 19657 19658 19659 if (cmlb_validate(un->un_cmlbhandle, 0, 19660 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 19661 sd_ssc_fini(ssc); 19662 return (EIO); 19663 } else { 19664 if (un->un_f_pkstats_enabled) { 19665 sd_set_pstats(un); 19666 SD_TRACE(SD_LOG_IO_PARTITION, un, 19667 "sd_handle_mchange: un:0x%p pstats created and " 19668 "set\n", un); 19669 } 19670 } 19671 19672 /* 19673 * Try to lock the door 19674 */ 19675 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 19676 SD_PATH_DIRECT_PRIORITY); 19677 failed: 19678 if (rval != 0) 19679 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19680 sd_ssc_fini(ssc); 19681 return (rval); 19682 } 19683 19684 19685 /* 19686 * Function: sd_send_scsi_DOORLOCK 19687 * 19688 * Description: Issue the scsi DOOR LOCK command 19689 * 19690 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19691 * structure for this target. 
19692 * flag - SD_REMOVAL_ALLOW 19693 * SD_REMOVAL_PREVENT 19694 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19695 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19696 * to use the USCSI "direct" chain and bypass the normal 19697 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19698 * command is issued as part of an error recovery action. 19699 * 19700 * Return Code: 0 - Success 19701 * errno return code from sd_ssc_send() 19702 * 19703 * Context: Can sleep. 19704 */ 19705 19706 static int 19707 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag) 19708 { 19709 struct scsi_extended_sense sense_buf; 19710 union scsi_cdb cdb; 19711 struct uscsi_cmd ucmd_buf; 19712 int status; 19713 struct sd_lun *un; 19714 19715 ASSERT(ssc != NULL); 19716 un = ssc->ssc_un; 19717 ASSERT(un != NULL); 19718 ASSERT(!mutex_owned(SD_MUTEX(un))); 19719 19720 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 19721 19722 /* already determined doorlock is not supported, fake success */ 19723 if (un->un_f_doorlock_supported == FALSE) { 19724 return (0); 19725 } 19726 19727 /* 19728 * If we are ejecting and see an SD_REMOVAL_PREVENT 19729 * ignore the command so we can complete the eject 19730 * operation. 19731 */ 19732 if (flag == SD_REMOVAL_PREVENT) { 19733 mutex_enter(SD_MUTEX(un)); 19734 if (un->un_f_ejecting == TRUE) { 19735 mutex_exit(SD_MUTEX(un)); 19736 return (EAGAIN); 19737 } 19738 mutex_exit(SD_MUTEX(un)); 19739 } 19740 19741 bzero(&cdb, sizeof (cdb)); 19742 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19743 19744 cdb.scc_cmd = SCMD_DOORLOCK; 19745 cdb.cdb_opaque[4] = (uchar_t)flag; 19746 19747 ucmd_buf.uscsi_cdb = (char *)&cdb; 19748 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19749 ucmd_buf.uscsi_bufaddr = NULL; 19750 ucmd_buf.uscsi_buflen = 0; 19751 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19752 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19753 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19754 ucmd_buf.uscsi_timeout = 15; 19755 19756 SD_TRACE(SD_LOG_IO, un, 19757 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n"); 19758 19759 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19760 UIO_SYSSPACE, path_flag); 19761 19762 if (status == 0) 19763 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19764 19765 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 19766 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19767 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 19768 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19769 19770 /* fake success and skip subsequent doorlock commands */ 19771 un->un_f_doorlock_supported = FALSE; 19772 return (0); 19773 } 19774 19775 return (status); 19776 } 19777 19778 /* 19779 * Function: sd_send_scsi_READ_CAPACITY 19780 * 19781 * Description: This routine uses the scsi READ CAPACITY command to determine 19782 * the device capacity in number of blocks and the device native 19783 * block size. If this function returns a failure, then the 19784 * values in *capp and *lbap are undefined. If the capacity 19785 * returned is 0xffffffff then the lun is too large for a 19786 * normal READ CAPACITY command and the results of a 19787 * READ CAPACITY 16 will be used instead. 19788 * 19789 * Arguments: ssc - ssc contains ptr to soft state struct for the target 19790 * capp - ptr to unsigned 64-bit variable to receive the 19791 * capacity value from the command. 
19792 * lbap - ptr to unsigned 32-bit variable to receive the 19793 * block size value from the command 19794 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19795 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19796 * to use the USCSI "direct" chain and bypass the normal 19797 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19798 * command is issued as part of an error recovery action. 19799 * 19800 * Return Code: 0 - Success 19801 * EIO - IO error 19802 * EACCES - Reservation conflict detected 19803 * EAGAIN - Device is becoming ready 19804 * errno return code from sd_ssc_send() 19805 * 19806 * Context: Can sleep. Blocks until command completes. 19807 */ 19808 19809 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 19810 19811 static int 19812 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap, 19813 int path_flag) 19814 { 19815 struct scsi_extended_sense sense_buf; 19816 struct uscsi_cmd ucmd_buf; 19817 union scsi_cdb cdb; 19818 uint32_t *capacity_buf; 19819 uint64_t capacity; 19820 uint32_t lbasize; 19821 uint32_t pbsize; 19822 int status; 19823 struct sd_lun *un; 19824 19825 ASSERT(ssc != NULL); 19826 19827 un = ssc->ssc_un; 19828 ASSERT(un != NULL); 19829 ASSERT(!mutex_owned(SD_MUTEX(un))); 19830 ASSERT(capp != NULL); 19831 ASSERT(lbap != NULL); 19832 19833 SD_TRACE(SD_LOG_IO, un, 19834 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 19835 19836 /* 19837 * First send a READ_CAPACITY command to the target. 19838 * (This command is mandatory under SCSI-2.) 19839 * 19840 * Set up the CDB for the READ_CAPACITY command. The Partial 19841 * Medium Indicator bit is cleared. The address field must be 19842 * zero if the PMI bit is zero. 19843 */ 19844 bzero(&cdb, sizeof (cdb)); 19845 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19846 19847 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 19848 19849 cdb.scc_cmd = SCMD_READ_CAPACITY; 19850 19851 ucmd_buf.uscsi_cdb = (char *)&cdb; 19852 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19853 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 19854 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 19855 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19856 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19857 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19858 ucmd_buf.uscsi_timeout = 60; 19859 19860 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19861 UIO_SYSSPACE, path_flag); 19862 19863 switch (status) { 19864 case 0: 19865 /* Return failure if we did not get valid capacity data. */ 19866 if (ucmd_buf.uscsi_resid != 0) { 19867 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 19868 "sd_send_scsi_READ_CAPACITY received invalid " 19869 "capacity data"); 19870 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19871 return (EIO); 19872 } 19873 /* 19874 * Read capacity and block size from the READ CAPACITY 10 data. 19875 * This data may be adjusted later due to device specific 19876 * issues. 19877 * 19878 * According to the SCSI spec, the READ CAPACITY 10 19879 * command returns the following: 19880 * 19881 * bytes 0-3: Maximum logical block address available.
19882 * (MSB in byte:0 & LSB in byte:3) 19883 * 19884 * bytes 4-7: Block length in bytes 19885 * (MSB in byte:4 & LSB in byte:7) 19886 * 19887 */ 19888 capacity = BE_32(capacity_buf[0]); 19889 lbasize = BE_32(capacity_buf[1]); 19890 19891 /* 19892 * Done with capacity_buf 19893 */ 19894 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19895 19896 /* 19897 * if the reported capacity is set to all 0xf's, then 19898 * this disk is too large and requires SBC-2 commands. 19899 * Reissue the request using READ CAPACITY 16. 19900 */ 19901 if (capacity == 0xffffffff) { 19902 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19903 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, 19904 &lbasize, &pbsize, path_flag); 19905 if (status != 0) { 19906 return (status); 19907 } 19908 } 19909 break; /* Success! */ 19910 case EIO: 19911 switch (ucmd_buf.uscsi_status) { 19912 case STATUS_RESERVATION_CONFLICT: 19913 status = EACCES; 19914 break; 19915 case STATUS_CHECK: 19916 /* 19917 * Check condition; look for ASC/ASCQ of 0x04/0x01 19918 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 19919 */ 19920 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19921 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 19922 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 19923 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19924 return (EAGAIN); 19925 } 19926 break; 19927 default: 19928 break; 19929 } 19930 /* FALLTHRU */ 19931 default: 19932 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19933 return (status); 19934 } 19935 19936 /* 19937 * Some ATAPI CD-ROM drives report inaccurate LBA size values 19938 * (2352 and 0 are common) so for these devices always force the value 19939 * to 2048 as required by the ATAPI specs. 19940 */ 19941 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 19942 lbasize = 2048; 19943 } 19944 19945 /* 19946 * Get the maximum LBA value from the READ CAPACITY data. 19947 * Here we assume that the Partial Medium Indicator (PMI) bit 19948 * was cleared when issuing the command. This means that the LBA 19949 * returned from the device is the LBA of the last logical block 19950 * on the logical unit. The actual logical block count will be 19951 * this value plus one. 19952 * 19953 * Currently, for removable media, the capacity is saved in terms 19954 * of un->un_sys_blocksize, so scale the capacity value to reflect this. 19955 */ 19956 if (un->un_f_has_removable_media) 19957 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize); 19958 19959 /* 19960 * Copy the values from the READ CAPACITY command into the space 19961 * provided by the caller. 19962 */ 19963 *capp = capacity; 19964 *lbap = lbasize; 19965 19966 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 19967 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 19968 19969 /* 19970 * Both the lbasize and capacity from the device must be nonzero, 19971 * otherwise we assume that the values are not valid and return 19972 * failure to the caller. (4203735) 19973 */ 19974 if ((capacity == 0) || (lbasize == 0)) { 19975 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 19976 "sd_send_scsi_READ_CAPACITY received invalid value " 19977 "capacity %llu lbasize %d", capacity, lbasize); 19978 return (EIO); 19979 } 19980 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19981 return (0); 19982 } 19983 19984 /* 19985 * Function: sd_send_scsi_READ_CAPACITY_16 19986 * 19987 * Description: This routine uses the scsi READ CAPACITY 16 command to 19988 * determine the device capacity in number of blocks and the 19989 * device native block size. 
If this function returns a failure, 19990 * then the values in *capp and *lbap are undefined. 19991 * This routine should be called by sd_send_scsi_READ_CAPACITY 19992 * which will apply any device specific adjustments to capacity 19993 * and lbasize. One exception is that it is also called by 19994 * sd_get_media_info_ext. In that function, there is no need to 19995 * adjust the capacity and lbasize. 19996 * 19997 * Arguments: ssc - ssc contains ptr to soft state struct for the target 19998 * capp - ptr to unsigned 64-bit variable to receive the 19999 * capacity value from the command. 20000 * lbap - ptr to unsigned 32-bit variable to receive the 20001 * block size value from the command 20002 * psp - ptr to unsigned 32-bit variable to receive the 20003 * physical block size value from the command 20004 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20005 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20006 * to use the USCSI "direct" chain and bypass the normal 20007 * command waitq. SD_PATH_DIRECT_PRIORITY is used when 20008 * this command is issued as part of an error recovery 20009 * action. 20010 * 20011 * Return Code: 0 - Success 20012 * EIO - IO error 20013 * EACCES - Reservation conflict detected 20014 * EAGAIN - Device is becoming ready 20015 * errno return code from sd_ssc_send() 20016 * 20017 * Context: Can sleep. Blocks until command completes. 20018 */ 20019 20020 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 20021 20022 static int 20023 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, 20024 uint32_t *lbap, uint32_t *psp, int path_flag) 20025 { 20026 struct scsi_extended_sense sense_buf; 20027 struct uscsi_cmd ucmd_buf; 20028 union scsi_cdb cdb; 20029 uint64_t *capacity16_buf; 20030 uint64_t capacity; 20031 uint32_t lbasize; 20032 uint32_t pbsize; 20033 uint32_t lbpb_exp; 20034 int status; 20035 struct sd_lun *un; 20036 20037 ASSERT(ssc != NULL); 20038 20039 un = ssc->ssc_un; 20040 ASSERT(un != NULL); 20041 ASSERT(!mutex_owned(SD_MUTEX(un))); 20042 ASSERT(capp != NULL); 20043 ASSERT(lbap != NULL); 20044 20045 SD_TRACE(SD_LOG_IO, un, 20046 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un); 20047 20048 /* 20049 * First send a READ_CAPACITY_16 command to the target. 20050 * 20051 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 20052 * Medium Indicator bit is cleared. The address field must be 20053 * zero if the PMI bit is zero. 20054 */ 20055 bzero(&cdb, sizeof (cdb)); 20056 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20057 20058 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 20059 20060 ucmd_buf.uscsi_cdb = (char *)&cdb; 20061 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 20062 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 20063 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 20064 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20065 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 20066 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20067 ucmd_buf.uscsi_timeout = 60; 20068 20069 /* 20070 * Read Capacity (16) is a Service Action In command.
One 20071 * command byte (0x9E) is overloaded for multiple operations, 20072 * with the second CDB byte specifying the desired operation 20073 */ 20074 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 20075 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 20076 20077 /* 20078 * Fill in allocation length field 20079 */ 20080 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 20081 20082 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20083 UIO_SYSSPACE, path_flag); 20084 20085 switch (status) { 20086 case 0: 20087 /* Return failure if we did not get valid capacity data. */ 20088 if (ucmd_buf.uscsi_resid > 20) { 20089 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20090 "sd_send_scsi_READ_CAPACITY_16 received invalid " 20091 "capacity data"); 20092 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20093 return (EIO); 20094 } 20095 20096 /* 20097 * Read capacity and block size from the READ CAPACITY 16 data. 20098 * This data may be adjusted later due to device specific 20099 * issues. 20100 * 20101 * According to the SCSI spec, the READ CAPACITY 16 20102 * command returns the following: 20103 * 20104 * bytes 0-7: Maximum logical block address available. 20105 * (MSB in byte:0 & LSB in byte:7) 20106 * 20107 * bytes 8-11: Block length in bytes 20108 * (MSB in byte:8 & LSB in byte:11) 20109 * 20110 * byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT 20111 */ 20112 capacity = BE_64(capacity16_buf[0]); 20113 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 20114 lbpb_exp = (BE_64(capacity16_buf[1]) >> 40) & 0x0f; 20115 20116 pbsize = lbasize << lbpb_exp; 20117 20118 /* 20119 * Done with capacity16_buf 20120 */ 20121 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20122 20123 /* 20124 * if the reported capacity is set to all 0xf's, then 20125 * this disk is too large. This could only happen with 20126 * a device that supports LBAs larger than 64 bits which 20127 * are not defined by any current T10 standards. 20128 */ 20129 if (capacity == 0xffffffffffffffff) { 20130 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20131 "disk is too large"); 20132 return (EIO); 20133 } 20134 break; /* Success! */ 20135 case EIO: 20136 switch (ucmd_buf.uscsi_status) { 20137 case STATUS_RESERVATION_CONFLICT: 20138 status = EACCES; 20139 break; 20140 case STATUS_CHECK: 20141 /* 20142 * Check condition; look for ASC/ASCQ of 0x04/0x01 20143 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 20144 */ 20145 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20146 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 20147 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 20148 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20149 return (EAGAIN); 20150 } 20151 break; 20152 default: 20153 break; 20154 } 20155 /* FALLTHRU */ 20156 default: 20157 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20158 return (status); 20159 } 20160 20161 *capp = capacity; 20162 *lbap = lbasize; 20163 *psp = pbsize; 20164 20165 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 20166 "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n", 20167 capacity, lbasize, pbsize); 20168 20169 return (0); 20170 } 20171 20172 20173 /* 20174 * Function: sd_send_scsi_START_STOP_UNIT 20175 * 20176 * Description: Issue a scsi START STOP UNIT command to the target. 20177 * 20178 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20179 * structure for this target.
20180 * pc_flag - SD_POWER_CONDITION 20181 * SD_START_STOP 20182 * flag - SD_TARGET_START 20183 * SD_TARGET_STOP 20184 * SD_TARGET_EJECT 20185 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20186 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20187 * to use the USCSI "direct" chain and bypass the normal 20188 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 20189 * command is issued as part of an error recovery action. 20190 * 20191 * Return Code: 0 - Success 20192 * EIO - IO error 20193 * EACCES - Reservation conflict detected 20194 * ENXIO - Not Ready, medium not present 20195 * errno return code from sd_ssc_send() 20196 * 20197 * Context: Can sleep. 20198 */ 20199 20200 static int 20201 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, int flag, 20202 int path_flag) 20203 { 20204 struct scsi_extended_sense sense_buf; 20205 union scsi_cdb cdb; 20206 struct uscsi_cmd ucmd_buf; 20207 int status; 20208 struct sd_lun *un; 20209 20210 ASSERT(ssc != NULL); 20211 un = ssc->ssc_un; 20212 ASSERT(un != NULL); 20213 ASSERT(!mutex_owned(SD_MUTEX(un))); 20214 20215 SD_TRACE(SD_LOG_IO, un, 20216 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 20217 20218 if (un->un_f_check_start_stop && 20219 ((pc_flag == SD_START_STOP) && (flag != SD_TARGET_EJECT)) && 20220 (un->un_f_start_stop_supported != TRUE)) { 20221 return (0); 20222 } 20223 20224 /* 20225 * If we are performing an eject operation and 20226 * we receive any command other than SD_TARGET_EJECT 20227 * we should immediately return. 20228 */ 20229 if (flag != SD_TARGET_EJECT) { 20230 mutex_enter(SD_MUTEX(un)); 20231 if (un->un_f_ejecting == TRUE) { 20232 mutex_exit(SD_MUTEX(un)); 20233 return (EAGAIN); 20234 } 20235 mutex_exit(SD_MUTEX(un)); 20236 } 20237 20238 bzero(&cdb, sizeof (cdb)); 20239 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20240 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20241 20242 cdb.scc_cmd = SCMD_START_STOP; 20243 cdb.cdb_opaque[4] = (pc_flag == SD_POWER_CONDITION) ? 20244 (uchar_t)(flag << 4) : (uchar_t)flag; 20245 20246 ucmd_buf.uscsi_cdb = (char *)&cdb; 20247 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20248 ucmd_buf.uscsi_bufaddr = NULL; 20249 ucmd_buf.uscsi_buflen = 0; 20250 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20251 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20252 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20253 ucmd_buf.uscsi_timeout = 200; 20254 20255 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20256 UIO_SYSSPACE, path_flag); 20257 20258 switch (status) { 20259 case 0: 20260 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20261 break; /* Success! 
*/ 20262 case EIO: 20263 switch (ucmd_buf.uscsi_status) { 20264 case STATUS_RESERVATION_CONFLICT: 20265 status = EACCES; 20266 break; 20267 case STATUS_CHECK: 20268 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 20269 switch (scsi_sense_key( 20270 (uint8_t *)&sense_buf)) { 20271 case KEY_ILLEGAL_REQUEST: 20272 status = ENOTSUP; 20273 break; 20274 case KEY_NOT_READY: 20275 if (scsi_sense_asc( 20276 (uint8_t *)&sense_buf) 20277 == 0x3A) { 20278 status = ENXIO; 20279 } 20280 break; 20281 default: 20282 break; 20283 } 20284 } 20285 break; 20286 default: 20287 break; 20288 } 20289 break; 20290 default: 20291 break; 20292 } 20293 20294 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 20295 20296 return (status); 20297 } 20298 20299 20300 /* 20301 * Function: sd_start_stop_unit_callback 20302 * 20303 * Description: timeout(9F) callback to begin recovery process for a 20304 * device that has spun down. 20305 * 20306 * Arguments: arg - pointer to associated softstate struct. 20307 * 20308 * Context: Executes in a timeout(9F) thread context 20309 */ 20310 20311 static void 20312 sd_start_stop_unit_callback(void *arg) 20313 { 20314 struct sd_lun *un = arg; 20315 ASSERT(un != NULL); 20316 ASSERT(!mutex_owned(SD_MUTEX(un))); 20317 20318 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 20319 20320 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 20321 } 20322 20323 20324 /* 20325 * Function: sd_start_stop_unit_task 20326 * 20327 * Description: Recovery procedure when a drive is spun down. 20328 * 20329 * Arguments: arg - pointer to associated softstate struct. 20330 * 20331 * Context: Executes in a taskq() thread context 20332 */ 20333 20334 static void 20335 sd_start_stop_unit_task(void *arg) 20336 { 20337 struct sd_lun *un = arg; 20338 sd_ssc_t *ssc; 20339 int power_level; 20340 int rval; 20341 20342 ASSERT(un != NULL); 20343 ASSERT(!mutex_owned(SD_MUTEX(un))); 20344 20345 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 20346 20347 /* 20348 * Some unformatted drives report not ready error, no need to 20349 * restart if format has been initiated. 20350 */ 20351 mutex_enter(SD_MUTEX(un)); 20352 if (un->un_f_format_in_progress == TRUE) { 20353 mutex_exit(SD_MUTEX(un)); 20354 return; 20355 } 20356 mutex_exit(SD_MUTEX(un)); 20357 20358 ssc = sd_ssc_init(un); 20359 /* 20360 * When a START STOP command is issued from here, it is part of a 20361 * failure recovery operation and must be issued before any other 20362 * commands, including any pending retries. Thus it must be sent 20363 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 20364 * succeeds or not, we will start I/O after the attempt. 20365 * If power condition is supported and the current power level 20366 * is capable of performing I/O, we should set the power condition 20367 * to that level. Otherwise, set the power condition to ACTIVE. 20368 */ 20369 if (un->un_f_power_condition_supported) { 20370 mutex_enter(SD_MUTEX(un)); 20371 ASSERT(SD_PM_IS_LEVEL_VALID(un, un->un_power_level)); 20372 power_level = sd_pwr_pc.ran_perf[un->un_power_level] 20373 > 0 ? 
un->un_power_level : SD_SPINDLE_ACTIVE; 20374 mutex_exit(SD_MUTEX(un)); 20375 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION, 20376 sd_pl2pc[power_level], SD_PATH_DIRECT_PRIORITY); 20377 } else { 20378 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 20379 SD_TARGET_START, SD_PATH_DIRECT_PRIORITY); 20380 } 20381 20382 if (rval != 0) 20383 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 20384 sd_ssc_fini(ssc); 20385 /* 20386 * The above call blocks until the START_STOP_UNIT command completes. 20387 * Now that it has completed, we must re-try the original IO that 20388 * received the NOT READY condition in the first place. There are 20389 * three possible conditions here: 20390 * 20391 * (1) The original IO is on un_retry_bp. 20392 * (2) The original IO is on the regular wait queue, and un_retry_bp 20393 * is NULL. 20394 * (3) The original IO is on the regular wait queue, and un_retry_bp 20395 * points to some other, unrelated bp. 20396 * 20397 * For each case, we must call sd_start_cmds() with un_retry_bp 20398 * as the argument. If un_retry_bp is NULL, this will initiate 20399 * processing of the regular wait queue. If un_retry_bp is not NULL, 20400 * then this will process the bp on un_retry_bp. That may or may not 20401 * be the original IO, but that does not matter: the important thing 20402 * is to keep the IO processing going at this point. 20403 * 20404 * Note: This is a very specific error recovery sequence associated 20405 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 20406 * serialize the I/O with completion of the spin-up. 20407 */ 20408 mutex_enter(SD_MUTEX(un)); 20409 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 20410 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 20411 un, un->un_retry_bp); 20412 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 20413 sd_start_cmds(un, un->un_retry_bp); 20414 mutex_exit(SD_MUTEX(un)); 20415 20416 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 20417 } 20418 20419 20420 /* 20421 * Function: sd_send_scsi_INQUIRY 20422 * 20423 * Description: Issue the scsi INQUIRY command. 20424 * 20425 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20426 * structure for this target. 20427 * bufaddr - buffer to receive the inquiry data 20428 * buflen - size of bufaddr in bytes 20429 * evpd - EVPD bit for the inquiry CDB (0 requests the standard data) 20430 * page_code - VPD page to fetch when evpd is set 20431 * residp - optional pointer to receive the command residual 20432 * 20433 * Return Code: 0 - Success 20434 * errno return code from sd_ssc_send() 20435 * 20436 * Context: Can sleep. Does not return until command is completed.
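 *
 *		Illustration only (not part of the original source): a
 *		minimal sketch of a caller fetching the standard inquiry
 *		data through this routine. Passing evpd == 0 with
 *		page_code == 0 requests the standard INQUIRY data; the
 *		0xFF buffer size and the local names "inqp"/"resid" are
 *		hypothetical.
 *
 *			sd_ssc_t *ssc = sd_ssc_init(un);
 *			uchar_t inqp[0xFF];
 *			size_t resid = 0;
 *			int rval;
 *
 *			rval = sd_send_scsi_INQUIRY(ssc, inqp,
 *			    sizeof (inqp), 0, 0, &resid);
 *			sd_ssc_fini(ssc);
 *			if (rval == 0) {
 *				... (sizeof (inqp) - resid) bytes valid ...
 *			}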
20437 */ 20438 20439 static int 20440 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen, 20441 uchar_t evpd, uchar_t page_code, size_t *residp) 20442 { 20443 union scsi_cdb cdb; 20444 struct uscsi_cmd ucmd_buf; 20445 int status; 20446 struct sd_lun *un; 20447 20448 ASSERT(ssc != NULL); 20449 un = ssc->ssc_un; 20450 ASSERT(un != NULL); 20451 ASSERT(!mutex_owned(SD_MUTEX(un))); 20452 ASSERT(bufaddr != NULL); 20453 20454 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 20455 20456 bzero(&cdb, sizeof (cdb)); 20457 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20458 bzero(bufaddr, buflen); 20459 20460 cdb.scc_cmd = SCMD_INQUIRY; 20461 cdb.cdb_opaque[1] = evpd; 20462 cdb.cdb_opaque[2] = page_code; 20463 FORMG0COUNT(&cdb, buflen); 20464 20465 ucmd_buf.uscsi_cdb = (char *)&cdb; 20466 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20467 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20468 ucmd_buf.uscsi_buflen = buflen; 20469 ucmd_buf.uscsi_rqbuf = NULL; 20470 ucmd_buf.uscsi_rqlen = 0; 20471 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 20472 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 20473 20474 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20475 UIO_SYSSPACE, SD_PATH_DIRECT); 20476 20477 /* 20478 * Only handle status == 0; the upper-level caller 20479 * will make a different assessment based on the context. 20480 */ 20481 if (status == 0) 20482 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20483 20484 if ((status == 0) && (residp != NULL)) { 20485 *residp = ucmd_buf.uscsi_resid; 20486 } 20487 20488 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 20489 20490 return (status); 20491 } 20492 20493 20494 /* 20495 * Function: sd_send_scsi_TEST_UNIT_READY 20496 * 20497 * Description: Issue the scsi TEST UNIT READY command. 20498 * This routine can be told to set the flag USCSI_DIAGNOSE to 20499 * prevent retrying failed commands. Use this when the intent 20500 * is either to check for device readiness, to clear a Unit 20501 * Attention, or to clear any outstanding sense data. 20502 * However under specific conditions the expected behavior 20503 * is for retries to bring a device ready, so use the flag 20504 * with caution. 20505 * 20506 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20507 * structure for this target. 20508 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 20509 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 20510 * 0: don't check for media present, do retries on cmd. 20511 * 20512 * Return Code: 0 - Success 20513 * EIO - IO error 20514 * EACCES - Reservation conflict detected 20515 * ENXIO - Not Ready, medium not present 20516 * errno return code from sd_ssc_send() 20517 * 20518 * Context: Can sleep. Does not return until command is completed. 20519 */ 20520 20521 static int 20522 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag) 20523 { 20524 struct scsi_extended_sense sense_buf; 20525 union scsi_cdb cdb; 20526 struct uscsi_cmd ucmd_buf; 20527 int status; 20528 struct sd_lun *un; 20529 20530 ASSERT(ssc != NULL); 20531 un = ssc->ssc_un; 20532 ASSERT(un != NULL); 20533 ASSERT(!mutex_owned(SD_MUTEX(un))); 20534 20535 SD_TRACE(SD_LOG_IO, un, 20536 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 20537 20538 /* 20539 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 20540 * timeouts when they receive a TUR and the queue is not empty.
Check 20541 * the configuration flag set during attach (indicating the drive has 20542 * this firmware bug) and un_ncmds_in_transport before issuing the 20543 * TUR. If there are 20544 * pending commands return success, this is a bit arbitrary but is ok 20545 * for non-removables (i.e. the eliteI disks) and non-clustering 20546 * configurations. 20547 */ 20548 if (un->un_f_cfg_tur_check == TRUE) { 20549 mutex_enter(SD_MUTEX(un)); 20550 if (un->un_ncmds_in_transport != 0) { 20551 mutex_exit(SD_MUTEX(un)); 20552 return (0); 20553 } 20554 mutex_exit(SD_MUTEX(un)); 20555 } 20556 20557 bzero(&cdb, sizeof (cdb)); 20558 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20559 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20560 20561 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 20562 20563 ucmd_buf.uscsi_cdb = (char *)&cdb; 20564 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20565 ucmd_buf.uscsi_bufaddr = NULL; 20566 ucmd_buf.uscsi_buflen = 0; 20567 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20568 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20569 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20570 20571 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 20572 if ((flag & SD_DONT_RETRY_TUR) != 0) { 20573 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 20574 } 20575 ucmd_buf.uscsi_timeout = 60; 20576 20577 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20578 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 20579 SD_PATH_STANDARD)); 20580 20581 switch (status) { 20582 case 0: 20583 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20584 break; /* Success! */ 20585 case EIO: 20586 switch (ucmd_buf.uscsi_status) { 20587 case STATUS_RESERVATION_CONFLICT: 20588 status = EACCES; 20589 break; 20590 case STATUS_CHECK: 20591 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 20592 break; 20593 } 20594 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20595 (scsi_sense_key((uint8_t *)&sense_buf) == 20596 KEY_NOT_READY) && 20597 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 20598 status = ENXIO; 20599 } 20600 break; 20601 default: 20602 break; 20603 } 20604 break; 20605 default: 20606 break; 20607 } 20608 20609 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 20610 20611 return (status); 20612 } 20613 20614 /* 20615 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 20616 * 20617 * Description: Issue the scsi PERSISTENT RESERVE IN command. 20618 * 20619 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20620 * structure for this target. 20621 * 20622 * Return Code: 0 - Success 20623 * EACCES 20624 * ENOTSUP 20625 * errno return code from sd_ssc_send() 20626 * 20627 * Context: Can sleep. Does not return until command is completed. 
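 *
 *		Illustration only (not part of the original source): a
 *		hypothetical removable-media caller polling for media.
 *		With SD_CHECK_FOR_MEDIA, a NOT READY sense with ASC 0x3A
 *		(medium not present) is folded into ENXIO by the code
 *		below.
 *
 *			sd_ssc_t *ssc = sd_ssc_init(un);
 *			int rval;
 *
 *			rval = sd_send_scsi_TEST_UNIT_READY(ssc,
 *			    SD_CHECK_FOR_MEDIA);
 *			sd_ssc_fini(ssc);
 *			if (rval == ENXIO) {
 *				... no medium in the drive ...
 *			}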
20628 */ 20629 20630 static int 20631 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd, 20632 uint16_t data_len, uchar_t *data_bufp) 20633 { 20634 struct scsi_extended_sense sense_buf; 20635 union scsi_cdb cdb; 20636 struct uscsi_cmd ucmd_buf; 20637 int status; 20638 int no_caller_buf = FALSE; 20639 struct sd_lun *un; 20640 20641 ASSERT(ssc != NULL); 20642 un = ssc->ssc_un; 20643 ASSERT(un != NULL); 20644 ASSERT(!mutex_owned(SD_MUTEX(un))); 20645 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 20646 20647 SD_TRACE(SD_LOG_IO, un, 20648 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 20649 20650 bzero(&cdb, sizeof (cdb)); 20651 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20652 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20653 if (data_bufp == NULL) { 20654 /* Allocate a default buf if the caller did not give one */ 20655 ASSERT(data_len == 0); 20656 data_len = MHIOC_RESV_KEY_SIZE; 20657 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 20658 no_caller_buf = TRUE; 20659 } 20660 20661 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 20662 cdb.cdb_opaque[1] = usr_cmd; 20663 FORMG1COUNT(&cdb, data_len); 20664 20665 ucmd_buf.uscsi_cdb = (char *)&cdb; 20666 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20667 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 20668 ucmd_buf.uscsi_buflen = data_len; 20669 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20670 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20671 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20672 ucmd_buf.uscsi_timeout = 60; 20673 20674 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20675 UIO_SYSSPACE, SD_PATH_STANDARD); 20676 20677 switch (status) { 20678 case 0: 20679 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20680 20681 break; /* Success! */ 20682 case EIO: 20683 switch (ucmd_buf.uscsi_status) { 20684 case STATUS_RESERVATION_CONFLICT: 20685 status = EACCES; 20686 break; 20687 case STATUS_CHECK: 20688 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20689 (scsi_sense_key((uint8_t *)&sense_buf) == 20690 KEY_ILLEGAL_REQUEST)) { 20691 status = ENOTSUP; 20692 } 20693 break; 20694 default: 20695 break; 20696 } 20697 break; 20698 default: 20699 break; 20700 } 20701 20702 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 20703 20704 if (no_caller_buf == TRUE) { 20705 kmem_free(data_bufp, data_len); 20706 } 20707 20708 return (status); 20709 } 20710 20711 20712 /* 20713 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 20714 * 20715 * Description: This routine is the driver entry point for handling CD-ROM 20716 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 20717 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 20718 * device. 20719 * 20720 * Arguments: ssc - ssc contains un - pointer to soft state struct 20721 * for the target. 20722 * usr_cmd SCSI-3 reservation facility command (one of 20723 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 20724 * SD_SCSI3_PREEMPTANDABORT) 20725 * usr_bufp - user provided pointer register, reserve descriptor or 20726 * preempt and abort structure (mhioc_register_t, 20727 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 20728 * 20729 * Return Code: 0 - Success 20730 * EACCES 20731 * ENOTSUP 20732 * errno return code from sd_ssc_send() 20733 * 20734 * Context: Can sleep. Does not return until command is completed. 
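 *
 *		Illustration only (not part of the original source): a
 *		hedged sketch of the SD_SCSI3_REGISTER case. The caller
 *		fills an mhioc_register_t, which this routine copies into
 *		the 24-byte PROUT parameter list; "old_key" and "new_key"
 *		are hypothetical byte arrays of MHIOC_RESV_KEY_SIZE.
 *
 *			mhioc_register_t reg;
 *
 *			bzero(&reg, sizeof (reg));
 *			bcopy(old_key, reg.oldkey.key, MHIOC_RESV_KEY_SIZE);
 *			bcopy(new_key, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
 *			reg.aptpl = 0;	... registration does not persist
 *					through power loss ...
 *			rval = sd_send_scsi_PERSISTENT_RESERVE_OUT(ssc,
 *			    SD_SCSI3_REGISTER, (uchar_t *)&reg);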
20735 */ 20736 20737 static int 20738 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd, 20739 uchar_t *usr_bufp) 20740 { 20741 struct scsi_extended_sense sense_buf; 20742 union scsi_cdb cdb; 20743 struct uscsi_cmd ucmd_buf; 20744 int status; 20745 uchar_t data_len = sizeof (sd_prout_t); 20746 sd_prout_t *prp; 20747 struct sd_lun *un; 20748 20749 ASSERT(ssc != NULL); 20750 un = ssc->ssc_un; 20751 ASSERT(un != NULL); 20752 ASSERT(!mutex_owned(SD_MUTEX(un))); 20753 ASSERT(data_len == 24); /* required by scsi spec */ 20754 20755 SD_TRACE(SD_LOG_IO, un, 20756 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 20757 20758 if (usr_bufp == NULL) { 20759 return (EINVAL); 20760 } 20761 20762 bzero(&cdb, sizeof (cdb)); 20763 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20764 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20765 prp = kmem_zalloc(data_len, KM_SLEEP); 20766 20767 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 20768 cdb.cdb_opaque[1] = usr_cmd; 20769 FORMG1COUNT(&cdb, data_len); 20770 20771 ucmd_buf.uscsi_cdb = (char *)&cdb; 20772 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20773 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 20774 ucmd_buf.uscsi_buflen = data_len; 20775 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20776 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20777 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 20778 ucmd_buf.uscsi_timeout = 60; 20779 20780 switch (usr_cmd) { 20781 case SD_SCSI3_REGISTER: { 20782 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 20783 20784 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20785 bcopy(ptr->newkey.key, prp->service_key, 20786 MHIOC_RESV_KEY_SIZE); 20787 prp->aptpl = ptr->aptpl; 20788 break; 20789 } 20790 case SD_SCSI3_RESERVE: 20791 case SD_SCSI3_RELEASE: { 20792 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 20793 20794 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20795 prp->scope_address = BE_32(ptr->scope_specific_addr); 20796 cdb.cdb_opaque[2] = ptr->type; 20797 break; 20798 } 20799 case SD_SCSI3_PREEMPTANDABORT: { 20800 mhioc_preemptandabort_t *ptr = 20801 (mhioc_preemptandabort_t *)usr_bufp; 20802 20803 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20804 bcopy(ptr->victim_key.key, prp->service_key, 20805 MHIOC_RESV_KEY_SIZE); 20806 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 20807 cdb.cdb_opaque[2] = ptr->resvdesc.type; 20808 ucmd_buf.uscsi_flags |= USCSI_HEAD; 20809 break; 20810 } 20811 case SD_SCSI3_REGISTERANDIGNOREKEY: 20812 { 20813 mhioc_registerandignorekey_t *ptr; 20814 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 20815 bcopy(ptr->newkey.key, 20816 prp->service_key, MHIOC_RESV_KEY_SIZE); 20817 prp->aptpl = ptr->aptpl; 20818 break; 20819 } 20820 default: 20821 ASSERT(FALSE); 20822 break; 20823 } 20824 20825 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20826 UIO_SYSSPACE, SD_PATH_STANDARD); 20827 20828 switch (status) { 20829 case 0: 20830 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20831 break; /* Success! 
*/ 20832 case EIO: 20833 switch (ucmd_buf.uscsi_status) { 20834 case STATUS_RESERVATION_CONFLICT: 20835 status = EACCES; 20836 break; 20837 case STATUS_CHECK: 20838 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20839 (scsi_sense_key((uint8_t *)&sense_buf) == 20840 KEY_ILLEGAL_REQUEST)) { 20841 status = ENOTSUP; 20842 } 20843 break; 20844 default: 20845 break; 20846 } 20847 break; 20848 default: 20849 break; 20850 } 20851 20852 kmem_free(prp, data_len); 20853 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 20854 return (status); 20855 } 20856 20857 20858 /* 20859 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 20860 * 20861 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 20862 * 20863 * Arguments: un - pointer to the target's soft state struct 20864 * dkc - pointer to the callback structure 20865 * 20866 * Return Code: 0 - success 20867 * errno-type error code 20868 * 20869 * Context: kernel thread context only. 20870 * 20871 * _______________________________________________________________ 20872 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 20873 * |FLUSH_VOLATILE| | operation | 20874 * |______________|______________|_________________________________| 20875 * | 0 | NULL | Synchronous flush on both | 20876 * | | | volatile and non-volatile cache | 20877 * |______________|______________|_________________________________| 20878 * | 1 | NULL | Synchronous flush on volatile | 20879 * | | | cache; disk drivers may suppress| 20880 * | | | flush if disk table indicates | 20881 * | | | non-volatile cache | 20882 * |______________|______________|_________________________________| 20883 * | 0 | !NULL | Asynchronous flush on both | 20884 * | | | volatile and non-volatile cache;| 20885 * |______________|______________|_________________________________| 20886 * | 1 | !NULL | Asynchronous flush on volatile | 20887 * | | | cache; disk drivers may suppress| 20888 * | | | flush if disk table indicates | 20889 * | | | non-volatile cache | 20890 * |______________|______________|_________________________________| 20891 * 20892 */ 20893 20894 static int 20895 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 20896 { 20897 struct sd_uscsi_info *uip; 20898 struct uscsi_cmd *uscmd; 20899 union scsi_cdb *cdb; 20900 struct buf *bp; 20901 int rval = 0; 20902 int is_async; 20903 20904 SD_TRACE(SD_LOG_IO, un, 20905 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 20906 20907 ASSERT(un != NULL); 20908 ASSERT(!mutex_owned(SD_MUTEX(un))); 20909 20910 if (dkc == NULL || dkc->dkc_callback == NULL) { 20911 is_async = FALSE; 20912 } else { 20913 is_async = TRUE; 20914 } 20915 20916 mutex_enter(SD_MUTEX(un)); 20917 /* check whether cache flush should be suppressed */ 20918 if (un->un_f_suppress_cache_flush == TRUE) { 20919 mutex_exit(SD_MUTEX(un)); 20920 /* 20921 * suppress the cache flush if the device is told to do 20922 * so by sd.conf or disk table 20923 */ 20924 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 20925 skip the cache flush since suppress_cache_flush is %d!\n", 20926 un->un_f_suppress_cache_flush); 20927 20928 if (is_async == TRUE) { 20929 /* invoke callback for asynchronous flush */ 20930 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 20931 } 20932 return (rval); 20933 } 20934 mutex_exit(SD_MUTEX(un)); 20935 20936 /* 20937 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 20938 * set properly 20939 */ 20940 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 20941 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 20942 20943 
mutex_enter(SD_MUTEX(un)); 20944 if (dkc != NULL && un->un_f_sync_nv_supported && 20945 (dkc->dkc_flag & FLUSH_VOLATILE)) { 20946 /* 20947 * if the device supports SYNC_NV bit, turn on 20948 * the SYNC_NV bit to only flush volatile cache 20949 */ 20950 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 20951 } 20952 mutex_exit(SD_MUTEX(un)); 20953 20954 /* 20955 * First get some memory for the uscsi_cmd struct and cdb 20956 * and initialize for SYNCHRONIZE_CACHE cmd. 20957 */ 20958 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 20959 uscmd->uscsi_cdblen = CDB_GROUP1; 20960 uscmd->uscsi_cdb = (caddr_t)cdb; 20961 uscmd->uscsi_bufaddr = NULL; 20962 uscmd->uscsi_buflen = 0; 20963 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20964 uscmd->uscsi_rqlen = SENSE_LENGTH; 20965 uscmd->uscsi_rqresid = SENSE_LENGTH; 20966 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20967 uscmd->uscsi_timeout = sd_io_time; 20968 20969 /* 20970 * Allocate an sd_uscsi_info struct and fill it with the info 20971 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 20972 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 20973 * since we allocate the buf here in this function, we do not 20974 * need to preserve the prior contents of b_private. 20975 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 20976 */ 20977 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 20978 uip->ui_flags = SD_PATH_DIRECT; 20979 uip->ui_cmdp = uscmd; 20980 20981 bp = getrbuf(KM_SLEEP); 20982 bp->b_private = uip; 20983 20984 /* 20985 * Setup buffer to carry uscsi request. 20986 */ 20987 bp->b_flags = B_BUSY; 20988 bp->b_bcount = 0; 20989 bp->b_blkno = 0; 20990 20991 if (is_async == TRUE) { 20992 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 20993 uip->ui_dkc = *dkc; 20994 } 20995 20996 bp->b_edev = SD_GET_DEV(un); 20997 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 20998 20999 /* 21000 * Unset un_f_sync_cache_required flag 21001 */ 21002 mutex_enter(SD_MUTEX(un)); 21003 un->un_f_sync_cache_required = FALSE; 21004 mutex_exit(SD_MUTEX(un)); 21005 21006 (void) sd_uscsi_strategy(bp); 21007 21008 /* 21009 * If synchronous request, wait for completion 21010 * If async just return and let b_iodone callback 21011 * cleanup. 21012 * NOTE: On return, u_ncmds_in_driver will be decremented, 21013 * but it was also incremented in sd_uscsi_strategy(), so 21014 * we should be ok. 21015 */ 21016 if (is_async == FALSE) { 21017 (void) biowait(bp); 21018 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 21019 } 21020 21021 return (rval); 21022 } 21023 21024 21025 static int 21026 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 21027 { 21028 struct sd_uscsi_info *uip; 21029 struct uscsi_cmd *uscmd; 21030 uint8_t *sense_buf; 21031 struct sd_lun *un; 21032 int status; 21033 union scsi_cdb *cdb; 21034 21035 uip = (struct sd_uscsi_info *)(bp->b_private); 21036 ASSERT(uip != NULL); 21037 21038 uscmd = uip->ui_cmdp; 21039 ASSERT(uscmd != NULL); 21040 21041 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 21042 ASSERT(sense_buf != NULL); 21043 21044 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 21045 ASSERT(un != NULL); 21046 21047 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 21048 21049 status = geterror(bp); 21050 switch (status) { 21051 case 0: 21052 break; /* Success! 
*/ 21053 case EIO: 21054 switch (uscmd->uscsi_status) { 21055 case STATUS_RESERVATION_CONFLICT: 21056 /* Ignore reservation conflict */ 21057 status = 0; 21058 goto done; 21059 21060 case STATUS_CHECK: 21061 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 21062 (scsi_sense_key(sense_buf) == 21063 KEY_ILLEGAL_REQUEST)) { 21064 /* Ignore Illegal Request error */ 21065 if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) { 21066 mutex_enter(SD_MUTEX(un)); 21067 un->un_f_sync_nv_supported = FALSE; 21068 mutex_exit(SD_MUTEX(un)); 21069 status = 0; 21070 SD_TRACE(SD_LOG_IO, un, 21071 "un_f_sync_nv_supported \ 21072 is set to false.\n"); 21073 goto done; 21074 } 21075 21076 mutex_enter(SD_MUTEX(un)); 21077 un->un_f_sync_cache_supported = FALSE; 21078 mutex_exit(SD_MUTEX(un)); 21079 SD_TRACE(SD_LOG_IO, un, 21080 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 21081 un_f_sync_cache_supported set to false \ 21082 with asc = %x, ascq = %x\n", 21083 scsi_sense_asc(sense_buf), 21084 scsi_sense_ascq(sense_buf)); 21085 status = ENOTSUP; 21086 goto done; 21087 } 21088 break; 21089 default: 21090 break; 21091 } 21092 /* FALLTHRU */ 21093 default: 21094 /* 21095 * Turn on the un_f_sync_cache_required flag 21096 * since the SYNC CACHE command failed 21097 */ 21098 mutex_enter(SD_MUTEX(un)); 21099 un->un_f_sync_cache_required = TRUE; 21100 mutex_exit(SD_MUTEX(un)); 21101 21102 /* 21103 * Don't log an error message if this device 21104 * has removable media. 21105 */ 21106 if (!un->un_f_has_removable_media) { 21107 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 21108 "SYNCHRONIZE CACHE command failed (%d)\n", status); 21109 } 21110 break; 21111 } 21112 21113 done: 21114 if (uip->ui_dkc.dkc_callback != NULL) { 21115 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 21116 } 21117 21118 ASSERT((bp->b_flags & B_REMAPPED) == 0); 21119 freerbuf(bp); 21120 kmem_free(uip, sizeof (struct sd_uscsi_info)); 21121 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 21122 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 21123 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 21124 21125 return (status); 21126 } 21127 21128 21129 /* 21130 * Function: sd_send_scsi_GET_CONFIGURATION 21131 * 21132 * Description: Issues the get configuration command to the device. 21133 * Called from sd_check_for_writable_cd & sd_get_media_info 21134 * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN 21135 * Arguments: ssc 21136 * ucmdbuf 21137 * rqbuf 21138 * rqbuflen 21139 * bufaddr 21140 * buflen 21141 * path_flag 21142 * 21143 * Return Code: 0 - Success 21144 * errno return code from sd_ssc_send() 21145 * 21146 * Context: Can sleep. Does not return until command is completed. 21147 * 21148 */ 21149 21150 static int 21151 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf, 21152 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 21153 int path_flag) 21154 { 21155 char cdb[CDB_GROUP1]; 21156 int status; 21157 struct sd_lun *un; 21158 21159 ASSERT(ssc != NULL); 21160 un = ssc->ssc_un; 21161 ASSERT(un != NULL); 21162 ASSERT(!mutex_owned(SD_MUTEX(un))); 21163 ASSERT(bufaddr != NULL); 21164 ASSERT(ucmdbuf != NULL); 21165 ASSERT(rqbuf != NULL); 21166 21167 SD_TRACE(SD_LOG_IO, un, 21168 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 21169 21170 bzero(cdb, sizeof (cdb)); 21171 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 21172 bzero(rqbuf, rqbuflen); 21173 bzero(bufaddr, buflen); 21174 21175 /* 21176 * Set up cdb field for the get configuration command. 
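 * For reference (added annotation, not in the original source),
 * the assignments below produce a GET CONFIGURATION CDB of:
 *
 *	byte 0:	0x46 (SCMD_GET_CONFIGURATION)
 *	byte 1:	0x02 (RT field: return only the feature descriptor
 *		identified by the Starting Feature Number)
 *	byte 8:	allocation length = SD_PROFILE_HEADER_LEN, so only
 *		the feature header is fetched here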
21177 */ 21178 cdb[0] = SCMD_GET_CONFIGURATION; 21179 cdb[1] = 0x02; /* Requested Type */ 21180 cdb[8] = SD_PROFILE_HEADER_LEN; 21181 ucmdbuf->uscsi_cdb = cdb; 21182 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21183 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21184 ucmdbuf->uscsi_buflen = buflen; 21185 ucmdbuf->uscsi_timeout = sd_io_time; 21186 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21187 ucmdbuf->uscsi_rqlen = rqbuflen; 21188 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 21189 21190 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21191 UIO_SYSSPACE, path_flag); 21192 21193 switch (status) { 21194 case 0: 21195 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21196 break; /* Success! */ 21197 case EIO: 21198 switch (ucmdbuf->uscsi_status) { 21199 case STATUS_RESERVATION_CONFLICT: 21200 status = EACCES; 21201 break; 21202 default: 21203 break; 21204 } 21205 break; 21206 default: 21207 break; 21208 } 21209 21210 if (status == 0) { 21211 SD_DUMP_MEMORY(un, SD_LOG_IO, 21212 "sd_send_scsi_GET_CONFIGURATION: data", 21213 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 21214 } 21215 21216 SD_TRACE(SD_LOG_IO, un, 21217 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 21218 21219 return (status); 21220 } 21221 21222 /* 21223 * Function: sd_send_scsi_feature_GET_CONFIGURATION 21224 * 21225 * Description: Issues the get configuration command to the device to 21226 * retrieve a specific feature. Called from 21227 * sd_check_for_writable_cd & sd_set_mmc_caps. 21228 * Arguments: ssc 21229 * ucmdbuf 21230 * rqbuf 21231 * rqbuflen 21232 * bufaddr 21233 * buflen 21234 * feature 21235 * 21236 * Return Code: 0 - Success 21237 * errno return code from sd_ssc_send() 21238 * 21239 * Context: Can sleep. Does not return until command is completed. 21240 * 21241 */ 21242 static int 21243 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 21244 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 21245 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 21246 { 21247 char cdb[CDB_GROUP1]; 21248 int status; 21249 struct sd_lun *un; 21250 21251 ASSERT(ssc != NULL); 21252 un = ssc->ssc_un; 21253 ASSERT(un != NULL); 21254 ASSERT(!mutex_owned(SD_MUTEX(un))); 21255 ASSERT(bufaddr != NULL); 21256 ASSERT(ucmdbuf != NULL); 21257 ASSERT(rqbuf != NULL); 21258 21259 SD_TRACE(SD_LOG_IO, un, 21260 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 21261 21262 bzero(cdb, sizeof (cdb)); 21263 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 21264 bzero(rqbuf, rqbuflen); 21265 bzero(bufaddr, buflen); 21266 21267 /* 21268 * Set up cdb field for the get configuration command. 21269 */ 21270 cdb[0] = SCMD_GET_CONFIGURATION; 21271 cdb[1] = 0x02; /* Requested Type */ 21272 cdb[3] = feature; 21273 cdb[8] = buflen; 21274 ucmdbuf->uscsi_cdb = cdb; 21275 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21276 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21277 ucmdbuf->uscsi_buflen = buflen; 21278 ucmdbuf->uscsi_timeout = sd_io_time; 21279 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21280 ucmdbuf->uscsi_rqlen = rqbuflen; 21281 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 21282 21283 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21284 UIO_SYSSPACE, path_flag); 21285 21286 switch (status) { 21287 case 0: 21288 21289 break; /* Success! 
*/ 21290 case EIO: 21291 switch (ucmdbuf->uscsi_status) { 21292 case STATUS_RESERVATION_CONFLICT: 21293 status = EACCES; 21294 break; 21295 default: 21296 break; 21297 } 21298 break; 21299 default: 21300 break; 21301 } 21302 21303 if (status == 0) { 21304 SD_DUMP_MEMORY(un, SD_LOG_IO, 21305 "sd_send_scsi_feature_GET_CONFIGURATION: data", 21306 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 21307 } 21308 21309 SD_TRACE(SD_LOG_IO, un, 21310 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 21311 21312 return (status); 21313 } 21314 21315 21316 /* 21317 * Function: sd_send_scsi_MODE_SENSE 21318 * 21319 * Description: Utility function for issuing a scsi MODE SENSE command. 21320 * Note: This routine uses a consistent implementation for Group0, 21321 * Group1, and Group2 commands across all platforms. ATAPI devices 21322 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 21323 * 21324 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21325 * structure for this target. 21326 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 21327 * CDB_GROUP[1|2] (10 byte). 21328 * bufaddr - buffer for page data retrieved from the target. 21329 * buflen - size of page to be retrieved. 21330 * page_code - page code of data to be retrieved from the target. 21331 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21332 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21333 * to use the USCSI "direct" chain and bypass the normal 21334 * command waitq. 21335 * 21336 * Return Code: 0 - Success 21337 * errno return code from sd_ssc_send() 21338 * 21339 * Context: Can sleep. Does not return until command is completed. 21340 */ 21341 21342 static int 21343 sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 21344 size_t buflen, uchar_t page_code, int path_flag) 21345 { 21346 struct scsi_extended_sense sense_buf; 21347 union scsi_cdb cdb; 21348 struct uscsi_cmd ucmd_buf; 21349 int status; 21350 int headlen; 21351 struct sd_lun *un; 21352 21353 ASSERT(ssc != NULL); 21354 un = ssc->ssc_un; 21355 ASSERT(un != NULL); 21356 ASSERT(!mutex_owned(SD_MUTEX(un))); 21357 ASSERT(bufaddr != NULL); 21358 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 21359 (cdbsize == CDB_GROUP2)); 21360 21361 SD_TRACE(SD_LOG_IO, un, 21362 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 21363 21364 bzero(&cdb, sizeof (cdb)); 21365 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21366 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21367 bzero(bufaddr, buflen); 21368 21369 if (cdbsize == CDB_GROUP0) { 21370 cdb.scc_cmd = SCMD_MODE_SENSE; 21371 cdb.cdb_opaque[2] = page_code; 21372 FORMG0COUNT(&cdb, buflen); 21373 headlen = MODE_HEADER_LENGTH; 21374 } else { 21375 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 21376 cdb.cdb_opaque[2] = page_code; 21377 FORMG1COUNT(&cdb, buflen); 21378 headlen = MODE_HEADER_LENGTH_GRP2; 21379 } 21380 21381 ASSERT(headlen <= buflen); 21382 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21383 21384 ucmd_buf.uscsi_cdb = (char *)&cdb; 21385 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21386 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21387 ucmd_buf.uscsi_buflen = buflen; 21388 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21389 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21390 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 21391 ucmd_buf.uscsi_timeout = 60; 21392 21393 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21394 UIO_SYSSPACE, path_flag); 21395 21396 switch (status) { 21397 case 0: 21398 /* 21399 * sr_check_wp() uses 
0x3f page code and checks the header of 21400 * mode page to determine if target device is write-protected. 21401 * But some USB devices return 0 bytes for 0x3f page code. For 21402 * this case, make sure that at least the mode page header is 21403 * returned. 21404 */ 21405 if (buflen - ucmd_buf.uscsi_resid < headlen) { 21406 status = EIO; 21407 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 21408 "mode page header is not returned"); 21409 } 21410 break; /* Success! */ 21411 case EIO: 21412 switch (ucmd_buf.uscsi_status) { 21413 case STATUS_RESERVATION_CONFLICT: 21414 status = EACCES; 21415 break; 21416 default: 21417 break; 21418 } 21419 break; 21420 default: 21421 break; 21422 } 21423 21424 if (status == 0) { 21425 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data", 21426 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21427 } 21428 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 21429 21430 return (status); 21431 } 21432 21433 21434 /* 21435 * Function: sd_send_scsi_MODE_SELECT 21436 * 21437 * Description: Utility function for issuing a scsi MODE SELECT command. 21438 * Note: This routine uses a consistent implementation for Group0, 21439 * Group1, and Group2 commands across all platforms. ATAPI devices 21440 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 21441 * 21442 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21443 * structure for this target. 21444 * cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte), or 21445 * CDB_GROUP[1|2] (10 byte)). 21446 * bufaddr - buffer containing the page data to be sent to the target. 21447 * buflen - size of the page data to be transferred. 21448 * save_page - boolean to determine if SP bit should be set. 21449 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21450 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21451 * to use the USCSI "direct" chain and bypass the normal 21452 * command waitq. 21453 * 21454 * Return Code: 0 - Success 21455 * errno return code from sd_ssc_send() 21456 * 21457 * Context: Can sleep. Does not return until command is completed.
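 *
 *		Illustration only (not part of the original source): the
 *		typical pattern is a MODE SENSE / modify / MODE SELECT
 *		round trip on the same buffer. "pagebuf", its size, and
 *		the page code are hypothetical, and real callers in this
 *		file also clear the mode-header length fields before the
 *		select.
 *
 *			uchar_t pagebuf[MODE_HEADER_LENGTH + 0x16];
 *
 *			if (sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0,
 *			    pagebuf, sizeof (pagebuf), MODEPAGE_CACHING,
 *			    SD_PATH_DIRECT) == 0) {
 *				... flip bits in the page data ...
 *				(void) sd_send_scsi_MODE_SELECT(ssc,
 *				    CDB_GROUP0, pagebuf, sizeof (pagebuf),
 *				    SD_SAVE_PAGE, SD_PATH_DIRECT);
 *			}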
21458 */ 21459 21460 static int 21461 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 21462 size_t buflen, uchar_t save_page, int path_flag) 21463 { 21464 struct scsi_extended_sense sense_buf; 21465 union scsi_cdb cdb; 21466 struct uscsi_cmd ucmd_buf; 21467 int status; 21468 struct sd_lun *un; 21469 21470 ASSERT(ssc != NULL); 21471 un = ssc->ssc_un; 21472 ASSERT(un != NULL); 21473 ASSERT(!mutex_owned(SD_MUTEX(un))); 21474 ASSERT(bufaddr != NULL); 21475 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 21476 (cdbsize == CDB_GROUP2)); 21477 21478 SD_TRACE(SD_LOG_IO, un, 21479 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 21480 21481 bzero(&cdb, sizeof (cdb)); 21482 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21483 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21484 21485 /* Set the PF bit for many third party drives */ 21486 cdb.cdb_opaque[1] = 0x10; 21487 21488 /* Set the savepage(SP) bit if given */ 21489 if (save_page == SD_SAVE_PAGE) { 21490 cdb.cdb_opaque[1] |= 0x01; 21491 } 21492 21493 if (cdbsize == CDB_GROUP0) { 21494 cdb.scc_cmd = SCMD_MODE_SELECT; 21495 FORMG0COUNT(&cdb, buflen); 21496 } else { 21497 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 21498 FORMG1COUNT(&cdb, buflen); 21499 } 21500 21501 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21502 21503 ucmd_buf.uscsi_cdb = (char *)&cdb; 21504 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21505 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21506 ucmd_buf.uscsi_buflen = buflen; 21507 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21508 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21509 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 21510 ucmd_buf.uscsi_timeout = 60; 21511 21512 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21513 UIO_SYSSPACE, path_flag); 21514 21515 switch (status) { 21516 case 0: 21517 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21518 break; /* Success! */ 21519 case EIO: 21520 switch (ucmd_buf.uscsi_status) { 21521 case STATUS_RESERVATION_CONFLICT: 21522 status = EACCES; 21523 break; 21524 default: 21525 break; 21526 } 21527 break; 21528 default: 21529 break; 21530 } 21531 21532 if (status == 0) { 21533 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 21534 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21535 } 21536 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 21537 21538 return (status); 21539 } 21540 21541 21542 /* 21543 * Function: sd_send_scsi_RDWR 21544 * 21545 * Description: Issue a scsi READ or WRITE command with the given parameters. 21546 * 21547 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21548 * structure for this target. 21549 * cmd: SCMD_READ or SCMD_WRITE 21550 * bufaddr: Address of caller's buffer to receive the RDWR data 21551 * buflen: Length of caller's buffer to receive the RDWR data. 21552 * start_block: Block number for the start of the RDWR operation. 21553 * (Assumes target-native block size.) 21554 * residp: Pointer to variable to receive the residual of the 21555 * RDWR operation (may be NULL if no residual is requested). 21556 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21557 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21558 * to use the USCSI "direct" chain and bypass the normal 21559 * command waitq. 21560 * 21561 * Return Code: 0 - Success 21562 * errno return code from sd_ssc_send() 21563 * 21564 * Context: Can sleep. Does not return until command is completed.
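 *
 *		Note (added annotation, not in the original source): the
 *		routine picks the CDB group from start_block exactly as
 *		coded below. The 0xFFE00000 test works because a Group0
 *		(6-byte) CDB carries only a 21-bit logical block address,
 *		so any block at or above 1 << 21 (or any ATAPI device,
 *		which lacks the 6-byte read/write commands) forces a
 *		Group1 (10-byte) CDB, and any block above 0xffffffff
 *		forces a Group4 (16-byte) CDB.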
21565 */ 21566 21567 static int 21568 sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 21569 size_t buflen, daddr_t start_block, int path_flag) 21570 { 21571 struct scsi_extended_sense sense_buf; 21572 union scsi_cdb cdb; 21573 struct uscsi_cmd ucmd_buf; 21574 uint32_t block_count; 21575 int status; 21576 int cdbsize; 21577 uchar_t flag; 21578 struct sd_lun *un; 21579 21580 ASSERT(ssc != NULL); 21581 un = ssc->ssc_un; 21582 ASSERT(un != NULL); 21583 ASSERT(!mutex_owned(SD_MUTEX(un))); 21584 ASSERT(bufaddr != NULL); 21585 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 21586 21587 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 21588 21589 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 21590 return (EINVAL); 21591 } 21592 21593 mutex_enter(SD_MUTEX(un)); 21594 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 21595 mutex_exit(SD_MUTEX(un)); 21596 21597 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 21598 21599 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 21600 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 21601 bufaddr, buflen, start_block, block_count); 21602 21603 bzero(&cdb, sizeof (cdb)); 21604 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21605 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21606 21607 /* Compute CDB size to use */ 21608 if (start_block > 0xffffffff) 21609 cdbsize = CDB_GROUP4; 21610 else if ((start_block & 0xFFE00000) || 21611 (un->un_f_cfg_is_atapi == TRUE)) 21612 cdbsize = CDB_GROUP1; 21613 else 21614 cdbsize = CDB_GROUP0; 21615 21616 switch (cdbsize) { 21617 case CDB_GROUP0: /* 6-byte CDBs */ 21618 cdb.scc_cmd = cmd; 21619 FORMG0ADDR(&cdb, start_block); 21620 FORMG0COUNT(&cdb, block_count); 21621 break; 21622 case CDB_GROUP1: /* 10-byte CDBs */ 21623 cdb.scc_cmd = cmd | SCMD_GROUP1; 21624 FORMG1ADDR(&cdb, start_block); 21625 FORMG1COUNT(&cdb, block_count); 21626 break; 21627 case CDB_GROUP4: /* 16-byte CDBs */ 21628 cdb.scc_cmd = cmd | SCMD_GROUP4; 21629 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 21630 FORMG4COUNT(&cdb, block_count); 21631 break; 21632 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 21633 default: 21634 /* All others reserved */ 21635 return (EINVAL); 21636 } 21637 21638 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 21639 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21640 21641 ucmd_buf.uscsi_cdb = (char *)&cdb; 21642 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21643 ucmd_buf.uscsi_bufaddr = bufaddr; 21644 ucmd_buf.uscsi_buflen = buflen; 21645 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21646 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21647 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 21648 ucmd_buf.uscsi_timeout = 60; 21649 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21650 UIO_SYSSPACE, path_flag); 21651 21652 switch (status) { 21653 case 0: 21654 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21655 break; /* Success! */ 21656 case EIO: 21657 switch (ucmd_buf.uscsi_status) { 21658 case STATUS_RESERVATION_CONFLICT: 21659 status = EACCES; 21660 break; 21661 default: 21662 break; 21663 } 21664 break; 21665 default: 21666 break; 21667 } 21668 21669 if (status == 0) { 21670 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 21671 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21672 } 21673 21674 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 21675 21676 return (status); 21677 } 21678 21679 21680 /* 21681 * Function: sd_send_scsi_LOG_SENSE 21682 * 21683 * Description: Issue a scsi LOG_SENSE command with the given parameters. 
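 *		Illustration only (not part of the original source): a
 *		hypothetical caller reading the current temperature log
 *		page. The 64-byte buffer size is arbitrary; page_control
 *		0x01 selects current cumulative values and param_ptr 0
 *		starts at the first log parameter.
 *
 *			uchar_t logbuf[64];
 *
 *			rval = sd_send_scsi_LOG_SENSE(ssc, logbuf,
 *			    sizeof (logbuf), TEMPERATURE_PAGE, 0x01, 0,
 *			    SD_PATH_DIRECT);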
21684 * 21685 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21686 * structure for this target. * bufaddr - buffer into which the retrieved log page is copied. * buflen - length of the buffer, also used as the allocation length in the CDB. * page_code - the log page to retrieve. * page_control - page control field, placed in the upper two bits of byte 2 of the CDB. * param_ptr - parameter pointer, placed in bytes 5 and 6 of the CDB. * path_flag - SD_PATH_* flag selecting the command chain, as for the other sd_send_scsi_* routines above. 21687 * 21688 * Return Code: 0 - Success 21689 * errno return code from sd_ssc_send() 21690 * 21691 * Context: Can sleep. Does not return until command is completed. 21692 */ 21693 21694 static int 21695 sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen, 21696 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 21697 int path_flag) 21698 21699 { 21700 struct scsi_extended_sense sense_buf; 21701 union scsi_cdb cdb; 21702 struct uscsi_cmd ucmd_buf; 21703 int status; 21704 struct sd_lun *un; 21705 21706 ASSERT(ssc != NULL); 21707 un = ssc->ssc_un; 21708 ASSERT(un != NULL); 21709 ASSERT(!mutex_owned(SD_MUTEX(un))); 21710 21711 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 21712 21713 bzero(&cdb, sizeof (cdb)); 21714 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21715 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21716 21717 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 21718 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 21719 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 21720 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 21721 FORMG1COUNT(&cdb, buflen); 21722 21723 ucmd_buf.uscsi_cdb = (char *)&cdb; 21724 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 21725 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21726 ucmd_buf.uscsi_buflen = buflen; 21727 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21728 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21729 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 21730 ucmd_buf.uscsi_timeout = 60; 21731 21732 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21733 UIO_SYSSPACE, path_flag); 21734 21735 switch (status) { 21736 case 0: 21737 break; 21738 case EIO: 21739 switch (ucmd_buf.uscsi_status) { 21740 case STATUS_RESERVATION_CONFLICT: 21741 status = EACCES; 21742 break; 21743 case STATUS_CHECK: 21744 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 21745 (scsi_sense_key((uint8_t *)&sense_buf) == 21746 KEY_ILLEGAL_REQUEST) && 21747 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 21748 /* 21749 * ASC 0x24: INVALID FIELD IN CDB 21750 */ 21751 switch (page_code) { 21752 case START_STOP_CYCLE_PAGE: 21753 /* 21754 * The start stop cycle counter is 21755 * implemented as page 0x31 in earlier 21756 * generation disks. In new generation 21757 * disks the start stop cycle counter is 21758 * implemented as page 0xE. To properly 21759 * handle this case, if an attempt for 21760 * log page 0xE is made and fails, we 21761 * will try again using page 0x31. 21762 * 21763 * Network storage BU committed to 21764 * maintain the page 0x31 for this 21765 * purpose and will not have any other 21766 * page implemented with page code 0x31 21767 * until all disks transition to the 21768 * standard page.
21769 */ 21770 mutex_enter(SD_MUTEX(un)); 21771 un->un_start_stop_cycle_page = 21772 START_STOP_CYCLE_VU_PAGE; 21773 cdb.cdb_opaque[2] = 21774 (char)(page_control << 6) | 21775 un->un_start_stop_cycle_page; 21776 mutex_exit(SD_MUTEX(un)); 21777 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 21778 status = sd_ssc_send( 21779 ssc, &ucmd_buf, FKIOCTL, 21780 UIO_SYSSPACE, path_flag); 21781 21782 break; 21783 case TEMPERATURE_PAGE: 21784 status = ENOTTY; 21785 break; 21786 default: 21787 break; 21788 } 21789 } 21790 break; 21791 default: 21792 break; 21793 } 21794 break; 21795 default: 21796 break; 21797 } 21798 21799 if (status == 0) { 21800 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21801 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 21802 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21803 } 21804 21805 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 21806 21807 return (status); 21808 } 21809 21810 21811 /* 21812 * Function: sdioctl 21813 * 21814 * Description: Driver's ioctl(9e) entry point function. 21815 * 21816 * Arguments: dev - device number 21817 * cmd - ioctl operation to be performed 21818 * arg - user argument, contains data to be set or reference 21819 * parameter for get 21820 * flag - bit flag, indicating open settings, 32/64 bit type 21821 * cred_p - user credential pointer 21822 * rval_p - calling process return value (OPT) 21823 * 21824 * Return Code: EINVAL 21825 * ENOTTY 21826 * ENXIO 21827 * EIO 21828 * EFAULT 21829 * ENOTSUP 21830 * EPERM 21831 * 21832 * Context: Called from the device switch at normal priority. 21833 */ 21834 21835 static int 21836 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 21837 { 21838 struct sd_lun *un = NULL; 21839 int err = 0; 21840 int i = 0; 21841 cred_t *cr; 21842 int tmprval = EINVAL; 21843 boolean_t is_valid; 21844 sd_ssc_t *ssc; 21845 21846 /* 21847 * All device accesses go thru sdstrategy where we check on suspend 21848 * status 21849 */ 21850 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21851 return (ENXIO); 21852 } 21853 21854 ASSERT(!mutex_owned(SD_MUTEX(un))); 21855 21856 /* Initialize sd_ssc_t for internal uscsi commands */ 21857 ssc = sd_ssc_init(un); 21858 21859 is_valid = SD_IS_VALID_LABEL(un); 21860 21861 /* 21862 * Moved this wait from sd_uscsi_strategy to here for 21863 * reasons of deadlock prevention. Internal driver commands, 21864 * specifically those to change a device's power level, result 21865 * in a call to sd_uscsi_strategy. 21866 */ 21867 mutex_enter(SD_MUTEX(un)); 21868 while ((un->un_state == SD_STATE_SUSPENDED) || 21869 (un->un_state == SD_STATE_PM_CHANGING)) { 21870 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 21871 } 21872 /* 21873 * Twiddling the counter here protects commands from now 21874 * through to the top of sd_uscsi_strategy. Without the 21875 * counter increment, a power down, for example, could get in 21876 * after the above check for state is made and before 21877 * execution gets to the top of sd_uscsi_strategy. 21878 * That would cause problems.
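 *
 *		Editor's illustration of the window being closed (names and
 *		layout are illustrative only):
 *
 *		    ioctl thread                    power management
 *		    ------------                    ----------------
 *		    sees state != SUSPENDED
 *		                                    powers the device down
 *		    reaches sd_uscsi_strategy and
 *		    issues a command to a
 *		    powered-down device
 *
 *		With un_ncmds_in_driver already incremented, the device stays
 *		busy to the pm framework for that window, so the power-down
 *		cannot slip in between the state check and the command.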
21879 */ 21880 un->un_ncmds_in_driver++; 21881 21882 if (!is_valid && 21883 (flag & (FNDELAY | FNONBLOCK))) { 21884 switch (cmd) { 21885 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 21886 case DKIOCGVTOC: 21887 case DKIOCGEXTVTOC: 21888 case DKIOCGAPART: 21889 case DKIOCPARTINFO: 21890 case DKIOCEXTPARTINFO: 21891 case DKIOCSGEOM: 21892 case DKIOCSAPART: 21893 case DKIOCGETEFI: 21894 case DKIOCPARTITION: 21895 case DKIOCSVTOC: 21896 case DKIOCSEXTVTOC: 21897 case DKIOCSETEFI: 21898 case DKIOCGMBOOT: 21899 case DKIOCSMBOOT: 21900 case DKIOCG_PHYGEOM: 21901 case DKIOCG_VIRTGEOM: 21902 #if defined(__i386) || defined(__amd64) 21903 case DKIOCSETEXTPART: 21904 #endif 21905 /* let cmlb handle it */ 21906 goto skip_ready_valid; 21907 21908 case CDROMPAUSE: 21909 case CDROMRESUME: 21910 case CDROMPLAYMSF: 21911 case CDROMPLAYTRKIND: 21912 case CDROMREADTOCHDR: 21913 case CDROMREADTOCENTRY: 21914 case CDROMSTOP: 21915 case CDROMSTART: 21916 case CDROMVOLCTRL: 21917 case CDROMSUBCHNL: 21918 case CDROMREADMODE2: 21919 case CDROMREADMODE1: 21920 case CDROMREADOFFSET: 21921 case CDROMSBLKMODE: 21922 case CDROMGBLKMODE: 21923 case CDROMGDRVSPEED: 21924 case CDROMSDRVSPEED: 21925 case CDROMCDDA: 21926 case CDROMCDXA: 21927 case CDROMSUBCODE: 21928 if (!ISCD(un)) { 21929 un->un_ncmds_in_driver--; 21930 ASSERT(un->un_ncmds_in_driver >= 0); 21931 mutex_exit(SD_MUTEX(un)); 21932 err = ENOTTY; 21933 goto done_without_assess; 21934 } 21935 break; 21936 case FDEJECT: 21937 case DKIOCEJECT: 21938 case CDROMEJECT: 21939 if (!un->un_f_eject_media_supported) { 21940 un->un_ncmds_in_driver--; 21941 ASSERT(un->un_ncmds_in_driver >= 0); 21942 mutex_exit(SD_MUTEX(un)); 21943 err = ENOTTY; 21944 goto done_without_assess; 21945 } 21946 break; 21947 case DKIOCFLUSHWRITECACHE: 21948 mutex_exit(SD_MUTEX(un)); 21949 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21950 if (err != 0) { 21951 mutex_enter(SD_MUTEX(un)); 21952 un->un_ncmds_in_driver--; 21953 ASSERT(un->un_ncmds_in_driver >= 0); 21954 mutex_exit(SD_MUTEX(un)); 21955 err = EIO; 21956 goto done_quick_assess; 21957 } 21958 mutex_enter(SD_MUTEX(un)); 21959 /* FALLTHROUGH */ 21960 case DKIOCREMOVABLE: 21961 case DKIOCHOTPLUGGABLE: 21962 case DKIOCINFO: 21963 case DKIOCGMEDIAINFO: 21964 case DKIOCGMEDIAINFOEXT: 21965 case MHIOCENFAILFAST: 21966 case MHIOCSTATUS: 21967 case MHIOCTKOWN: 21968 case MHIOCRELEASE: 21969 case MHIOCGRP_INKEYS: 21970 case MHIOCGRP_INRESV: 21971 case MHIOCGRP_REGISTER: 21972 case MHIOCGRP_RESERVE: 21973 case MHIOCGRP_PREEMPTANDABORT: 21974 case MHIOCGRP_REGISTERANDIGNOREKEY: 21975 case CDROMCLOSETRAY: 21976 case USCSICMD: 21977 goto skip_ready_valid; 21978 default: 21979 break; 21980 } 21981 21982 mutex_exit(SD_MUTEX(un)); 21983 err = sd_ready_and_valid(ssc, SDPART(dev)); 21984 mutex_enter(SD_MUTEX(un)); 21985 21986 if (err != SD_READY_VALID) { 21987 switch (cmd) { 21988 case DKIOCSTATE: 21989 case CDROMGDRVSPEED: 21990 case CDROMSDRVSPEED: 21991 case FDEJECT: /* for eject command */ 21992 case DKIOCEJECT: 21993 case CDROMEJECT: 21994 case DKIOCREMOVABLE: 21995 case DKIOCHOTPLUGGABLE: 21996 break; 21997 default: 21998 if (un->un_f_has_removable_media) { 21999 err = ENXIO; 22000 } else { 22001 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 22002 if (err == SD_RESERVED_BY_OTHERS) { 22003 err = EACCES; 22004 } else { 22005 err = EIO; 22006 } 22007 } 22008 un->un_ncmds_in_driver--; 22009 ASSERT(un->un_ncmds_in_driver >= 0); 22010 mutex_exit(SD_MUTEX(un)); 22011 22012 goto done_without_assess; 22013 } 22014 } 22015 } 22016 22017 skip_ready_valid: 22018 
mutex_exit(SD_MUTEX(un)); 22019 22020 switch (cmd) { 22021 case DKIOCINFO: 22022 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 22023 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 22024 break; 22025 22026 case DKIOCGMEDIAINFO: 22027 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 22028 err = sd_get_media_info(dev, (caddr_t)arg, flag); 22029 break; 22030 22031 case DKIOCGMEDIAINFOEXT: 22032 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n"); 22033 err = sd_get_media_info_ext(dev, (caddr_t)arg, flag); 22034 break; 22035 22036 case DKIOCGGEOM: 22037 case DKIOCGVTOC: 22038 case DKIOCGEXTVTOC: 22039 case DKIOCGAPART: 22040 case DKIOCPARTINFO: 22041 case DKIOCEXTPARTINFO: 22042 case DKIOCSGEOM: 22043 case DKIOCSAPART: 22044 case DKIOCGETEFI: 22045 case DKIOCPARTITION: 22046 case DKIOCSVTOC: 22047 case DKIOCSEXTVTOC: 22048 case DKIOCSETEFI: 22049 case DKIOCGMBOOT: 22050 case DKIOCSMBOOT: 22051 case DKIOCG_PHYGEOM: 22052 case DKIOCG_VIRTGEOM: 22053 #if defined(__i386) || defined(__amd64) 22054 case DKIOCSETEXTPART: 22055 #endif 22056 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 22057 22058 /* TUR should spin up */ 22059 22060 if (un->un_f_has_removable_media) 22061 err = sd_send_scsi_TEST_UNIT_READY(ssc, 22062 SD_CHECK_FOR_MEDIA); 22063 22064 else 22065 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 22066 22067 if (err != 0) 22068 goto done_with_assess; 22069 22070 err = cmlb_ioctl(un->un_cmlbhandle, dev, 22071 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 22072 22073 if ((err == 0) && 22074 ((cmd == DKIOCSETEFI) || 22075 (un->un_f_pkstats_enabled) && 22076 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC || 22077 cmd == DKIOCSEXTVTOC))) { 22078 22079 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 22080 (void *)SD_PATH_DIRECT); 22081 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 22082 sd_set_pstats(un); 22083 SD_TRACE(SD_LOG_IO_PARTITION, un, 22084 "sd_ioctl: un:0x%p pstats created and " 22085 "set\n", un); 22086 } 22087 } 22088 22089 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) || 22090 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 22091 22092 mutex_enter(SD_MUTEX(un)); 22093 if (un->un_f_devid_supported && 22094 (un->un_f_opt_fab_devid == TRUE)) { 22095 if (un->un_devid == NULL) { 22096 sd_register_devid(ssc, SD_DEVINFO(un), 22097 SD_TARGET_IS_UNRESERVED); 22098 } else { 22099 /* 22100 * The device id for this disk 22101 * has been fabricated. The 22102 * device id must be preserved 22103 * by writing it back out to 22104 * disk. 
22105 */ 22106 if (sd_write_deviceid(ssc) != 0) { 22107 ddi_devid_free(un->un_devid); 22108 un->un_devid = NULL; 22109 } 22110 } 22111 } 22112 mutex_exit(SD_MUTEX(un)); 22113 } 22114 22115 break; 22116 22117 case DKIOCLOCK: 22118 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 22119 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 22120 SD_PATH_STANDARD); 22121 goto done_with_assess; 22122 22123 case DKIOCUNLOCK: 22124 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 22125 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 22126 SD_PATH_STANDARD); 22127 goto done_with_assess; 22128 22129 case DKIOCSTATE: { 22130 enum dkio_state state; 22131 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 22132 22133 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 22134 err = EFAULT; 22135 } else { 22136 err = sd_check_media(dev, state); 22137 if (err == 0) { 22138 if (ddi_copyout(&un->un_mediastate, (void *)arg, 22139 sizeof (int), flag) != 0) 22140 err = EFAULT; 22141 } 22142 } 22143 break; 22144 } 22145 22146 case DKIOCREMOVABLE: 22147 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 22148 i = un->un_f_has_removable_media ? 1 : 0; 22149 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22150 err = EFAULT; 22151 } else { 22152 err = 0; 22153 } 22154 break; 22155 22156 case DKIOCHOTPLUGGABLE: 22157 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 22158 i = un->un_f_is_hotpluggable ? 1 : 0; 22159 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22160 err = EFAULT; 22161 } else { 22162 err = 0; 22163 } 22164 break; 22165 22166 case DKIOCGTEMPERATURE: 22167 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 22168 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 22169 break; 22170 22171 case MHIOCENFAILFAST: 22172 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 22173 if ((err = drv_priv(cred_p)) == 0) { 22174 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 22175 } 22176 break; 22177 22178 case MHIOCTKOWN: 22179 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 22180 if ((err = drv_priv(cred_p)) == 0) { 22181 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 22182 } 22183 break; 22184 22185 case MHIOCRELEASE: 22186 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 22187 if ((err = drv_priv(cred_p)) == 0) { 22188 err = sd_mhdioc_release(dev); 22189 } 22190 break; 22191 22192 case MHIOCSTATUS: 22193 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 22194 if ((err = drv_priv(cred_p)) == 0) { 22195 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) { 22196 case 0: 22197 err = 0; 22198 break; 22199 case EACCES: 22200 *rval_p = 1; 22201 err = 0; 22202 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22203 break; 22204 default: 22205 err = EIO; 22206 goto done_with_assess; 22207 } 22208 } 22209 break; 22210 22211 case MHIOCQRESERVE: 22212 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 22213 if ((err = drv_priv(cred_p)) == 0) { 22214 err = sd_reserve_release(dev, SD_RESERVE); 22215 } 22216 break; 22217 22218 case MHIOCREREGISTERDEVID: 22219 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 22220 if (drv_priv(cred_p) == EPERM) { 22221 err = EPERM; 22222 } else if (!un->un_f_devid_supported) { 22223 err = ENOTTY; 22224 } else { 22225 err = sd_mhdioc_register_devid(dev); 22226 } 22227 break; 22228 22229 case MHIOCGRP_INKEYS: 22230 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 22231 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 22232 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22233 err = ENOTSUP; 22234 } else { 22235 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 22236 flag); 22237 } 22238 } 
22239 break; 22240 22241 case MHIOCGRP_INRESV: 22242 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 22243 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 22244 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22245 err = ENOTSUP; 22246 } else { 22247 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 22248 } 22249 } 22250 break; 22251 22252 case MHIOCGRP_REGISTER: 22253 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 22254 if ((err = drv_priv(cred_p)) != EPERM) { 22255 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22256 err = ENOTSUP; 22257 } else if (arg != NULL) { 22258 mhioc_register_t reg; 22259 if (ddi_copyin((void *)arg, &reg, 22260 sizeof (mhioc_register_t), flag) != 0) { 22261 err = EFAULT; 22262 } else { 22263 err = 22264 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22265 ssc, SD_SCSI3_REGISTER, 22266 (uchar_t *)&reg); 22267 if (err != 0) 22268 goto done_with_assess; 22269 } 22270 } 22271 } 22272 break; 22273 22274 case MHIOCGRP_RESERVE: 22275 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 22276 if ((err = drv_priv(cred_p)) != EPERM) { 22277 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22278 err = ENOTSUP; 22279 } else if (arg != NULL) { 22280 mhioc_resv_desc_t resv_desc; 22281 if (ddi_copyin((void *)arg, &resv_desc, 22282 sizeof (mhioc_resv_desc_t), flag) != 0) { 22283 err = EFAULT; 22284 } else { 22285 err = 22286 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22287 ssc, SD_SCSI3_RESERVE, 22288 (uchar_t *)&resv_desc); 22289 if (err != 0) 22290 goto done_with_assess; 22291 } 22292 } 22293 } 22294 break; 22295 22296 case MHIOCGRP_PREEMPTANDABORT: 22297 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 22298 if ((err = drv_priv(cred_p)) != EPERM) { 22299 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22300 err = ENOTSUP; 22301 } else if (arg != NULL) { 22302 mhioc_preemptandabort_t preempt_abort; 22303 if (ddi_copyin((void *)arg, &preempt_abort, 22304 sizeof (mhioc_preemptandabort_t), 22305 flag) != 0) { 22306 err = EFAULT; 22307 } else { 22308 err = 22309 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22310 ssc, SD_SCSI3_PREEMPTANDABORT, 22311 (uchar_t *)&preempt_abort); 22312 if (err != 0) 22313 goto done_with_assess; 22314 } 22315 } 22316 } 22317 break; 22318 22319 case MHIOCGRP_REGISTERANDIGNOREKEY: 22320 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 22321 if ((err = drv_priv(cred_p)) != EPERM) { 22322 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22323 err = ENOTSUP; 22324 } else if (arg != NULL) { 22325 mhioc_registerandignorekey_t r_and_i; 22326 if (ddi_copyin((void *)arg, (void *)&r_and_i, 22327 sizeof (mhioc_registerandignorekey_t), 22328 flag) != 0) { 22329 err = EFAULT; 22330 } else { 22331 err = 22332 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22333 ssc, SD_SCSI3_REGISTERANDIGNOREKEY, 22334 (uchar_t *)&r_and_i); 22335 if (err != 0) 22336 goto done_with_assess; 22337 } 22338 } 22339 } 22340 break; 22341 22342 case USCSICMD: 22343 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 22344 cr = ddi_get_cred(); 22345 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 22346 err = EPERM; 22347 } else { 22348 enum uio_seg uioseg; 22349 22350 uioseg = (flag & FKIOCTL) ?
UIO_SYSSPACE : 22351 UIO_USERSPACE; 22352 if (un->un_f_format_in_progress == TRUE) { 22353 err = EAGAIN; 22354 break; 22355 } 22356 22357 err = sd_ssc_send(ssc, 22358 (struct uscsi_cmd *)arg, 22359 flag, uioseg, SD_PATH_STANDARD); 22360 if (err != 0) 22361 goto done_with_assess; 22362 else 22363 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 22364 } 22365 break; 22366 22367 case CDROMPAUSE: 22368 case CDROMRESUME: 22369 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 22370 if (!ISCD(un)) { 22371 err = ENOTTY; 22372 } else { 22373 err = sr_pause_resume(dev, cmd); 22374 } 22375 break; 22376 22377 case CDROMPLAYMSF: 22378 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 22379 if (!ISCD(un)) { 22380 err = ENOTTY; 22381 } else { 22382 err = sr_play_msf(dev, (caddr_t)arg, flag); 22383 } 22384 break; 22385 22386 case CDROMPLAYTRKIND: 22387 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 22388 #if defined(__i386) || defined(__amd64) 22389 /* 22390 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 22391 */ 22392 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 22393 #else 22394 if (!ISCD(un)) { 22395 #endif 22396 err = ENOTTY; 22397 } else { 22398 err = sr_play_trkind(dev, (caddr_t)arg, flag); 22399 } 22400 break; 22401 22402 case CDROMREADTOCHDR: 22403 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 22404 if (!ISCD(un)) { 22405 err = ENOTTY; 22406 } else { 22407 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 22408 } 22409 break; 22410 22411 case CDROMREADTOCENTRY: 22412 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 22413 if (!ISCD(un)) { 22414 err = ENOTTY; 22415 } else { 22416 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 22417 } 22418 break; 22419 22420 case CDROMSTOP: 22421 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 22422 if (!ISCD(un)) { 22423 err = ENOTTY; 22424 } else { 22425 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 22426 SD_TARGET_STOP, SD_PATH_STANDARD); 22427 goto done_with_assess; 22428 } 22429 break; 22430 22431 case CDROMSTART: 22432 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 22433 if (!ISCD(un)) { 22434 err = ENOTTY; 22435 } else { 22436 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 22437 SD_TARGET_START, SD_PATH_STANDARD); 22438 goto done_with_assess; 22439 } 22440 break; 22441 22442 case CDROMCLOSETRAY: 22443 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 22444 if (!ISCD(un)) { 22445 err = ENOTTY; 22446 } else { 22447 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 22448 SD_TARGET_CLOSE, SD_PATH_STANDARD); 22449 goto done_with_assess; 22450 } 22451 break; 22452 22453 case FDEJECT: /* for eject command */ 22454 case DKIOCEJECT: 22455 case CDROMEJECT: 22456 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 22457 if (!un->un_f_eject_media_supported) { 22458 err = ENOTTY; 22459 } else { 22460 err = sr_eject(dev); 22461 } 22462 break; 22463 22464 case CDROMVOLCTRL: 22465 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 22466 if (!ISCD(un)) { 22467 err = ENOTTY; 22468 } else { 22469 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 22470 } 22471 break; 22472 22473 case CDROMSUBCHNL: 22474 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 22475 if (!ISCD(un)) { 22476 err = ENOTTY; 22477 } else { 22478 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 22479 } 22480 break; 22481 22482 case CDROMREADMODE2: 22483 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 22484 if (!ISCD(un)) { 22485 err = ENOTTY; 22486 } else if (un->un_f_cfg_is_atapi == TRUE) { 22487 /* 22488 * If the drive supports READ CD, use that instead of 22489 * switching the LBA size via a MODE SELECT 
22490 * Block Descriptor 22491 */ 22492 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 22493 } else { 22494 err = sr_read_mode2(dev, (caddr_t)arg, flag); 22495 } 22496 break; 22497 22498 case CDROMREADMODE1: 22499 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 22500 if (!ISCD(un)) { 22501 err = ENOTTY; 22502 } else { 22503 err = sr_read_mode1(dev, (caddr_t)arg, flag); 22504 } 22505 break; 22506 22507 case CDROMREADOFFSET: 22508 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 22509 if (!ISCD(un)) { 22510 err = ENOTTY; 22511 } else { 22512 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 22513 flag); 22514 } 22515 break; 22516 22517 case CDROMSBLKMODE: 22518 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 22519 /* 22520 * There is no means of changing the block size on atapi 22521 * drives, thus return ENOTTY if the drive type is atapi 22522 */ 22523 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 22524 err = ENOTTY; 22525 } else if (un->un_f_mmc_cap == TRUE) { 22526 22527 /* 22528 * MMC Devices do not support changing the 22529 * logical block size 22530 * 22531 * Note: EINVAL is being returned instead of ENOTTY to 22532 * maintain consistency with the original mmc 22533 * driver update. 22534 */ 22535 err = EINVAL; 22536 } else { 22537 mutex_enter(SD_MUTEX(un)); 22538 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 22539 (un->un_ncmds_in_transport > 0)) { 22540 mutex_exit(SD_MUTEX(un)); 22541 err = EINVAL; 22542 } else { 22543 mutex_exit(SD_MUTEX(un)); 22544 err = sr_change_blkmode(dev, cmd, arg, flag); 22545 } 22546 } 22547 break; 22548 22549 case CDROMGBLKMODE: 22550 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 22551 if (!ISCD(un)) { 22552 err = ENOTTY; 22553 } else if ((un->un_f_cfg_is_atapi != FALSE) && 22554 (un->un_f_blockcount_is_valid != FALSE)) { 22555 /* 22556 * Drive is an ATAPI drive so return target block 22557 * size for ATAPI drives since we cannot change the 22558 * blocksize on ATAPI drives. Used primarily to detect 22559 * if an ATAPI cdrom is present. 22560 */ 22561 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 22562 sizeof (int), flag) != 0) { 22563 err = EFAULT; 22564 } else { 22565 err = 0; 22566 } 22567 22568 } else { 22569 /* 22570 * Drive supports changing block sizes via a Mode 22571 * Select. 22572 */ 22573 err = sr_change_blkmode(dev, cmd, arg, flag); 22574 } 22575 break; 22576 22577 case CDROMGDRVSPEED: 22578 case CDROMSDRVSPEED: 22579 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 22580 if (!ISCD(un)) { 22581 err = ENOTTY; 22582 } else if (un->un_f_mmc_cap == TRUE) { 22583 /* 22584 * Note: In the future the driver implementation 22585 * for getting and 22586 * setting cd speed should entail: 22587 * 1) If non-mmc try the Toshiba mode page 22588 * (sr_change_speed) 22589 * 2) If mmc but no support for Real Time Streaming try 22590 * the SET CD SPEED (0xBB) command 22591 * (sr_atapi_change_speed) 22592 * 3) If mmc and support for Real Time Streaming 22593 * try the GET PERFORMANCE and SET STREAMING 22594 * commands (not yet implemented, 4380808) 22595 */ 22596 /* 22597 * As per recent MMC spec, CD-ROM speed is variable 22598 * and changes with LBA. Since there is no such 22599 * thing as drive speed now, fail this ioctl. 22600 * 22601 * Note: EINVAL is returned for consistency with the 22602 * original implementation which included support for 22603 * getting the drive speed of mmc devices but not 22604 * setting the drive speed. Thus EINVAL would be 22605 * returned if a set request was made for an mmc device.
22606 * We no longer support get or set speed for 22607 * mmc but need to remain consistent with regard 22608 * to the error code returned. 22609 */ 22610 err = EINVAL; 22611 } else if (un->un_f_cfg_is_atapi == TRUE) { 22612 err = sr_atapi_change_speed(dev, cmd, arg, flag); 22613 } else { 22614 err = sr_change_speed(dev, cmd, arg, flag); 22615 } 22616 break; 22617 22618 case CDROMCDDA: 22619 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 22620 if (!ISCD(un)) { 22621 err = ENOTTY; 22622 } else { 22623 err = sr_read_cdda(dev, (void *)arg, flag); 22624 } 22625 break; 22626 22627 case CDROMCDXA: 22628 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 22629 if (!ISCD(un)) { 22630 err = ENOTTY; 22631 } else { 22632 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 22633 } 22634 break; 22635 22636 case CDROMSUBCODE: 22637 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 22638 if (!ISCD(un)) { 22639 err = ENOTTY; 22640 } else { 22641 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 22642 } 22643 break; 22644 22645 22646 #ifdef SDDEBUG 22647 /* RESET/ABORTS testing ioctls */ 22648 case DKIOCRESET: { 22649 int reset_level; 22650 22651 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 22652 err = EFAULT; 22653 } else { 22654 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 22655 "reset_level = 0x%lx\n", reset_level); 22656 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 22657 err = 0; 22658 } else { 22659 err = EIO; 22660 } 22661 } 22662 break; 22663 } 22664 22665 case DKIOCABORT: 22666 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 22667 if (scsi_abort(SD_ADDRESS(un), NULL)) { 22668 err = 0; 22669 } else { 22670 err = EIO; 22671 } 22672 break; 22673 #endif 22674 22675 #ifdef SD_FAULT_INJECTION 22676 /* SDIOC FaultInjection testing ioctls */ 22677 case SDIOCSTART: 22678 case SDIOCSTOP: 22679 case SDIOCINSERTPKT: 22680 case SDIOCINSERTXB: 22681 case SDIOCINSERTUN: 22682 case SDIOCINSERTARQ: 22683 case SDIOCPUSH: 22684 case SDIOCRETRIEVE: 22685 case SDIOCRUN: 22686 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 22687 "SDIOC detected cmd:0x%X:\n", cmd); 22688 /* call error generator */ 22689 sd_faultinjection_ioctl(cmd, arg, un); 22690 err = 0; 22691 break; 22692 22693 #endif /* SD_FAULT_INJECTION */ 22694 22695 case DKIOCFLUSHWRITECACHE: 22696 { 22697 struct dk_callback *dkc = (struct dk_callback *)arg; 22698 22699 mutex_enter(SD_MUTEX(un)); 22700 if (!un->un_f_sync_cache_supported || 22701 !un->un_f_write_cache_enabled) { 22702 err = un->un_f_sync_cache_supported ? 22703 0 : ENOTSUP; 22704 mutex_exit(SD_MUTEX(un)); 22705 if ((flag & FKIOCTL) && dkc != NULL && 22706 dkc->dkc_callback != NULL) { 22707 (*dkc->dkc_callback)(dkc->dkc_cookie, 22708 err); 22709 /* 22710 * Did callback and reported error. 22711 * Since we did a callback, ioctl 22712 * should return 0. 
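 *
 *		Editor's sketch of the callback contract (hypothetical
 *		in-kernel caller and names, illustration only):
 *
 *			static void
 *			my_flush_done(void *cookie, int error)
 *			{
 *				cmn_err(CE_NOTE, "flush done: %d", error);
 *			}
 *
 *			struct dk_callback dkc;
 *
 *			dkc.dkc_callback = my_flush_done;
 *			dkc.dkc_cookie = NULL;
 *
 *		Passed with the FKIOCTL flag set, the ioctl returns 0 once
 *		the SYNCHRONIZE CACHE command has been issued, and
 *		my_flush_done() is later called with the result.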
22713 */ 22714 err = 0; 22715 } 22716 break; 22717 } 22718 mutex_exit(SD_MUTEX(un)); 22719 22720 if ((flag & FKIOCTL) && dkc != NULL && 22721 dkc->dkc_callback != NULL) { 22722 /* async SYNC CACHE request */ 22723 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 22724 } else { 22725 /* synchronous SYNC CACHE request */ 22726 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 22727 } 22728 } 22729 break; 22730 22731 case DKIOCGETWCE: { 22732 22733 int wce; 22734 22735 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 22736 break; 22737 } 22738 22739 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 22740 err = EFAULT; 22741 } 22742 break; 22743 } 22744 22745 case DKIOCSETWCE: { 22746 22747 int wce, sync_supported; 22748 22749 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 22750 err = EFAULT; 22751 break; 22752 } 22753 22754 /* 22755 * Synchronize multiple threads trying to enable 22756 * or disable the cache via the un_f_wcc_cv 22757 * condition variable. 22758 */ 22759 mutex_enter(SD_MUTEX(un)); 22760 22761 /* 22762 * Don't allow the cache to be enabled if the 22763 * config file has it disabled. 22764 */ 22765 if (un->un_f_opt_disable_cache && wce) { 22766 mutex_exit(SD_MUTEX(un)); 22767 err = EINVAL; 22768 break; 22769 } 22770 22771 /* 22772 * Wait for write cache change in progress 22773 * bit to be clear before proceeding. 22774 */ 22775 while (un->un_f_wcc_inprog) 22776 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 22777 22778 un->un_f_wcc_inprog = 1; 22779 22780 if (un->un_f_write_cache_enabled && wce == 0) { 22781 /* 22782 * Disable the write cache. Don't clear 22783 * un_f_write_cache_enabled until after 22784 * the mode select and flush are complete. 22785 */ 22786 sync_supported = un->un_f_sync_cache_supported; 22787 22788 /* 22789 * If cache flush is suppressed, we assume that the 22790 * controller firmware will take care of managing the 22791 * write cache for us: no need to explicitly 22792 * disable it. 22793 */ 22794 if (!un->un_f_suppress_cache_flush) { 22795 mutex_exit(SD_MUTEX(un)); 22796 if ((err = sd_cache_control(ssc, 22797 SD_CACHE_NOCHANGE, 22798 SD_CACHE_DISABLE)) == 0 && 22799 sync_supported) { 22800 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 22801 NULL); 22802 } 22803 } else { 22804 mutex_exit(SD_MUTEX(un)); 22805 } 22806 22807 mutex_enter(SD_MUTEX(un)); 22808 if (err == 0) { 22809 un->un_f_write_cache_enabled = 0; 22810 } 22811 22812 } else if (!un->un_f_write_cache_enabled && wce != 0) { 22813 /* 22814 * Set un_f_write_cache_enabled first, so there is 22815 * no window where the cache is enabled, but the 22816 * bit says it isn't. 22817 */ 22818 un->un_f_write_cache_enabled = 1; 22819 22820 /* 22821 * If cache flush is suppressed, we assume that the 22822 * controller firmware will take care of managing the 22823 * write cache for us: no need to explicitly 22824 * enable it. 
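 *
 *		User-level usage sketch for these ioctls (editor's
 *		illustration, error handling omitted):
 *
 *			int wce;
 *
 *			(void) ioctl(fd, DKIOCGETWCE, &wce);
 *			wce = 1;
 *			(void) ioctl(fd, DKIOCSETWCE, &wce);
 *
 *		Both take a pointer to an int holding the write cache enable
 *		state (0 == disabled, non-zero == enabled).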
22825 */ 22826 if (!un->un_f_suppress_cache_flush) { 22827 mutex_exit(SD_MUTEX(un)); 22828 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 22829 SD_CACHE_ENABLE); 22830 } else { 22831 mutex_exit(SD_MUTEX(un)); 22832 } 22833 22834 mutex_enter(SD_MUTEX(un)); 22835 22836 if (err) { 22837 un->un_f_write_cache_enabled = 0; 22838 } 22839 } 22840 22841 un->un_f_wcc_inprog = 0; 22842 cv_broadcast(&un->un_wcc_cv); 22843 mutex_exit(SD_MUTEX(un)); 22844 break; 22845 } 22846 22847 default: 22848 err = ENOTTY; 22849 break; 22850 } 22851 mutex_enter(SD_MUTEX(un)); 22852 un->un_ncmds_in_driver--; 22853 ASSERT(un->un_ncmds_in_driver >= 0); 22854 mutex_exit(SD_MUTEX(un)); 22855 22856 22857 done_without_assess: 22858 sd_ssc_fini(ssc); 22859 22860 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22861 return (err); 22862 22863 done_with_assess: 22864 mutex_enter(SD_MUTEX(un)); 22865 un->un_ncmds_in_driver--; 22866 ASSERT(un->un_ncmds_in_driver >= 0); 22867 mutex_exit(SD_MUTEX(un)); 22868 22869 done_quick_assess: 22870 if (err != 0) 22871 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22872 /* Uninitialize sd_ssc_t pointer */ 22873 sd_ssc_fini(ssc); 22874 22875 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22876 return (err); 22877 } 22878 22879 22880 /* 22881 * Function: sd_dkio_ctrl_info 22882 * 22883 * Description: This routine is the driver entry point for handling controller 22884 * information ioctl requests (DKIOCINFO). 22885 * 22886 * Arguments: dev - the device number 22887 * arg - pointer to user provided dk_cinfo structure 22888 * specifying the controller type and attributes. 22889 * flag - this argument is a pass through to ddi_copyxxx() 22890 * directly from the mode argument of ioctl(). 22891 * 22892 * Return Code: 0 22893 * EFAULT 22894 * ENXIO 22895 */ 22896 22897 static int 22898 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 22899 { 22900 struct sd_lun *un = NULL; 22901 struct dk_cinfo *info; 22902 dev_info_t *pdip; 22903 int lun, tgt; 22904 22905 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22906 return (ENXIO); 22907 } 22908 22909 info = (struct dk_cinfo *) 22910 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 22911 22912 switch (un->un_ctype) { 22913 case CTYPE_CDROM: 22914 info->dki_ctype = DKC_CDROM; 22915 break; 22916 default: 22917 info->dki_ctype = DKC_SCSI_CCS; 22918 break; 22919 } 22920 pdip = ddi_get_parent(SD_DEVINFO(un)); 22921 info->dki_cnum = ddi_get_instance(pdip); 22922 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 22923 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 22924 } else { 22925 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 22926 DK_DEVLEN - 1); 22927 } 22928 22929 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22930 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 22931 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22932 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 22933 22934 /* Unit Information */ 22935 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 22936 info->dki_slave = ((tgt << 3) | lun); 22937 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 22938 DK_DEVLEN - 1); 22939 info->dki_flags = DKI_FMTVOL; 22940 info->dki_partition = SDPART(dev); 22941 22942 /* Max Transfer size of this device in blocks */ 22943 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 22944 info->dki_addr = 0; 22945 info->dki_space = 0; 22946 info->dki_prio = 0; 22947 info->dki_vec = 0; 22948 22949 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 22950 kmem_free(info, sizeof 
(struct dk_cinfo)); 22951 return (EFAULT); 22952 } else { 22953 kmem_free(info, sizeof (struct dk_cinfo)); 22954 return (0); 22955 } 22956 } 22957 22958 22959 /* 22960 * Function: sd_get_media_info 22961 * 22962 * Description: This routine is the driver entry point for handling ioctl 22963 * requests for the media type or command set profile used by the 22964 * drive to operate on the media (DKIOCGMEDIAINFO). 22965 * 22966 * Arguments: dev - the device number 22967 * arg - pointer to user provided dk_minfo structure 22968 * specifying the media type, logical block size and 22969 * drive capacity. 22970 * flag - this argument is a pass through to ddi_copyxxx() 22971 * directly from the mode argument of ioctl(). 22972 * 22973 * Return Code: 0 22974 * EACCES 22975 * EFAULT 22976 * ENXIO 22977 * EIO 22978 */ 22979 22980 static int 22981 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 22982 { 22983 struct sd_lun *un = NULL; 22984 struct uscsi_cmd com; 22985 struct scsi_inquiry *sinq; 22986 struct dk_minfo media_info; 22987 u_longlong_t media_capacity; 22988 uint64_t capacity; 22989 uint_t lbasize; 22990 uchar_t *out_data; 22991 uchar_t *rqbuf; 22992 int rval = 0; 22993 int rtn; 22994 sd_ssc_t *ssc; 22995 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 22996 (un->un_state == SD_STATE_OFFLINE)) { 22997 return (ENXIO); 22998 } 22999 23000 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 23001 23002 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 23003 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 23004 23005 /* Issue a TUR to determine if the drive is ready with media present */ 23006 ssc = sd_ssc_init(un); 23007 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA); 23008 if (rval == ENXIO) { 23009 goto done; 23010 } else if (rval != 0) { 23011 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23012 } 23013 23014 /* Now get configuration data */ 23015 if (ISCD(un)) { 23016 media_info.dki_media_type = DK_CDROM; 23017 23018 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 23019 if (un->un_f_mmc_cap == TRUE) { 23020 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, 23021 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 23022 SD_PATH_STANDARD); 23023 23024 if (rtn) { 23025 /* 23026 * We ignore all failures for CD and need to 23027 * put the assessment before processing code 23028 * to avoid missing assessment for FMA.
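 *
 *				(Editor's note: on success, the current-profile
 *				bytes decoded below, out_data[6] and
 *				out_data[7], come from the MMC GET
 *				CONFIGURATION feature header; e.g. a current
 *				profile of 0x0008 identifies CD-ROM media,
 *				while 0x0010 would be DVD-ROM.)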
23029 */ 23030 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23031 /* 23032 * Failed for other than an illegal request 23033 * or command not supported 23034 */ 23035 if ((com.uscsi_status == STATUS_CHECK) && 23036 (com.uscsi_rqstatus == STATUS_GOOD)) { 23037 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 23038 (rqbuf[12] != 0x20)) { 23039 rval = EIO; 23040 goto no_assessment; 23041 } 23042 } 23043 } else { 23044 /* 23045 * The GET CONFIGURATION command succeeded 23046 * so set the media type according to the 23047 * returned data 23048 */ 23049 media_info.dki_media_type = out_data[6]; 23050 media_info.dki_media_type <<= 8; 23051 media_info.dki_media_type |= out_data[7]; 23052 } 23053 } 23054 } else { 23055 /* 23056 * The profile list is not available, so we attempt to identify 23057 * the media type based on the inquiry data 23058 */ 23059 sinq = un->un_sd->sd_inq; 23060 if ((sinq->inq_dtype == DTYPE_DIRECT) || 23061 (sinq->inq_dtype == DTYPE_OPTICAL)) { 23062 /* This is a direct access device or optical disk */ 23063 media_info.dki_media_type = DK_FIXED_DISK; 23064 23065 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 23066 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 23067 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 23068 media_info.dki_media_type = DK_ZIP; 23069 } else if ( 23070 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 23071 media_info.dki_media_type = DK_JAZ; 23072 } 23073 } 23074 } else { 23075 /* 23076 * Not a CD, direct access or optical disk so return 23077 * unknown media 23078 */ 23079 media_info.dki_media_type = DK_UNKNOWN; 23080 } 23081 } 23082 23083 /* Now read the capacity so we can provide the lbasize and capacity */ 23084 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 23085 SD_PATH_DIRECT); 23086 switch (rval) { 23087 case 0: 23088 break; 23089 case EACCES: 23090 rval = EACCES; 23091 goto done; 23092 default: 23093 rval = EIO; 23094 goto done; 23095 } 23096 23097 /* 23098 * If lun is expanded dynamically, update the un structure. 23099 */ 23100 mutex_enter(SD_MUTEX(un)); 23101 if ((un->un_f_blockcount_is_valid == TRUE) && 23102 (un->un_f_tgt_blocksize_is_valid == TRUE) && 23103 (capacity > un->un_blockcount)) { 23104 sd_update_block_info(un, lbasize, capacity); 23105 } 23106 mutex_exit(SD_MUTEX(un)); 23107 23108 media_info.dki_lbsize = lbasize; 23109 media_capacity = capacity; 23110 23111 /* 23112 * sd_send_scsi_READ_CAPACITY() reports capacity in 23113 * un->un_sys_blocksize chunks. So we need to convert it into 23114 * cap.lbasize chunks. 23115 */ 23116 media_capacity *= un->un_sys_blocksize; 23117 media_capacity /= lbasize; 23118 media_info.dki_capacity = media_capacity; 23119 23120 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 23121 rval = EFAULT; 23122 /* Keep the goto; code may be added below this point in future */ 23123 goto no_assessment; 23124 } 23125 done: 23126 if (rval != 0) { 23127 if (rval == EIO) 23128 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23129 else 23130 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23131 } 23132 no_assessment: 23133 sd_ssc_fini(ssc); 23134 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 23135 kmem_free(rqbuf, SENSE_LENGTH); 23136 return (rval); 23137 } 23138 23139 /* 23140 * Function: sd_get_media_info_ext 23141 * 23142 * Description: This routine is the driver entry point for handling ioctl 23143 * requests for the media type or command set profile used by the 23144 * drive to operate on the media (DKIOCGMEDIAINFOEXT).
The 23145 * difference between this ioctl and DKIOCGMEDIAINFO is that the return 23146 * value of this ioctl contains both logical block size and physical 23147 * block size. 23148 * 23149 * 23150 * Arguments: dev - the device number 23151 * arg - pointer to user provided dk_minfo_ext structure 23152 * specifying the media type, logical block size, 23153 * physical block size and disk capacity. 23154 * flag - this argument is a pass through to ddi_copyxxx() 23155 * directly from the mode argument of ioctl(). 23156 * 23157 * Return Code: 0 23158 * EACCES 23159 * EFAULT 23160 * ENXIO 23161 * EIO 23162 */ 23163 23164 static int 23165 sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag) 23166 { 23167 struct sd_lun *un = NULL; 23168 struct uscsi_cmd com; 23169 struct scsi_inquiry *sinq; 23170 struct dk_minfo_ext media_info_ext; 23171 u_longlong_t media_capacity; 23172 uint64_t capacity; 23173 uint_t lbasize; 23174 uint_t pbsize; 23175 uchar_t *out_data; 23176 uchar_t *rqbuf; 23177 int rval = 0; 23178 int rtn; 23179 sd_ssc_t *ssc; 23180 23181 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 23182 (un->un_state == SD_STATE_OFFLINE)) { 23183 return (ENXIO); 23184 } 23185 23186 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_ext: entry\n"); 23187 23188 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 23189 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 23190 ssc = sd_ssc_init(un); 23191 23192 /* Issue a TUR to determine if the drive is ready with media present */ 23193 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA); 23194 if (rval == ENXIO) { 23195 goto done; 23196 } else if (rval != 0) { 23197 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23198 } 23199 23200 /* Now get configuration data */ 23201 if (ISCD(un)) { 23202 media_info_ext.dki_media_type = DK_CDROM; 23203 23204 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 23205 if (un->un_f_mmc_cap == TRUE) { 23206 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, 23207 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 23208 SD_PATH_STANDARD); 23209 23210 if (rtn) { 23211 /* 23212 * We ignore all failures for CD and need to 23213 * put the assessment before processing code 23214 * to avoid missing assessment for FMA.
23215 */ 23216 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23217 /* 23218 * Failed for other than an illegal request 23219 * or command not supported 23220 */ 23221 if ((com.uscsi_status == STATUS_CHECK) && 23222 (com.uscsi_rqstatus == STATUS_GOOD)) { 23223 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 23224 (rqbuf[12] != 0x20)) { 23225 rval = EIO; 23226 goto no_assessment; 23227 } 23228 } 23229 } else { 23230 /* 23231 * The GET CONFIGURATION command succeeded 23232 * so set the media type according to the 23233 * returned data 23234 */ 23235 media_info_ext.dki_media_type = out_data[6]; 23236 media_info_ext.dki_media_type <<= 8; 23237 media_info_ext.dki_media_type |= out_data[7]; 23238 } 23239 } 23240 } else { 23241 /* 23242 * The profile list is not available, so we attempt to identify 23243 * the media type based on the inquiry data 23244 */ 23245 sinq = un->un_sd->sd_inq; 23246 if ((sinq->inq_dtype == DTYPE_DIRECT) || 23247 (sinq->inq_dtype == DTYPE_OPTICAL)) { 23248 /* This is a direct access device or optical disk */ 23249 media_info_ext.dki_media_type = DK_FIXED_DISK; 23250 23251 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 23252 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 23253 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 23254 media_info_ext.dki_media_type = DK_ZIP; 23255 } else if ( 23256 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 23257 media_info_ext.dki_media_type = DK_JAZ; 23258 } 23259 } 23260 } else { 23261 /* 23262 * Not a CD, direct access or optical disk so return 23263 * unknown media 23264 */ 23265 media_info_ext.dki_media_type = DK_UNKNOWN; 23266 } 23267 } 23268 23269 /* 23270 * Now read the capacity so we can provide the lbasize, 23271 * pbsize and capacity. 23272 */ 23273 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize, &pbsize, 23274 SD_PATH_DIRECT); 23275 23276 if (rval != 0) { 23277 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 23278 SD_PATH_DIRECT); 23279 23280 switch (rval) { 23281 case 0: 23282 pbsize = lbasize; 23283 media_capacity = capacity; 23284 /* 23285 * sd_send_scsi_READ_CAPACITY() reports capacity in 23286 * un->un_sys_blocksize chunks. So we need to convert 23287 * it into cap.lbsize chunks. 23288 */ 23289 if (un->un_f_has_removable_media) { 23290 media_capacity *= un->un_sys_blocksize; 23291 media_capacity /= lbasize; 23292 } 23293 break; 23294 case EACCES: 23295 rval = EACCES; 23296 goto done; 23297 default: 23298 rval = EIO; 23299 goto done; 23300 } 23301 } else { 23302 media_capacity = capacity; 23303 } 23304 23305 /* 23306 * If lun is expanded dynamically, update the un structure. 
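 *
 *	 (Editor's note: concretely, if READ CAPACITY just reported more
 *	 blocks than the cached un_blockcount while both cached values are
 *	 valid, sd_update_block_info() below refreshes the cached geometry.)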
23307 */ 23308 mutex_enter(SD_MUTEX(un)); 23309 if ((un->un_f_blockcount_is_valid == TRUE) && 23310 (un->un_f_tgt_blocksize_is_valid == TRUE) && 23311 (capacity > un->un_blockcount)) { 23312 sd_update_block_info(un, lbasize, capacity); 23313 } 23314 mutex_exit(SD_MUTEX(un)); 23315 23316 media_info_ext.dki_lbsize = lbasize; 23317 media_info_ext.dki_capacity = media_capacity; 23318 media_info_ext.dki_pbsize = pbsize; 23319 23320 if (ddi_copyout(&media_info_ext, arg, sizeof (struct dk_minfo_ext), 23321 flag)) { 23322 rval = EFAULT; 23323 goto no_assessment; 23324 } 23325 done: 23326 if (rval != 0) { 23327 if (rval == EIO) 23328 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23329 else 23330 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23331 } 23332 no_assessment: 23333 sd_ssc_fini(ssc); 23334 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 23335 kmem_free(rqbuf, SENSE_LENGTH); 23336 return (rval); 23337 } 23338 23339 /* 23340 * Function: sd_check_media 23341 * 23342 * Description: This utility routine implements the functionality for the 23343 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 23344 * driver state changes from that specified by the user 23345 * (inserted or ejected). For example, if the user specifies 23346 * DKIO_EJECTED and the current media state is inserted this 23347 * routine will immediately return DKIO_INSERTED. However, if the 23348 * current media state is not inserted the user thread will be 23349 * blocked until the drive state changes. If DKIO_NONE is specified 23350 * the user thread will block until a drive state change occurs. 23351 * 23352 * Arguments: dev - the device number 23353 * state - user pointer to a dkio_state, updated with the current 23354 * drive state at return. 23355 * 23356 * Return Code: ENXIO 23357 * EIO 23358 * EAGAIN 23359 * EINTR 23360 */ 23361 23362 static int 23363 sd_check_media(dev_t dev, enum dkio_state state) 23364 { 23365 struct sd_lun *un = NULL; 23366 enum dkio_state prev_state; 23367 opaque_t token = NULL; 23368 int rval = 0; 23369 sd_ssc_t *ssc; 23370 dev_t sub_dev; 23371 23372 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23373 return (ENXIO); 23374 } 23375 23376 /* 23377 * sub_dev is used when submitting request to scsi watch. 23378 * All submissions are unified to use same device number. 23379 */ 23380 sub_dev = sd_make_device(SD_DEVINFO(un)); 23381 23382 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 23383 23384 ssc = sd_ssc_init(un); 23385 23386 mutex_enter(SD_MUTEX(un)); 23387 23388 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 23389 "state=%x, mediastate=%x\n", state, un->un_mediastate); 23390 23391 prev_state = un->un_mediastate; 23392 23393 /* is there anything to do? */ 23394 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 23395 /* 23396 * submit the request to the scsi_watch service; 23397 * scsi_media_watch_cb() does the real work 23398 */ 23399 mutex_exit(SD_MUTEX(un)); 23400 23401 /* 23402 * This change handles the case where a scsi watch request is 23403 * added to a device that is powered down. To accomplish this 23404 * we power up the device before adding the scsi watch request, 23405 * since the scsi watch sends a TUR directly to the device 23406 * which the device cannot handle if it is powered down. 
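 *
 *		From user level this path is typically driven by a loop such
 *		as the following (editor's illustration; handle_insertion()
 *		is hypothetical):
 *
 *			enum dkio_state state = DKIO_NONE;
 *
 *			for (;;) {
 *				if (ioctl(fd, DKIOCSTATE, &state) < 0)
 *					break;
 *				if (state == DKIO_INSERTED)
 *					handle_insertion();
 *			}
 *
 *		Each call blocks until the media state differs from the state
 *		passed in, per the description above.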
23407 */ 23408 if (sd_pm_entry(un) != DDI_SUCCESS) { 23409 mutex_enter(SD_MUTEX(un)); 23410 goto done; 23411 } 23412 23413 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 23414 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 23415 (caddr_t)sub_dev); 23416 23417 sd_pm_exit(un); 23418 23419 mutex_enter(SD_MUTEX(un)); 23420 if (token == NULL) { 23421 rval = EAGAIN; 23422 goto done; 23423 } 23424 23425 /* 23426 * This is a special case IOCTL that doesn't return 23427 * until the media state changes. Routine sdpower 23428 * knows about and handles this so don't count it 23429 * as an active cmd in the driver, which would 23430 * keep the device busy to the pm framework. 23431 * If the count isn't decremented the device can't 23432 * be powered down. 23433 */ 23434 un->un_ncmds_in_driver--; 23435 ASSERT(un->un_ncmds_in_driver >= 0); 23436 23437 /* 23438 * if a prior request had been made, this will be the same 23439 * token, as scsi_watch was designed that way. 23440 */ 23441 un->un_swr_token = token; 23442 un->un_specified_mediastate = state; 23443 23444 /* 23445 * now wait for media change 23446 * we will not be signalled unless mediastate == state but it is 23447 * still better to test for this condition, since there is a 23448 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 23449 */ 23450 SD_TRACE(SD_LOG_COMMON, un, 23451 "sd_check_media: waiting for media state change\n"); 23452 while (un->un_mediastate == state) { 23453 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 23454 SD_TRACE(SD_LOG_COMMON, un, 23455 "sd_check_media: waiting for media state " 23456 "was interrupted\n"); 23457 un->un_ncmds_in_driver++; 23458 rval = EINTR; 23459 goto done; 23460 } 23461 SD_TRACE(SD_LOG_COMMON, un, 23462 "sd_check_media: received signal, state=%x\n", 23463 un->un_mediastate); 23464 } 23465 /* 23466 * Inc the counter to indicate the device once again 23467 * has an active outstanding cmd. 23468 */ 23469 un->un_ncmds_in_driver++; 23470 } 23471 23472 /* invalidate geometry */ 23473 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 23474 sr_ejected(un); 23475 } 23476 23477 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 23478 uint64_t capacity; 23479 uint_t lbasize; 23480 23481 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 23482 mutex_exit(SD_MUTEX(un)); 23483 /* 23484 * Since the following routines use SD_PATH_DIRECT, we must 23485 * call PM directly before the upcoming disk accesses. This 23486 * may cause the disk to be power/spin up. 
23487 */ 23488 23489 if (sd_pm_entry(un) == DDI_SUCCESS) { 23490 rval = sd_send_scsi_READ_CAPACITY(ssc, 23491 &capacity, &lbasize, SD_PATH_DIRECT); 23492 if (rval != 0) { 23493 sd_pm_exit(un); 23494 if (rval == EIO) 23495 sd_ssc_assessment(ssc, 23496 SD_FMT_STATUS_CHECK); 23497 else 23498 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23499 mutex_enter(SD_MUTEX(un)); 23500 goto done; 23501 } 23502 } else { 23503 rval = EIO; 23504 mutex_enter(SD_MUTEX(un)); 23505 goto done; 23506 } 23507 mutex_enter(SD_MUTEX(un)); 23508 23509 sd_update_block_info(un, lbasize, capacity); 23510 23511 /* 23512 * Check if the media in the device is writable or not 23513 */ 23514 if (ISCD(un)) { 23515 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 23516 } 23517 23518 mutex_exit(SD_MUTEX(un)); 23519 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 23520 if ((cmlb_validate(un->un_cmlbhandle, 0, 23521 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 23522 sd_set_pstats(un); 23523 SD_TRACE(SD_LOG_IO_PARTITION, un, 23524 "sd_check_media: un:0x%p pstats created and " 23525 "set\n", un); 23526 } 23527 23528 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 23529 SD_PATH_DIRECT); 23530 23531 sd_pm_exit(un); 23532 23533 if (rval != 0) { 23534 if (rval == EIO) 23535 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23536 else 23537 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23538 } 23539 23540 mutex_enter(SD_MUTEX(un)); 23541 } 23542 done: 23543 sd_ssc_fini(ssc); 23544 un->un_f_watcht_stopped = FALSE; 23545 if (token != NULL && un->un_swr_token != NULL) { 23546 /* 23547 * Use of this local token and the mutex ensures that we avoid 23548 * some race conditions associated with terminating the 23549 * scsi watch. 23550 */ 23551 token = un->un_swr_token; 23552 mutex_exit(SD_MUTEX(un)); 23553 (void) scsi_watch_request_terminate(token, 23554 SCSI_WATCH_TERMINATE_WAIT); 23555 if (scsi_watch_get_ref_count(token) == 0) { 23556 mutex_enter(SD_MUTEX(un)); 23557 un->un_swr_token = (opaque_t)NULL; 23558 } else { 23559 mutex_enter(SD_MUTEX(un)); 23560 } 23561 } 23562 23563 /* 23564 * Update the capacity kstat value, if no media was present previously 23565 * (capacity kstat is 0) and media has been inserted 23566 * (un_f_blockcount_is_valid == TRUE) 23567 */ 23568 if (un->un_errstats) { 23569 struct sd_errstats *stp = NULL; 23570 23571 stp = (struct sd_errstats *)un->un_errstats->ks_data; 23572 if ((stp->sd_capacity.value.ui64 == 0) && 23573 (un->un_f_blockcount_is_valid == TRUE)) { 23574 stp->sd_capacity.value.ui64 = 23575 (uint64_t)((uint64_t)un->un_blockcount * 23576 un->un_sys_blocksize); 23577 } 23578 } 23579 mutex_exit(SD_MUTEX(un)); 23580 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 23581 return (rval); 23582 } 23583 23584 23585 /* 23586 * Function: sd_delayed_cv_broadcast 23587 * 23588 * Description: Delayed cv_broadcast to allow the target to recover from media 23589 * insertion. 23590 * 23591 * Arguments: arg - driver soft state (unit) structure 23592 */ 23593 23594 static void 23595 sd_delayed_cv_broadcast(void *arg) 23596 { 23597 struct sd_lun *un = arg; 23598 23599 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 23600 23601 mutex_enter(SD_MUTEX(un)); 23602 un->un_dcvb_timeid = NULL; 23603 cv_broadcast(&un->un_state_cv); 23604 mutex_exit(SD_MUTEX(un)); 23605 } 23606 23607 23608 /* 23609 * Function: sd_media_watch_cb 23610 * 23611 * Description: Callback routine used for support of the DKIOCSTATE ioctl.
This 23612 * routine processes the TUR sense data and updates the driver 23613 * state if a transition has occurred. The user thread 23614 * (sd_check_media) is then signalled. 23615 * 23616 * Arguments: arg - the device 'dev_t' is used for context to discriminate 23617 * among multiple watches that share this callback function 23618 * resultp - scsi watch facility result packet containing scsi 23619 * packet, status byte and sense data 23620 * 23621 * Return Code: 0 for success, -1 for failure 23622 */ 23623 23624 static int 23625 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 23626 { 23627 struct sd_lun *un; 23628 struct scsi_status *statusp = resultp->statusp; 23629 uint8_t *sensep = (uint8_t *)resultp->sensep; 23630 enum dkio_state state = DKIO_NONE; 23631 dev_t dev = (dev_t)arg; 23632 uchar_t actual_sense_length; 23633 uint8_t skey, asc, ascq; 23634 23635 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23636 return (-1); 23637 } 23638 actual_sense_length = resultp->actual_sense_length; 23639 23640 mutex_enter(SD_MUTEX(un)); 23641 SD_TRACE(SD_LOG_COMMON, un, 23642 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 23643 *((char *)statusp), (void *)sensep, actual_sense_length); 23644 23645 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 23646 un->un_mediastate = DKIO_DEV_GONE; 23647 cv_broadcast(&un->un_state_cv); 23648 mutex_exit(SD_MUTEX(un)); 23649 23650 return (0); 23651 } 23652 23653 /* 23654 * If there was a check condition then sensep points to valid sense data. 23655 * If the status was not a check condition but a reservation or busy status, 23656 * then the new state is DKIO_NONE. 23657 */ 23658 if (sensep != NULL) { 23659 skey = scsi_sense_key(sensep); 23660 asc = scsi_sense_asc(sensep); 23661 ascq = scsi_sense_ascq(sensep); 23662 23663 SD_INFO(SD_LOG_COMMON, un, 23664 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 23665 skey, asc, ascq); 23666 /* This routine only uses up to 13 bytes of sense data. */ 23667 if (actual_sense_length >= 13) { 23668 if (skey == KEY_UNIT_ATTENTION) { 23669 if (asc == 0x28) { 23670 state = DKIO_INSERTED; 23671 } 23672 } else if (skey == KEY_NOT_READY) { 23673 /* 23674 * Sense data of 02/06/00 means that the 23675 * drive could not read the media (No 23676 * reference position found). In this case 23677 * to prevent a hang on the DKIOCSTATE IOCTL 23678 * we set the media state to DKIO_INSERTED. 23679 */ 23680 if (asc == 0x06 && ascq == 0x00) 23681 state = DKIO_INSERTED; 23682 23683 /* 23684 * Sense data of 02/04/02 means that the host 23685 * should send a start command. Explicitly 23686 * leave the media state as is 23687 * (inserted), as the media is inserted 23688 * and the host has stopped the device for PM 23689 * reasons. The next true read/write 23690 * to this media will bring the 23691 * device to the right state for 23692 * media access. 23693 */ 23694 if (asc == 0x3a) { 23695 state = DKIO_EJECTED; 23696 } else { 23697 /* 23698 * If the drive is busy with an 23699 * operation or long write, keep the 23700 * media in an inserted state. 23701 */ 23702 23703 if ((asc == 0x04) && 23704 ((ascq == 0x02) || 23705 (ascq == 0x07) || 23706 (ascq == 0x08))) { 23707 state = DKIO_INSERTED; 23708 } 23709 } 23710 } else if (skey == KEY_NO_SENSE) { 23711 if ((asc == 0x00) && (ascq == 0x00)) { 23712 /* 23713 * Sense Data 00/00/00 does not provide 23714 * any information about the state of 23715 * the media. Ignore it.
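 *
 *					(Editor's summary of the mapping
 *					implemented in this routine:
 *					UNIT ATTENTION, ASC 0x28 -> inserted;
 *					NOT READY, 06/00 -> inserted;
 *					NOT READY, ASC 0x3A -> ejected;
 *					NOT READY, 04/02, 04/07 or 04/08
 *					-> inserted; NO SENSE, 00/00 ->
 *					ignored.)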
23716 */ 23717 mutex_exit(SD_MUTEX(un)); 23718 return (0); 23719 } 23720 } 23721 } 23722 } else if ((*((char *)statusp) == STATUS_GOOD) && 23723 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 23724 state = DKIO_INSERTED; 23725 } 23726 23727 SD_TRACE(SD_LOG_COMMON, un, 23728 "sd_media_watch_cb: state=%x, specified=%x\n", 23729 state, un->un_specified_mediastate); 23730 23731 /* 23732 * now signal the waiting thread if this is *not* the specified state; 23733 * delay the signal if the state is DKIO_INSERTED to allow the target 23734 * to recover 23735 */ 23736 if (state != un->un_specified_mediastate) { 23737 un->un_mediastate = state; 23738 if (state == DKIO_INSERTED) { 23739 /* 23740 * delay the signal to give the drive a chance 23741 * to do what it apparently needs to do 23742 */ 23743 SD_TRACE(SD_LOG_COMMON, un, 23744 "sd_media_watch_cb: delayed cv_broadcast\n"); 23745 if (un->un_dcvb_timeid == NULL) { 23746 un->un_dcvb_timeid = 23747 timeout(sd_delayed_cv_broadcast, un, 23748 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 23749 } 23750 } else { 23751 SD_TRACE(SD_LOG_COMMON, un, 23752 "sd_media_watch_cb: immediate cv_broadcast\n"); 23753 cv_broadcast(&un->un_state_cv); 23754 } 23755 } 23756 mutex_exit(SD_MUTEX(un)); 23757 return (0); 23758 } 23759 23760 23761 /* 23762 * Function: sd_dkio_get_temp 23763 * 23764 * Description: This routine is the driver entry point for handling ioctl 23765 * requests to get the disk temperature. 23766 * 23767 * Arguments: dev - the device number 23768 * arg - pointer to user provided dk_temperature structure. 23769 * flag - this argument is a pass through to ddi_copyxxx() 23770 * directly from the mode argument of ioctl(). 23771 * 23772 * Return Code: 0 23773 * EFAULT 23774 * ENXIO 23775 * EAGAIN 23776 */ 23777 23778 static int 23779 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 23780 { 23781 struct sd_lun *un = NULL; 23782 struct dk_temperature *dktemp = NULL; 23783 uchar_t *temperature_page; 23784 int rval = 0; 23785 int path_flag = SD_PATH_STANDARD; 23786 sd_ssc_t *ssc; 23787 23788 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23789 return (ENXIO); 23790 } 23791 23792 ssc = sd_ssc_init(un); 23793 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 23794 23795 /* copyin the disk temp argument to get the user flags */ 23796 if (ddi_copyin((void *)arg, dktemp, 23797 sizeof (struct dk_temperature), flag) != 0) { 23798 rval = EFAULT; 23799 goto done; 23800 } 23801 23802 /* Initialize the temperature to invalid. */ 23803 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 23804 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 23805 23806 /* 23807 * Note: Investigate removing the "bypass pm" semantic. 23808 * Can we just bypass PM always? 23809 */ 23810 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 23811 path_flag = SD_PATH_DIRECT; 23812 ASSERT(!mutex_owned(&un->un_pm_mutex)); 23813 mutex_enter(&un->un_pm_mutex); 23814 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 23815 /* 23816 * If DKT_BYPASS_PM is set, and the drive happens to be 23817 * in low power mode, we can not wake it up, Need to 23818 * return EAGAIN. 23819 */ 23820 mutex_exit(&un->un_pm_mutex); 23821 rval = EAGAIN; 23822 goto done; 23823 } else { 23824 /* 23825 * Indicate to PM the device is busy. This is required 23826 * to avoid a race - i.e. the ioctl is issuing a 23827 * command and the pm framework brings down the device 23828 * to low power mode (possible power cut-off on some 23829 * platforms). 
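 * Once sd_pm_entry() succeeds, the device is marked busy to the PM
 * framework; the matching sd_pm_exit() is issued at the done1 label
 * below after the LOG SENSE command completes, whatever its status.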
23830 */
23831 mutex_exit(&un->un_pm_mutex);
23832 if (sd_pm_entry(un) != DDI_SUCCESS) {
23833 rval = EAGAIN;
23834 goto done;
23835 }
23836 }
23837 }
23838
23839 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);
23840
23841 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page,
23842 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag);
23843 if (rval != 0)
23844 goto done2;
23845
23846 /*
23847 * For the current temperature, verify that the parameter length is
23848 * 0x02 and the parameter code is 0x00.
23849 */
23850 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
23851 (temperature_page[5] == 0x00)) {
23852 if (temperature_page[9] == 0xFF) {
23853 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
23854 } else {
23855 dktemp->dkt_cur_temp = (short)(temperature_page[9]);
23856 }
23857 }
23858
23859 /*
23860 * For the reference temperature, verify that the parameter
23861 * length is 0x02 and the parameter code is 0x01.
23862 */
23863 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
23864 (temperature_page[11] == 0x01)) {
23865 if (temperature_page[15] == 0xFF) {
23866 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
23867 } else {
23868 dktemp->dkt_ref_temp = (short)(temperature_page[15]);
23869 }
23870 }
23871
23872 /* Do the copyout regardless of the temperature command's status. */
23873 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
23874 flag) != 0) {
23875 rval = EFAULT;
23876 goto done1;
23877 }
23878
23879 done2:
23880 if (rval != 0) {
23881 if (rval == EIO)
23882 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
23883 else
23884 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23885 }
23886 done1:
23887 if (path_flag == SD_PATH_DIRECT) {
23888 sd_pm_exit(un);
23889 }
23890
23891 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
23892 done:
23893 sd_ssc_fini(ssc);
23894 if (dktemp != NULL) {
23895 kmem_free(dktemp, sizeof (struct dk_temperature));
23896 }
23897
23898 return (rval);
23899 }
23900
23901
23902 /*
23903 * Function: sd_log_page_supported
23904 *
23905 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
23906 * supported log pages.
23907 *
23908 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
23909 * structure for this target.
23910 * log_page - the log page code to look for in the list of
23911 * supported log pages.
23912 * Return Code: -1 - on error (log sense is optional and may not be supported).
23913 * 0 - log page not found.
23914 * 1 - log page found.
23915 */
23916
23917 static int
23918 sd_log_page_supported(sd_ssc_t *ssc, int log_page)
23919 {
23920 uchar_t *log_page_data;
23921 int i;
23922 int match = 0;
23923 int log_size;
23924 int status = 0;
23925 struct sd_lun *un;
23926
23927 ASSERT(ssc != NULL);
23928 un = ssc->ssc_un;
23929 ASSERT(un != NULL);
23930
23931 log_page_data = kmem_zalloc(0xFF, KM_SLEEP);
23932
23933 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0,
23934 SD_PATH_DIRECT);
23935
23936 if (status != 0) {
23937 if (status == EIO) {
23938 /*
23939 * Some disks do not support log sense; we
23940 * should ignore this kind of error (sense key
23941 * 0x5 - illegal request).
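 * Such a failure is assessed below as SD_FMT_IGNORE_COMPROMISE so
 * that an unsupported optional command is not treated as a fault.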
23942 */ 23943 uint8_t *sensep; 23944 int senlen; 23945 23946 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 23947 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 23948 ssc->ssc_uscsi_cmd->uscsi_rqresid); 23949 23950 if (senlen > 0 && 23951 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 23952 sd_ssc_assessment(ssc, 23953 SD_FMT_IGNORE_COMPROMISE); 23954 } else { 23955 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23956 } 23957 } else { 23958 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23959 } 23960 23961 SD_ERROR(SD_LOG_COMMON, un, 23962 "sd_log_page_supported: failed log page retrieval\n"); 23963 kmem_free(log_page_data, 0xFF); 23964 return (-1); 23965 } 23966 23967 log_size = log_page_data[3]; 23968 23969 /* 23970 * The list of supported log pages start from the fourth byte. Check 23971 * until we run out of log pages or a match is found. 23972 */ 23973 for (i = 4; (i < (log_size + 4)) && !match; i++) { 23974 if (log_page_data[i] == log_page) { 23975 match++; 23976 } 23977 } 23978 kmem_free(log_page_data, 0xFF); 23979 return (match); 23980 } 23981 23982 23983 /* 23984 * Function: sd_mhdioc_failfast 23985 * 23986 * Description: This routine is the driver entry point for handling ioctl 23987 * requests to enable/disable the multihost failfast option. 23988 * (MHIOCENFAILFAST) 23989 * 23990 * Arguments: dev - the device number 23991 * arg - user specified probing interval. 23992 * flag - this argument is a pass through to ddi_copyxxx() 23993 * directly from the mode argument of ioctl(). 23994 * 23995 * Return Code: 0 23996 * EFAULT 23997 * ENXIO 23998 */ 23999 24000 static int 24001 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 24002 { 24003 struct sd_lun *un = NULL; 24004 int mh_time; 24005 int rval = 0; 24006 24007 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24008 return (ENXIO); 24009 } 24010 24011 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 24012 return (EFAULT); 24013 24014 if (mh_time) { 24015 mutex_enter(SD_MUTEX(un)); 24016 un->un_resvd_status |= SD_FAILFAST; 24017 mutex_exit(SD_MUTEX(un)); 24018 /* 24019 * If mh_time is INT_MAX, then this ioctl is being used for 24020 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 24021 */ 24022 if (mh_time != INT_MAX) { 24023 rval = sd_check_mhd(dev, mh_time); 24024 } 24025 } else { 24026 (void) sd_check_mhd(dev, 0); 24027 mutex_enter(SD_MUTEX(un)); 24028 un->un_resvd_status &= ~SD_FAILFAST; 24029 mutex_exit(SD_MUTEX(un)); 24030 } 24031 return (rval); 24032 } 24033 24034 24035 /* 24036 * Function: sd_mhdioc_takeown 24037 * 24038 * Description: This routine is the driver entry point for handling ioctl 24039 * requests to forcefully acquire exclusive access rights to the 24040 * multihost disk (MHIOCTKOWN). 24041 * 24042 * Arguments: dev - the device number 24043 * arg - user provided structure specifying the delay 24044 * parameters in milliseconds 24045 * flag - this argument is a pass through to ddi_copyxxx() 24046 * directly from the mode argument of ioctl(). 
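 *
 *		For illustration, a cluster agent might invoke this ioctl
 *		roughly as follows (user-level sketch; the descriptor and
 *		delay values are assumptions, not part of this driver):
 *
 *			struct mhioctkown t;
 *
 *			t.reinstate_resv_delay = 1000;	(milliseconds)
 *			t.min_ownership_delay = 6000;	(milliseconds)
 *			t.max_ownership_delay = 30000;	(milliseconds)
 *			if (ioctl(fd, MHIOCTKOWN, &t) != 0)
 *				handle the error, e.g. EACCES or EIO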
24047 * 24048 * Return Code: 0 24049 * EFAULT 24050 * ENXIO 24051 */ 24052 24053 static int 24054 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 24055 { 24056 struct sd_lun *un = NULL; 24057 struct mhioctkown *tkown = NULL; 24058 int rval = 0; 24059 24060 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24061 return (ENXIO); 24062 } 24063 24064 if (arg != NULL) { 24065 tkown = (struct mhioctkown *) 24066 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 24067 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 24068 if (rval != 0) { 24069 rval = EFAULT; 24070 goto error; 24071 } 24072 } 24073 24074 rval = sd_take_ownership(dev, tkown); 24075 mutex_enter(SD_MUTEX(un)); 24076 if (rval == 0) { 24077 un->un_resvd_status |= SD_RESERVE; 24078 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 24079 sd_reinstate_resv_delay = 24080 tkown->reinstate_resv_delay * 1000; 24081 } else { 24082 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 24083 } 24084 /* 24085 * Give the scsi_watch routine interval set by 24086 * the MHIOCENFAILFAST ioctl precedence here. 24087 */ 24088 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 24089 mutex_exit(SD_MUTEX(un)); 24090 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 24091 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24092 "sd_mhdioc_takeown : %d\n", 24093 sd_reinstate_resv_delay); 24094 } else { 24095 mutex_exit(SD_MUTEX(un)); 24096 } 24097 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 24098 sd_mhd_reset_notify_cb, (caddr_t)un); 24099 } else { 24100 un->un_resvd_status &= ~SD_RESERVE; 24101 mutex_exit(SD_MUTEX(un)); 24102 } 24103 24104 error: 24105 if (tkown != NULL) { 24106 kmem_free(tkown, sizeof (struct mhioctkown)); 24107 } 24108 return (rval); 24109 } 24110 24111 24112 /* 24113 * Function: sd_mhdioc_release 24114 * 24115 * Description: This routine is the driver entry point for handling ioctl 24116 * requests to release exclusive access rights to the multihost 24117 * disk (MHIOCRELEASE). 24118 * 24119 * Arguments: dev - the device number 24120 * 24121 * Return Code: 0 24122 * ENXIO 24123 */ 24124 24125 static int 24126 sd_mhdioc_release(dev_t dev) 24127 { 24128 struct sd_lun *un = NULL; 24129 timeout_id_t resvd_timeid_save; 24130 int resvd_status_save; 24131 int rval = 0; 24132 24133 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24134 return (ENXIO); 24135 } 24136 24137 mutex_enter(SD_MUTEX(un)); 24138 resvd_status_save = un->un_resvd_status; 24139 un->un_resvd_status &= 24140 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 24141 if (un->un_resvd_timeid) { 24142 resvd_timeid_save = un->un_resvd_timeid; 24143 un->un_resvd_timeid = NULL; 24144 mutex_exit(SD_MUTEX(un)); 24145 (void) untimeout(resvd_timeid_save); 24146 } else { 24147 mutex_exit(SD_MUTEX(un)); 24148 } 24149 24150 /* 24151 * destroy any pending timeout thread that may be attempting to 24152 * reinstate reservation on this device. 
24153 */ 24154 sd_rmv_resv_reclaim_req(dev); 24155 24156 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 24157 mutex_enter(SD_MUTEX(un)); 24158 if ((un->un_mhd_token) && 24159 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 24160 mutex_exit(SD_MUTEX(un)); 24161 (void) sd_check_mhd(dev, 0); 24162 } else { 24163 mutex_exit(SD_MUTEX(un)); 24164 } 24165 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 24166 sd_mhd_reset_notify_cb, (caddr_t)un); 24167 } else { 24168 /* 24169 * sd_mhd_watch_cb will restart the resvd recover timeout thread 24170 */ 24171 mutex_enter(SD_MUTEX(un)); 24172 un->un_resvd_status = resvd_status_save; 24173 mutex_exit(SD_MUTEX(un)); 24174 } 24175 return (rval); 24176 } 24177 24178 24179 /* 24180 * Function: sd_mhdioc_register_devid 24181 * 24182 * Description: This routine is the driver entry point for handling ioctl 24183 * requests to register the device id (MHIOCREREGISTERDEVID). 24184 * 24185 * Note: The implementation for this ioctl has been updated to 24186 * be consistent with the original PSARC case (1999/357) 24187 * (4375899, 4241671, 4220005) 24188 * 24189 * Arguments: dev - the device number 24190 * 24191 * Return Code: 0 24192 * ENXIO 24193 */ 24194 24195 static int 24196 sd_mhdioc_register_devid(dev_t dev) 24197 { 24198 struct sd_lun *un = NULL; 24199 int rval = 0; 24200 sd_ssc_t *ssc; 24201 24202 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24203 return (ENXIO); 24204 } 24205 24206 ASSERT(!mutex_owned(SD_MUTEX(un))); 24207 24208 mutex_enter(SD_MUTEX(un)); 24209 24210 /* If a devid already exists, de-register it */ 24211 if (un->un_devid != NULL) { 24212 ddi_devid_unregister(SD_DEVINFO(un)); 24213 /* 24214 * After unregister devid, needs to free devid memory 24215 */ 24216 ddi_devid_free(un->un_devid); 24217 un->un_devid = NULL; 24218 } 24219 24220 /* Check for reservation conflict */ 24221 mutex_exit(SD_MUTEX(un)); 24222 ssc = sd_ssc_init(un); 24223 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 24224 mutex_enter(SD_MUTEX(un)); 24225 24226 switch (rval) { 24227 case 0: 24228 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 24229 break; 24230 case EACCES: 24231 break; 24232 default: 24233 rval = EIO; 24234 } 24235 24236 mutex_exit(SD_MUTEX(un)); 24237 if (rval != 0) { 24238 if (rval == EIO) 24239 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24240 else 24241 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24242 } 24243 sd_ssc_fini(ssc); 24244 return (rval); 24245 } 24246 24247 24248 /* 24249 * Function: sd_mhdioc_inkeys 24250 * 24251 * Description: This routine is the driver entry point for handling ioctl 24252 * requests to issue the SCSI-3 Persistent In Read Keys command 24253 * to the device (MHIOCGRP_INKEYS). 24254 * 24255 * Arguments: dev - the device number 24256 * arg - user provided in_keys structure 24257 * flag - this argument is a pass through to ddi_copyxxx() 24258 * directly from the mode argument of ioctl(). 
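 *
 * Note: in a 64-bit kernel serving a 32-bit application
 *		(_MULTI_DATAMODEL), the ILP32 mhioc_inkeys32 form is
 *		converted below: the 32-bit li pointer is widened through
 *		uintptr_t before the common read-keys routine runs, and
 *		only the generation count is copied back out.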
24259 *
24260 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
24261 * ENXIO
24262 * EFAULT
24263 */
24264
24265 static int
24266 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
24267 {
24268 struct sd_lun *un;
24269 mhioc_inkeys_t inkeys;
24270 int rval = 0;
24271
24272 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24273 return (ENXIO);
24274 }
24275
24276 #ifdef _MULTI_DATAMODEL
24277 switch (ddi_model_convert_from(flag & FMODELS)) {
24278 case DDI_MODEL_ILP32: {
24279 struct mhioc_inkeys32 inkeys32;
24280
24281 if (ddi_copyin(arg, &inkeys32,
24282 sizeof (struct mhioc_inkeys32), flag) != 0) {
24283 return (EFAULT);
24284 }
24285 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
24286 if ((rval = sd_persistent_reservation_in_read_keys(un,
24287 &inkeys, flag)) != 0) {
24288 return (rval);
24289 }
24290 inkeys32.generation = inkeys.generation;
24291 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
24292 flag) != 0) {
24293 return (EFAULT);
24294 }
24295 break;
24296 }
24297 case DDI_MODEL_NONE:
24298 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
24299 flag) != 0) {
24300 return (EFAULT);
24301 }
24302 if ((rval = sd_persistent_reservation_in_read_keys(un,
24303 &inkeys, flag)) != 0) {
24304 return (rval);
24305 }
24306 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
24307 flag) != 0) {
24308 return (EFAULT);
24309 }
24310 break;
24311 }
24312
24313 #else /* ! _MULTI_DATAMODEL */
24314
24315 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
24316 return (EFAULT);
24317 }
24318 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
24319 if (rval != 0) {
24320 return (rval);
24321 }
24322 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
24323 return (EFAULT);
24324 }
24325
24326 #endif /* _MULTI_DATAMODEL */
24327
24328 return (rval);
24329 }
24330
24331
24332 /*
24333 * Function: sd_mhdioc_inresv
24334 *
24335 * Description: This routine is the driver entry point for handling ioctl
24336 * requests to issue the SCSI-3 Persistent In Read Reservations
24337 * command to the device (MHIOCGRP_INRESV).
24338 *
24339 * Arguments: dev - the device number
24340 * arg - user provided in_resv structure
24341 * flag - this argument is a pass through to ddi_copyxxx()
24342 * directly from the mode argument of ioctl().
24343 * 24344 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 24345 * ENXIO 24346 * EFAULT 24347 */ 24348 24349 static int 24350 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 24351 { 24352 struct sd_lun *un; 24353 mhioc_inresvs_t inresvs; 24354 int rval = 0; 24355 24356 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24357 return (ENXIO); 24358 } 24359 24360 #ifdef _MULTI_DATAMODEL 24361 24362 switch (ddi_model_convert_from(flag & FMODELS)) { 24363 case DDI_MODEL_ILP32: { 24364 struct mhioc_inresvs32 inresvs32; 24365 24366 if (ddi_copyin(arg, &inresvs32, 24367 sizeof (struct mhioc_inresvs32), flag) != 0) { 24368 return (EFAULT); 24369 } 24370 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 24371 if ((rval = sd_persistent_reservation_in_read_resv(un, 24372 &inresvs, flag)) != 0) { 24373 return (rval); 24374 } 24375 inresvs32.generation = inresvs.generation; 24376 if (ddi_copyout(&inresvs32, arg, 24377 sizeof (struct mhioc_inresvs32), flag) != 0) { 24378 return (EFAULT); 24379 } 24380 break; 24381 } 24382 case DDI_MODEL_NONE: 24383 if (ddi_copyin(arg, &inresvs, 24384 sizeof (mhioc_inresvs_t), flag) != 0) { 24385 return (EFAULT); 24386 } 24387 if ((rval = sd_persistent_reservation_in_read_resv(un, 24388 &inresvs, flag)) != 0) { 24389 return (rval); 24390 } 24391 if (ddi_copyout(&inresvs, arg, 24392 sizeof (mhioc_inresvs_t), flag) != 0) { 24393 return (EFAULT); 24394 } 24395 break; 24396 } 24397 24398 #else /* ! _MULTI_DATAMODEL */ 24399 24400 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 24401 return (EFAULT); 24402 } 24403 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 24404 if (rval != 0) { 24405 return (rval); 24406 } 24407 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 24408 return (EFAULT); 24409 } 24410 24411 #endif /* ! _MULTI_DATAMODEL */ 24412 24413 return (rval); 24414 } 24415 24416 24417 /* 24418 * The following routines support the clustering functionality described below 24419 * and implement lost reservation reclaim functionality. 24420 * 24421 * Clustering 24422 * ---------- 24423 * The clustering code uses two different, independent forms of SCSI 24424 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 24425 * Persistent Group Reservations. For any particular disk, it will use either 24426 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 24427 * 24428 * SCSI-2 24429 * The cluster software takes ownership of a multi-hosted disk by issuing the 24430 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 24431 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 24432 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 24433 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 24434 * driver. The meaning of failfast is that if the driver (on this host) ever 24435 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 24436 * it should immediately panic the host. The motivation for this ioctl is that 24437 * if this host does encounter reservation conflict, the underlying cause is 24438 * that some other host of the cluster has decided that this host is no longer 24439 * in the cluster and has seized control of the disks for itself. Since this 24440 * host is no longer in the cluster, it ought to panic itself. 
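 * (The failfast flag is recorded in un_resvd_status and acted upon by
 * sd_mhd_watch_cb() below when a reservation conflict is seen.)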
The
24441 * MHIOCENFAILFAST ioctl does two things:
24442 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT
24443 * error to panic the host
24444 * (b) it sets up a periodic timer to test whether this host still has
24445 * "access" (in that no other host has reserved the device): if the
24446 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
24447 * purpose of that periodic timer is to handle scenarios where the host is
24448 * otherwise temporarily quiescent, temporarily doing no real i/o.
24449 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host,
24450 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for
24451 * the device itself.
24452 *
24453 * SCSI-3 PGR
24454 * A direct semantic implementation of the SCSI-3 Persistent Reservation
24455 * facility is supported through the shared multihost disk ioctls
24456 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
24457 * MHIOCGRP_PREEMPTANDABORT)
24458 *
24459 * Reservation Reclaim:
24460 * --------------------
24461 * To support the lost reservation reclaim operations, this driver creates a
24462 * single thread to handle reinstating reservations on all devices that have
24463 * lost reservations. sd_resv_reclaim_requests are logged for all devices that
24464 * have lost reservations when the scsi watch facility calls back
24465 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the
24466 * requests to regain the lost reservations.
24467 */
24468
24469 /*
24470 * Function: sd_check_mhd()
24471 *
24472 * Description: This function sets up and submits a scsi watch request or
24473 * terminates an existing watch request. This routine is used in
24474 * support of reservation reclaim.
24475 *
24476 * Arguments: dev - the device 'dev_t' is used for context to discriminate
24477 * among multiple watches that share the callback function
24478 * interval - the number of milliseconds specifying the watch
24479 * interval for issuing TEST UNIT READY commands. If
24480 * set to 0 the watch should be terminated. If the
24481 * interval is set to 0 and if the device is required
24482 * to hold reservation while disabling failfast, the
24483 * watch is restarted with an interval of
24484 * reinstate_resv_delay.
24485 *
24486 * Return Code: 0 - Successful submit/terminate of scsi watch request
24487 * ENXIO - Indicates an invalid device was specified
24488 * EAGAIN - Unable to submit the scsi watch request
24489 */
24490
24491 static int
24492 sd_check_mhd(dev_t dev, int interval)
24493 {
24494 struct sd_lun *un;
24495 opaque_t token;
24496
24497 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24498 return (ENXIO);
24499 }
24500
24501 /* Is this a watch termination request? */
24502 if (interval == 0) {
24503 mutex_enter(SD_MUTEX(un));
24504 /* If there is an existing watch task then terminate it. */
24505 if (un->un_mhd_token) {
24506 token = un->un_mhd_token;
24507 un->un_mhd_token = NULL;
24508 mutex_exit(SD_MUTEX(un));
24509 (void) scsi_watch_request_terminate(token,
24510 SCSI_WATCH_TERMINATE_ALL_WAIT);
24511 mutex_enter(SD_MUTEX(un));
24512 } else {
24513 mutex_exit(SD_MUTEX(un));
24514 /*
24515 * Note: If we return here we don't check for the
24516 * failfast case. This is the original legacy
24517 * implementation, but perhaps we should be checking
24518 * the failfast case.
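 * As it stands, a termination request that finds no watch
 * pending simply returns, without considering whether the
 * watch should be restarted for a still-held reservation.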
24519 */ 24520 return (0); 24521 } 24522 /* 24523 * If the device is required to hold reservation while 24524 * disabling failfast, we need to restart the scsi_watch 24525 * routine with an interval of reinstate_resv_delay. 24526 */ 24527 if (un->un_resvd_status & SD_RESERVE) { 24528 interval = sd_reinstate_resv_delay/1000; 24529 } else { 24530 /* no failfast so bail */ 24531 mutex_exit(SD_MUTEX(un)); 24532 return (0); 24533 } 24534 mutex_exit(SD_MUTEX(un)); 24535 } 24536 24537 /* 24538 * adjust minimum time interval to 1 second, 24539 * and convert from msecs to usecs 24540 */ 24541 if (interval > 0 && interval < 1000) { 24542 interval = 1000; 24543 } 24544 interval *= 1000; 24545 24546 /* 24547 * submit the request to the scsi_watch service 24548 */ 24549 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 24550 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 24551 if (token == NULL) { 24552 return (EAGAIN); 24553 } 24554 24555 /* 24556 * save token for termination later on 24557 */ 24558 mutex_enter(SD_MUTEX(un)); 24559 un->un_mhd_token = token; 24560 mutex_exit(SD_MUTEX(un)); 24561 return (0); 24562 } 24563 24564 24565 /* 24566 * Function: sd_mhd_watch_cb() 24567 * 24568 * Description: This function is the call back function used by the scsi watch 24569 * facility. The scsi watch facility sends the "Test Unit Ready" 24570 * and processes the status. If applicable (i.e. a "Unit Attention" 24571 * status and automatic "Request Sense" not used) the scsi watch 24572 * facility will send a "Request Sense" and retrieve the sense data 24573 * to be passed to this callback function. In either case the 24574 * automatic "Request Sense" or the facility submitting one, this 24575 * callback is passed the status and sense data. 24576 * 24577 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24578 * among multiple watches that share this callback function 24579 * resultp - scsi watch facility result packet containing scsi 24580 * packet, status byte and sense data 24581 * 24582 * Return Code: 0 - continue the watch task 24583 * non-zero - terminate the watch task 24584 */ 24585 24586 static int 24587 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 24588 { 24589 struct sd_lun *un; 24590 struct scsi_status *statusp; 24591 uint8_t *sensep; 24592 struct scsi_pkt *pkt; 24593 uchar_t actual_sense_length; 24594 dev_t dev = (dev_t)arg; 24595 24596 ASSERT(resultp != NULL); 24597 statusp = resultp->statusp; 24598 sensep = (uint8_t *)resultp->sensep; 24599 pkt = resultp->pkt; 24600 actual_sense_length = resultp->actual_sense_length; 24601 24602 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24603 return (ENXIO); 24604 } 24605 24606 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24607 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 24608 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 24609 24610 /* Begin processing of the status and/or sense data */ 24611 if (pkt->pkt_reason != CMD_CMPLT) { 24612 /* Handle the incomplete packet */ 24613 sd_mhd_watch_incomplete(un, pkt); 24614 return (0); 24615 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 24616 if (*((unsigned char *)statusp) 24617 == STATUS_RESERVATION_CONFLICT) { 24618 /* 24619 * Handle a reservation conflict by panicking if 24620 * configured for failfast or by logging the conflict 24621 * and updating the reservation status 24622 */ 24623 mutex_enter(SD_MUTEX(un)); 24624 if ((un->un_resvd_status & SD_FAILFAST) && 24625 (sd_failfast_enable)) { 24626 
sd_panic_for_res_conflict(un); 24627 /*NOTREACHED*/ 24628 } 24629 SD_INFO(SD_LOG_IOCTL_MHD, un, 24630 "sd_mhd_watch_cb: Reservation Conflict\n"); 24631 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 24632 mutex_exit(SD_MUTEX(un)); 24633 } 24634 } 24635 24636 if (sensep != NULL) { 24637 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 24638 mutex_enter(SD_MUTEX(un)); 24639 if ((scsi_sense_asc(sensep) == 24640 SD_SCSI_RESET_SENSE_CODE) && 24641 (un->un_resvd_status & SD_RESERVE)) { 24642 /* 24643 * The additional sense code indicates a power 24644 * on or bus device reset has occurred; update 24645 * the reservation status. 24646 */ 24647 un->un_resvd_status |= 24648 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24649 SD_INFO(SD_LOG_IOCTL_MHD, un, 24650 "sd_mhd_watch_cb: Lost Reservation\n"); 24651 } 24652 } else { 24653 return (0); 24654 } 24655 } else { 24656 mutex_enter(SD_MUTEX(un)); 24657 } 24658 24659 if ((un->un_resvd_status & SD_RESERVE) && 24660 (un->un_resvd_status & SD_LOST_RESERVE)) { 24661 if (un->un_resvd_status & SD_WANT_RESERVE) { 24662 /* 24663 * A reset occurred in between the last probe and this 24664 * one so if a timeout is pending cancel it. 24665 */ 24666 if (un->un_resvd_timeid) { 24667 timeout_id_t temp_id = un->un_resvd_timeid; 24668 un->un_resvd_timeid = NULL; 24669 mutex_exit(SD_MUTEX(un)); 24670 (void) untimeout(temp_id); 24671 mutex_enter(SD_MUTEX(un)); 24672 } 24673 un->un_resvd_status &= ~SD_WANT_RESERVE; 24674 } 24675 if (un->un_resvd_timeid == 0) { 24676 /* Schedule a timeout to handle the lost reservation */ 24677 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 24678 (void *)dev, 24679 drv_usectohz(sd_reinstate_resv_delay)); 24680 } 24681 } 24682 mutex_exit(SD_MUTEX(un)); 24683 return (0); 24684 } 24685 24686 24687 /* 24688 * Function: sd_mhd_watch_incomplete() 24689 * 24690 * Description: This function is used to find out why a scsi pkt sent by the 24691 * scsi watch facility was not completed. Under some scenarios this 24692 * routine will return. Otherwise it will send a bus reset to see 24693 * if the drive is still online. 24694 * 24695 * Arguments: un - driver soft state (unit) structure 24696 * pkt - incomplete scsi pkt 24697 */ 24698 24699 static void 24700 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 24701 { 24702 int be_chatty; 24703 int perr; 24704 24705 ASSERT(pkt != NULL); 24706 ASSERT(un != NULL); 24707 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 24708 perr = (pkt->pkt_statistics & STAT_PERR); 24709 24710 mutex_enter(SD_MUTEX(un)); 24711 if (un->un_state == SD_STATE_DUMPING) { 24712 mutex_exit(SD_MUTEX(un)); 24713 return; 24714 } 24715 24716 switch (pkt->pkt_reason) { 24717 case CMD_UNX_BUS_FREE: 24718 /* 24719 * If we had a parity error that caused the target to drop BSY*, 24720 * don't be chatty about it. 24721 */ 24722 if (perr && be_chatty) { 24723 be_chatty = 0; 24724 } 24725 break; 24726 case CMD_TAG_REJECT: 24727 /* 24728 * The SCSI-2 spec states that a tag reject will be sent by the 24729 * target if tagged queuing is not supported. A tag reject may 24730 * also be sent during certain initialization periods or to 24731 * control internal resources. For the latter case the target 24732 * may also return Queue Full. 24733 * 24734 * If this driver receives a tag reject from a target that is 24735 * going through an init period or controlling internal 24736 * resources tagged queuing will be disabled. 
This is less
24737 * than optimal behavior, but the driver is unable to determine
24738 * the target state and assumes tagged queueing is not supported.
24739 */
24740 pkt->pkt_flags = 0;
24741 un->un_tagflags = 0;
24742
24743 if (un->un_f_opt_queueing == TRUE) {
24744 un->un_throttle = min(un->un_throttle, 3);
24745 } else {
24746 un->un_throttle = 1;
24747 }
24748 mutex_exit(SD_MUTEX(un));
24749 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
24750 mutex_enter(SD_MUTEX(un));
24751 break;
24752 case CMD_INCOMPLETE:
24753 /*
24754 * The transport stopped in an abnormal state; fall through and
24755 * reset the target and/or bus unless selection did not complete
24756 * (indicated by STATE_GOT_BUS), in which case we don't want to
24757 * go through a target/bus reset.
24758 */
24759 if (pkt->pkt_state == STATE_GOT_BUS) {
24760 break;
24761 }
24762 /*FALLTHROUGH*/
24763
24764 case CMD_TIMEOUT:
24765 default:
24766 /*
24767 * The lun may still be running the command, so a lun reset
24768 * should be attempted. If the lun reset fails or cannot be
24769 * issued, then try a target reset. Lastly, try a bus reset.
24770 */
24771 if ((pkt->pkt_statistics &
24772 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
24773 int reset_retval = 0;
24774 mutex_exit(SD_MUTEX(un));
24775 if (un->un_f_allow_bus_device_reset == TRUE) {
24776 if (un->un_f_lun_reset_enabled == TRUE) {
24777 reset_retval =
24778 scsi_reset(SD_ADDRESS(un),
24779 RESET_LUN);
24780 }
24781 if (reset_retval == 0) {
24782 reset_retval =
24783 scsi_reset(SD_ADDRESS(un),
24784 RESET_TARGET);
24785 }
24786 }
24787 if (reset_retval == 0) {
24788 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
24789 }
24790 mutex_enter(SD_MUTEX(un));
24791 }
24792 break;
24793 }
24794
24795 /* A device/bus reset has occurred; update the reservation status. */
24796 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
24797 (STAT_BUS_RESET | STAT_DEV_RESET))) {
24798 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
24799 un->un_resvd_status |=
24800 (SD_LOST_RESERVE | SD_WANT_RESERVE);
24801 SD_INFO(SD_LOG_IOCTL_MHD, un,
24802 "sd_mhd_watch_incomplete: Lost Reservation\n");
24803 }
24804 }
24805
24806 /*
24807 * The disk has been turned off; update the device state.
24808 *
24809 * Note: Should we be offlining the disk here?
24810 */
24811 if (pkt->pkt_state == STATE_GOT_BUS) {
24812 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
24813 "Disk not responding to selection\n");
24814 if (un->un_state != SD_STATE_OFFLINE) {
24815 New_state(un, SD_STATE_OFFLINE);
24816 }
24817 } else if (be_chatty) {
24818 /*
24819 * Suppress messages if they are all the same pkt_reason;
24820 * with TQ, many (up to 256) are returned with the same
24821 * pkt_reason.
24822 */
24823 if (pkt->pkt_reason != un->un_last_pkt_reason) {
24824 SD_ERROR(SD_LOG_IOCTL_MHD, un,
24825 "sd_mhd_watch_incomplete: "
24826 "SCSI transport failed: reason '%s'\n",
24827 scsi_rname(pkt->pkt_reason));
24828 }
24829 }
24830 un->un_last_pkt_reason = pkt->pkt_reason;
24831 mutex_exit(SD_MUTEX(un));
24832 }
24833
24834
24835 /*
24836 * Function: sd_sname()
24837 *
24838 * Description: This is a simple little routine to return a string containing
24839 * a printable description of the command status byte for use in
24840 * logging.
24841 *
24842 * Arguments: status - the command status byte
24843 *
24844 * Return Code: char * - string containing status description.
24845 */ 24846 24847 static char * 24848 sd_sname(uchar_t status) 24849 { 24850 switch (status & STATUS_MASK) { 24851 case STATUS_GOOD: 24852 return ("good status"); 24853 case STATUS_CHECK: 24854 return ("check condition"); 24855 case STATUS_MET: 24856 return ("condition met"); 24857 case STATUS_BUSY: 24858 return ("busy"); 24859 case STATUS_INTERMEDIATE: 24860 return ("intermediate"); 24861 case STATUS_INTERMEDIATE_MET: 24862 return ("intermediate - condition met"); 24863 case STATUS_RESERVATION_CONFLICT: 24864 return ("reservation_conflict"); 24865 case STATUS_TERMINATED: 24866 return ("command terminated"); 24867 case STATUS_QFULL: 24868 return ("queue full"); 24869 default: 24870 return ("<unknown status>"); 24871 } 24872 } 24873 24874 24875 /* 24876 * Function: sd_mhd_resvd_recover() 24877 * 24878 * Description: This function adds a reservation entry to the 24879 * sd_resv_reclaim_request list and signals the reservation 24880 * reclaim thread that there is work pending. If the reservation 24881 * reclaim thread has not been previously created this function 24882 * will kick it off. 24883 * 24884 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24885 * among multiple watches that share this callback function 24886 * 24887 * Context: This routine is called by timeout() and is run in interrupt 24888 * context. It must not sleep or call other functions which may 24889 * sleep. 24890 */ 24891 24892 static void 24893 sd_mhd_resvd_recover(void *arg) 24894 { 24895 dev_t dev = (dev_t)arg; 24896 struct sd_lun *un; 24897 struct sd_thr_request *sd_treq = NULL; 24898 struct sd_thr_request *sd_cur = NULL; 24899 struct sd_thr_request *sd_prev = NULL; 24900 int already_there = 0; 24901 24902 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24903 return; 24904 } 24905 24906 mutex_enter(SD_MUTEX(un)); 24907 un->un_resvd_timeid = NULL; 24908 if (un->un_resvd_status & SD_WANT_RESERVE) { 24909 /* 24910 * There was a reset so don't issue the reserve, allow the 24911 * sd_mhd_watch_cb callback function to notice this and 24912 * reschedule the timeout for reservation. 24913 */ 24914 mutex_exit(SD_MUTEX(un)); 24915 return; 24916 } 24917 mutex_exit(SD_MUTEX(un)); 24918 24919 /* 24920 * Add this device to the sd_resv_reclaim_request list and the 24921 * sd_resv_reclaim_thread should take care of the rest. 24922 * 24923 * Note: We can't sleep in this context so if the memory allocation 24924 * fails allow the sd_mhd_watch_cb callback function to notice this and 24925 * reschedule the timeout for reservation. 
(4378460)
24926 */
24927 sd_treq = (struct sd_thr_request *)
24928 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
24929 if (sd_treq == NULL) {
24930 return;
24931 }
24932
24933 sd_treq->sd_thr_req_next = NULL;
24934 sd_treq->dev = dev;
24935 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
24936 if (sd_tr.srq_thr_req_head == NULL) {
24937 sd_tr.srq_thr_req_head = sd_treq;
24938 } else {
24939 sd_cur = sd_prev = sd_tr.srq_thr_req_head;
24940 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
24941 if (sd_cur->dev == dev) {
24942 /*
24943 * Already in the queue, so don't log
24944 * another request for this device.
24945 */
24946 already_there = 1;
24947 break;
24948 }
24949 sd_prev = sd_cur;
24950 }
24951 if (!already_there) {
24952 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
24953 "logging request for %lx\n", dev);
24954 sd_prev->sd_thr_req_next = sd_treq;
24955 } else {
24956 kmem_free(sd_treq, sizeof (struct sd_thr_request));
24957 }
24958 }
24959
24960 /*
24961 * Create a kernel thread to do the reservation reclaim and free up
24962 * this context; since we run in interrupt context, we cannot block
24963 * here while the reclaim is performed.
24964 */
24965 if (sd_tr.srq_resv_reclaim_thread == NULL)
24966 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
24967 sd_resv_reclaim_thread, NULL,
24968 0, &p0, TS_RUN, v.v_maxsyspri - 2);
24969
24970 /* Tell the reservation reclaim thread that it has work to do */
24971 cv_signal(&sd_tr.srq_resv_reclaim_cv);
24972 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
24973 }
24974
24975 /*
24976 * Function: sd_resv_reclaim_thread()
24977 *
24978 * Description: This function implements the reservation reclaim operations.
24979 *
24980 * Arguments: none; the thread processes the sd_resv_reclaim_request list
24981 * that sd_mhd_resvd_recover() builds up.
24982 */
24983
24984 static void
24985 sd_resv_reclaim_thread()
24986 {
24987 struct sd_lun *un;
24988 struct sd_thr_request *sd_mhreq;
24989
24990 /* Wait for work */
24991 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
24992 if (sd_tr.srq_thr_req_head == NULL) {
24993 cv_wait(&sd_tr.srq_resv_reclaim_cv,
24994 &sd_tr.srq_resv_reclaim_mutex);
24995 }
24996
24997 /* Loop while we have work */
24998 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
24999 un = ddi_get_soft_state(sd_state,
25000 SDUNIT(sd_tr.srq_thr_cur_req->dev));
25001 if (un == NULL) {
25002 /*
25003 * The softstate structure is NULL, so just
25004 * dequeue the request and continue.
25005 */
25006 sd_tr.srq_thr_req_head =
25007 sd_tr.srq_thr_cur_req->sd_thr_req_next;
25008 kmem_free(sd_tr.srq_thr_cur_req,
25009 sizeof (struct sd_thr_request));
25010 continue;
25011 }
25012
25013 /* Dequeue the request. */
25014 sd_mhreq = sd_tr.srq_thr_cur_req;
25015 sd_tr.srq_thr_req_head =
25016 sd_tr.srq_thr_cur_req->sd_thr_req_next;
25017 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25018
25019 /*
25020 * Reclaim reservation only if SD_RESERVE is still set. There
25021 * may have been a call to MHIOCRELEASE before we got here.
25022 */
25023 mutex_enter(SD_MUTEX(un));
25024 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25025 /*
25026 * Note: The SD_LOST_RESERVE flag is cleared before
25027 * reclaiming the reservation.
If this is done after the 25028 * call to sd_reserve_release a reservation loss in the 25029 * window between pkt completion of reserve cmd and 25030 * mutex_enter below may not be recognized 25031 */ 25032 un->un_resvd_status &= ~SD_LOST_RESERVE; 25033 mutex_exit(SD_MUTEX(un)); 25034 25035 if (sd_reserve_release(sd_mhreq->dev, 25036 SD_RESERVE) == 0) { 25037 mutex_enter(SD_MUTEX(un)); 25038 un->un_resvd_status |= SD_RESERVE; 25039 mutex_exit(SD_MUTEX(un)); 25040 SD_INFO(SD_LOG_IOCTL_MHD, un, 25041 "sd_resv_reclaim_thread: " 25042 "Reservation Recovered\n"); 25043 } else { 25044 mutex_enter(SD_MUTEX(un)); 25045 un->un_resvd_status |= SD_LOST_RESERVE; 25046 mutex_exit(SD_MUTEX(un)); 25047 SD_INFO(SD_LOG_IOCTL_MHD, un, 25048 "sd_resv_reclaim_thread: Failed " 25049 "Reservation Recovery\n"); 25050 } 25051 } else { 25052 mutex_exit(SD_MUTEX(un)); 25053 } 25054 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25055 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 25056 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25057 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 25058 /* 25059 * wakeup the destroy thread if anyone is waiting on 25060 * us to complete. 25061 */ 25062 cv_signal(&sd_tr.srq_inprocess_cv); 25063 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25064 "sd_resv_reclaim_thread: cv_signalling current request \n"); 25065 } 25066 25067 /* 25068 * cleanup the sd_tr structure now that this thread will not exist 25069 */ 25070 ASSERT(sd_tr.srq_thr_req_head == NULL); 25071 ASSERT(sd_tr.srq_thr_cur_req == NULL); 25072 sd_tr.srq_resv_reclaim_thread = NULL; 25073 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25074 thread_exit(); 25075 } 25076 25077 25078 /* 25079 * Function: sd_rmv_resv_reclaim_req() 25080 * 25081 * Description: This function removes any pending reservation reclaim requests 25082 * for the specified device. 25083 * 25084 * Arguments: dev - the device 'dev_t' 25085 */ 25086 25087 static void 25088 sd_rmv_resv_reclaim_req(dev_t dev) 25089 { 25090 struct sd_thr_request *sd_mhreq; 25091 struct sd_thr_request *sd_prev; 25092 25093 /* Remove a reservation reclaim request from the list */ 25094 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25095 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 25096 /* 25097 * We are attempting to reinstate reservation for 25098 * this device. We wait for sd_reserve_release() 25099 * to return before we return. 25100 */ 25101 cv_wait(&sd_tr.srq_inprocess_cv, 25102 &sd_tr.srq_resv_reclaim_mutex); 25103 } else { 25104 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 25105 if (sd_mhreq && sd_mhreq->dev == dev) { 25106 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 25107 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25108 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25109 return; 25110 } 25111 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 25112 if (sd_mhreq && sd_mhreq->dev == dev) { 25113 break; 25114 } 25115 sd_prev = sd_mhreq; 25116 } 25117 if (sd_mhreq != NULL) { 25118 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 25119 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25120 } 25121 } 25122 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25123 } 25124 25125 25126 /* 25127 * Function: sd_mhd_reset_notify_cb() 25128 * 25129 * Description: This is a call back function for scsi_reset_notify. This 25130 * function updates the softstate reserved status and logs the 25131 * reset. The driver scsi watch facility callback function 25132 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 25133 * will reclaim the reservation. 
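 * This callback is registered with scsi_reset_notify(SD_ADDRESS(un),
 * SCSI_RESET_NOTIFY, sd_mhd_reset_notify_cb, (caddr_t)un) when take
 * ownership succeeds, and cancelled with SCSI_RESET_CANCEL when the
 * reservation is released.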
25134 *
25135 * Arguments: arg - driver soft state (unit) structure
25136 */
25137
25138 static void
25139 sd_mhd_reset_notify_cb(caddr_t arg)
25140 {
25141 struct sd_lun *un = (struct sd_lun *)arg;
25142
25143 mutex_enter(SD_MUTEX(un));
25144 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25145 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
25146 SD_INFO(SD_LOG_IOCTL_MHD, un,
25147 "sd_mhd_reset_notify_cb: Lost Reservation\n");
25148 }
25149 mutex_exit(SD_MUTEX(un));
25150 }
25151
25152
25153 /*
25154 * Function: sd_take_ownership()
25155 *
25156 * Description: This routine implements an algorithm to achieve a stable
25157 * reservation on disks which don't implement priority reserve,
25158 * and makes sure that other hosts lose re-reservation attempts.
25159 * This algorithm consists of a loop that keeps issuing the RESERVE
25160 * for some period of time (min_ownership_delay, default 6 seconds).
25161 * During that loop, it looks to see if there has been a bus device
25162 * reset or bus reset (both of which cause an existing reservation
25163 * to be lost). If the reservation is lost, issue RESERVE until a
25164 * period of min_ownership_delay with no resets has gone by, or
25165 * until max_ownership_delay has expired. This loop ensures that
25166 * the host really did manage to reserve the device, in spite of
25167 * resets. The looping for min_ownership_delay (default six
25168 * seconds) is important to early generation clustering products,
25169 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an
25170 * MHIOCENFAILFAST periodic timer of two seconds. By having
25171 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having
25172 * MHIOCENFAILFAST poll every two seconds, the idea is that by the
25173 * time the MHIOCTKOWN ioctl returns, the other host (if any) will
25174 * have already noticed, via the MHIOCENFAILFAST polling, that it
25175 * no longer "owns" the disk and will have panicked itself. Thus,
25176 * the host issuing the MHIOCTKOWN is assured (with timing
25177 * dependencies) that by the time it actually starts to use the
25178 * disk for real work, the old owner is no longer accessing it.
25179 *
25180 * min_ownership_delay is the minimum amount of time for which the
25181 * disk must be reserved continuously devoid of resets before the
25182 * MHIOCTKOWN ioctl will return success.
25183 *
25184 * max_ownership_delay indicates the amount of time by which the
25185 * take ownership should succeed or time out with an error.
25186 *
25187 * Arguments: dev - the device 'dev_t'
25188 * *p - struct containing timing info.
25189 *
25190 * Return Code: 0 for success or error code
25191 */
25192
25193 static int
25194 sd_take_ownership(dev_t dev, struct mhioctkown *p)
25195 {
25196 struct sd_lun *un;
25197 int rval;
25198 int err;
25199 int reservation_count = 0;
25200 int min_ownership_delay = 6000000; /* in usec */
25201 int max_ownership_delay = 30000000; /* in usec */
25202 clock_t start_time; /* starting time of this algorithm */
25203 clock_t end_time; /* time limit for giving up */
25204 clock_t ownership_time; /* time limit for stable ownership */
25205 clock_t current_time;
25206 clock_t previous_current_time;
25207
25208 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25209 return (ENXIO);
25210 }
25211
25212 /*
25213 * Attempt a device reservation. A priority reservation is requested.
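 * If another host holds the reservation, sd_reserve_release() will
 * attempt to break it with a LUN, target, or bus reset before
 * reissuing the RESERVE (see sd_reserve_release() below).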
25214 */ 25215 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 25216 != SD_SUCCESS) { 25217 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25218 "sd_take_ownership: return(1)=%d\n", rval); 25219 return (rval); 25220 } 25221 25222 /* Update the softstate reserved status to indicate the reservation */ 25223 mutex_enter(SD_MUTEX(un)); 25224 un->un_resvd_status |= SD_RESERVE; 25225 un->un_resvd_status &= 25226 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 25227 mutex_exit(SD_MUTEX(un)); 25228 25229 if (p != NULL) { 25230 if (p->min_ownership_delay != 0) { 25231 min_ownership_delay = p->min_ownership_delay * 1000; 25232 } 25233 if (p->max_ownership_delay != 0) { 25234 max_ownership_delay = p->max_ownership_delay * 1000; 25235 } 25236 } 25237 SD_INFO(SD_LOG_IOCTL_MHD, un, 25238 "sd_take_ownership: min, max delays: %d, %d\n", 25239 min_ownership_delay, max_ownership_delay); 25240 25241 start_time = ddi_get_lbolt(); 25242 current_time = start_time; 25243 ownership_time = current_time + drv_usectohz(min_ownership_delay); 25244 end_time = start_time + drv_usectohz(max_ownership_delay); 25245 25246 while (current_time - end_time < 0) { 25247 delay(drv_usectohz(500000)); 25248 25249 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 25250 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 25251 mutex_enter(SD_MUTEX(un)); 25252 rval = (un->un_resvd_status & 25253 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 25254 mutex_exit(SD_MUTEX(un)); 25255 break; 25256 } 25257 } 25258 previous_current_time = current_time; 25259 current_time = ddi_get_lbolt(); 25260 mutex_enter(SD_MUTEX(un)); 25261 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 25262 ownership_time = ddi_get_lbolt() + 25263 drv_usectohz(min_ownership_delay); 25264 reservation_count = 0; 25265 } else { 25266 reservation_count++; 25267 } 25268 un->un_resvd_status |= SD_RESERVE; 25269 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 25270 mutex_exit(SD_MUTEX(un)); 25271 25272 SD_INFO(SD_LOG_IOCTL_MHD, un, 25273 "sd_take_ownership: ticks for loop iteration=%ld, " 25274 "reservation=%s\n", (current_time - previous_current_time), 25275 reservation_count ? 
"ok" : "reclaimed"); 25276 25277 if (current_time - ownership_time >= 0 && 25278 reservation_count >= 4) { 25279 rval = 0; /* Achieved a stable ownership */ 25280 break; 25281 } 25282 if (current_time - end_time >= 0) { 25283 rval = EACCES; /* No ownership in max possible time */ 25284 break; 25285 } 25286 } 25287 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25288 "sd_take_ownership: return(2)=%d\n", rval); 25289 return (rval); 25290 } 25291 25292 25293 /* 25294 * Function: sd_reserve_release() 25295 * 25296 * Description: This function builds and sends scsi RESERVE, RELEASE, and 25297 * PRIORITY RESERVE commands based on a user specified command type 25298 * 25299 * Arguments: dev - the device 'dev_t' 25300 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 25301 * SD_RESERVE, SD_RELEASE 25302 * 25303 * Return Code: 0 or Error Code 25304 */ 25305 25306 static int 25307 sd_reserve_release(dev_t dev, int cmd) 25308 { 25309 struct uscsi_cmd *com = NULL; 25310 struct sd_lun *un = NULL; 25311 char cdb[CDB_GROUP0]; 25312 int rval; 25313 25314 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 25315 (cmd == SD_PRIORITY_RESERVE)); 25316 25317 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25318 return (ENXIO); 25319 } 25320 25321 /* instantiate and initialize the command and cdb */ 25322 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25323 bzero(cdb, CDB_GROUP0); 25324 com->uscsi_flags = USCSI_SILENT; 25325 com->uscsi_timeout = un->un_reserve_release_time; 25326 com->uscsi_cdblen = CDB_GROUP0; 25327 com->uscsi_cdb = cdb; 25328 if (cmd == SD_RELEASE) { 25329 cdb[0] = SCMD_RELEASE; 25330 } else { 25331 cdb[0] = SCMD_RESERVE; 25332 } 25333 25334 /* Send the command. */ 25335 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25336 SD_PATH_STANDARD); 25337 25338 /* 25339 * "break" a reservation that is held by another host, by issuing a 25340 * reset if priority reserve is desired, and we could not get the 25341 * device. 25342 */ 25343 if ((cmd == SD_PRIORITY_RESERVE) && 25344 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25345 /* 25346 * First try to reset the LUN. If we cannot, then try a target 25347 * reset, followed by a bus reset if the target reset fails. 25348 */ 25349 int reset_retval = 0; 25350 if (un->un_f_lun_reset_enabled == TRUE) { 25351 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 25352 } 25353 if (reset_retval == 0) { 25354 /* The LUN reset either failed or was not issued */ 25355 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25356 } 25357 if ((reset_retval == 0) && 25358 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 25359 rval = EIO; 25360 kmem_free(com, sizeof (*com)); 25361 return (rval); 25362 } 25363 25364 bzero(com, sizeof (struct uscsi_cmd)); 25365 com->uscsi_flags = USCSI_SILENT; 25366 com->uscsi_cdb = cdb; 25367 com->uscsi_cdblen = CDB_GROUP0; 25368 com->uscsi_timeout = 5; 25369 25370 /* 25371 * Reissue the last reserve command, this time without request 25372 * sense. Assume that it is just a regular reserve command. 25373 */ 25374 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25375 SD_PATH_STANDARD); 25376 } 25377 25378 /* Return an error if still getting a reservation conflict. 
*/
25379 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
25380 rval = EACCES;
25381 }
25382
25383 kmem_free(com, sizeof (*com));
25384 return (rval);
25385 }
25386
25387
25388 #define SD_NDUMP_RETRIES 12
25389 /*
25390 * System Crash Dump routine
25391 */
25392
25393 static int
25394 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
25395 {
25396 int instance;
25397 int partition;
25398 int i;
25399 int err;
25400 struct sd_lun *un;
25401 struct scsi_pkt *wr_pktp;
25402 struct buf *wr_bp;
25403 struct buf wr_buf;
25404 daddr_t tgt_byte_offset; /* rmw - byte offset for target */
25405 daddr_t tgt_blkno; /* rmw - blkno for target */
25406 size_t tgt_byte_count; /* rmw - # of bytes to xfer */
25407 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */
25408 size_t io_start_offset;
25409 int doing_rmw = FALSE;
25410 int rval;
25411 ssize_t dma_resid;
25412 daddr_t oblkno;
25413 diskaddr_t nblks = 0;
25414 diskaddr_t start_block;
25415
25416 instance = SDUNIT(dev);
25417 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
25418 !SD_IS_VALID_LABEL(un) || ISCD(un)) {
25419 return (ENXIO);
25420 }
25421
25422 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))
25423
25424 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");
25425
25426 partition = SDPART(dev);
25427 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);
25428
25429 if (!(NOT_DEVBSIZE(un))) {
25430 int secmask = 0;
25431 int blknomask = 0;
25432
25433 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
25434 secmask = un->un_tgt_blocksize - 1;
25435
25436 if (blkno & blknomask) {
25437 SD_TRACE(SD_LOG_DUMP, un,
25438 "sddump: dump start block not modulo %d\n",
25439 un->un_tgt_blocksize);
25440 return (EINVAL);
25441 }
25442
25443 if ((nblk * DEV_BSIZE) & secmask) {
25444 SD_TRACE(SD_LOG_DUMP, un,
25445 "sddump: dump length not modulo %d\n",
25446 un->un_tgt_blocksize);
25447 return (EINVAL);
25448 }
25449
25450 }
25451
25452 /* Validate the blocks to dump against the partition size. */
25453
25454 (void) cmlb_partinfo(un->un_cmlbhandle, partition,
25455 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);
25456
25457 if (NOT_DEVBSIZE(un)) {
25458 if ((blkno + nblk) > nblks) {
25459 SD_TRACE(SD_LOG_DUMP, un,
25460 "sddump: dump range larger than partition: "
25461 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25462 blkno, nblk, nblks);
25463 return (EINVAL);
25464 }
25465 } else {
25466 if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) +
25467 (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) {
25468 SD_TRACE(SD_LOG_DUMP, un,
25469 "sddump: dump range larger than partition: "
25470 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25471 blkno, nblk, nblks);
25472 return (EINVAL);
25473 }
25474 }
25475
25476 mutex_enter(&un->un_pm_mutex);
25477 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
25478 struct scsi_pkt *start_pktp;
25479
25480 mutex_exit(&un->un_pm_mutex);
25481
25482 /*
25483 * Use the pm framework to power on the HBA first.
25484 */
25485 (void) pm_raise_power(SD_DEVINFO(un), 0,
25486 SD_PM_STATE_ACTIVE(un));
25487
25488 /*
25489 * Dump no longer uses sdpower to power on a device; it's done
25490 * in-line here so it can be done in polled mode.
25491 */
25492
25493 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");
25494
25495 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
25496 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);
25497
25498 if (start_pktp == NULL) {
25499 /* We were not given a SCSI packet, fail.
*/
25500 return (EIO);
25501 }
25502 bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
25503 start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
25504 start_pktp->pkt_cdbp[4] = SD_TARGET_START;
25505 start_pktp->pkt_flags = FLAG_NOINTR;
25506
25507 mutex_enter(SD_MUTEX(un));
25508 SD_FILL_SCSI1_LUN(un, start_pktp);
25509 mutex_exit(SD_MUTEX(un));
25510 /*
25511 * Scsi_poll returns 0 (success) if the command completes and
25512 * the status block is STATUS_GOOD.
25513 */
25514 if (sd_scsi_poll(un, start_pktp) != 0) {
25515 scsi_destroy_pkt(start_pktp);
25516 return (EIO);
25517 }
25518 scsi_destroy_pkt(start_pktp);
25519 (void) sd_pm_state_change(un, SD_PM_STATE_ACTIVE(un),
25520 SD_PM_STATE_CHANGE);
25521 } else {
25522 mutex_exit(&un->un_pm_mutex);
25523 }
25524
25525 mutex_enter(SD_MUTEX(un));
25526 un->un_throttle = 0;
25527
25528 /*
25529 * The first time through, reset the specific target device.
25530 * However, when cpr calls sddump we know that sd is in
25531 * a good state, so no bus reset is required.
25532 * Clear sense data via Request Sense cmd.
25533 * In sddump we don't care about allow_bus_device_reset anymore.
25534 */
25535
25536 if ((un->un_state != SD_STATE_SUSPENDED) &&
25537 (un->un_state != SD_STATE_DUMPING)) {
25538
25539 New_state(un, SD_STATE_DUMPING);
25540
25541 if (un->un_f_is_fibre == FALSE) {
25542 mutex_exit(SD_MUTEX(un));
25543 /*
25544 * Attempt a bus reset for parallel scsi.
25545 *
25546 * Note: A bus reset is required because on some host
25547 * systems (e.g. the E420R) a bus device reset is
25548 * insufficient to reset the state of the target.
25549 *
25550 * Note: Don't issue the reset for fibre-channel,
25551 * because this tends to hang the bus (loop) for
25552 * too long while everyone is logging out and in
25553 * and the deadman timer for dumping will fire
25554 * before the dump is complete.
25555 */
25556 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
25557 mutex_enter(SD_MUTEX(un));
25558 Restore_state(un);
25559 mutex_exit(SD_MUTEX(un));
25560 return (EIO);
25561 }
25562
25563 /* Delay to give the device some recovery time. */
25564 drv_usecwait(10000);
25565
25566 if (sd_send_polled_RQS(un) == SD_FAILURE) {
25567 SD_INFO(SD_LOG_DUMP, un,
25568 "sddump: sd_send_polled_RQS failed\n");
25569 }
25570 mutex_enter(SD_MUTEX(un));
25571 }
25572 }
25573
25574 /*
25575 * Convert the partition-relative block number to a
25576 * disk physical block number.
25577 */
25578 if (NOT_DEVBSIZE(un)) {
25579 blkno += start_block;
25580 } else {
25581 blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE);
25582 blkno += start_block;
25583 }
25584
25585 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);
25586
25587
25588 /*
25589 * Check if the device has a non-512 block size.
25590 */
25591 wr_bp = NULL;
25592 if (NOT_DEVBSIZE(un)) {
25593 tgt_byte_offset = blkno * un->un_sys_blocksize;
25594 tgt_byte_count = nblk * un->un_sys_blocksize;
25595 if ((tgt_byte_offset % un->un_tgt_blocksize) ||
25596 (tgt_byte_count % un->un_tgt_blocksize)) {
25597 doing_rmw = TRUE;
25598 /*
25599 * Calculate the block number and number of blocks
25600 * in terms of the media block size.
25601 */
25602 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
25603 tgt_nblk =
25604 ((tgt_byte_offset + tgt_byte_count +
25605 (un->un_tgt_blocksize - 1)) /
25606 un->un_tgt_blocksize) - tgt_blkno;
25607
25608 /*
25609 * Invoke the routine which is going to do the read part
25610 * of the read-modify-write.
25611 * Note that this routine returns a pointer to
25612 * a valid bp in wr_bp.
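 *
 * For example (illustrative numbers only): with a 2048-byte target
 * block and 512-byte system blocks, blkno = 3 and nblk = 2 give
 * tgt_byte_offset = 1536 and tgt_byte_count = 1024, so tgt_blkno = 0,
 * tgt_nblk = 2, and the caller's 1024 bytes land at io_start_offset
 * 1536 within the two-block read buffer.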
25613 */ 25614 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 25615 &wr_bp); 25616 if (err) { 25617 mutex_exit(SD_MUTEX(un)); 25618 return (err); 25619 } 25620 /* 25621 * Offset is being calculated as - 25622 * (original block # * system block size) - 25623 * (new block # * target block size) 25624 */ 25625 io_start_offset = 25626 ((uint64_t)(blkno * un->un_sys_blocksize)) - 25627 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 25628 25629 ASSERT((io_start_offset >= 0) && 25630 (io_start_offset < un->un_tgt_blocksize)); 25631 /* 25632 * Do the modify portion of read modify write. 25633 */ 25634 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 25635 (size_t)nblk * un->un_sys_blocksize); 25636 } else { 25637 doing_rmw = FALSE; 25638 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25639 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 25640 } 25641 25642 /* Convert blkno and nblk to target blocks */ 25643 blkno = tgt_blkno; 25644 nblk = tgt_nblk; 25645 } else { 25646 wr_bp = &wr_buf; 25647 bzero(wr_bp, sizeof (struct buf)); 25648 wr_bp->b_flags = B_BUSY; 25649 wr_bp->b_un.b_addr = addr; 25650 wr_bp->b_bcount = nblk << DEV_BSHIFT; 25651 wr_bp->b_resid = 0; 25652 } 25653 25654 mutex_exit(SD_MUTEX(un)); 25655 25656 /* 25657 * Obtain a SCSI packet for the write command. 25658 * It should be safe to call the allocator here without 25659 * worrying about being locked for DVMA mapping because 25660 * the address we're passed is already a DVMA mapping 25661 * 25662 * We are also not going to worry about semaphore ownership 25663 * in the dump buffer. Dumping is single threaded at present. 25664 */ 25665 25666 wr_pktp = NULL; 25667 25668 dma_resid = wr_bp->b_bcount; 25669 oblkno = blkno; 25670 25671 if (!(NOT_DEVBSIZE(un))) { 25672 nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE); 25673 } 25674 25675 while (dma_resid != 0) { 25676 25677 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25678 wr_bp->b_flags &= ~B_ERROR; 25679 25680 if (un->un_partial_dma_supported == 1) { 25681 blkno = oblkno + 25682 ((wr_bp->b_bcount - dma_resid) / 25683 un->un_tgt_blocksize); 25684 nblk = dma_resid / un->un_tgt_blocksize; 25685 25686 if (wr_pktp) { 25687 /* 25688 * Partial DMA transfers after initial transfer 25689 */ 25690 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 25691 blkno, nblk); 25692 } else { 25693 /* Initial transfer */ 25694 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25695 un->un_pkt_flags, NULL_FUNC, NULL, 25696 blkno, nblk); 25697 } 25698 } else { 25699 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25700 0, NULL_FUNC, NULL, blkno, nblk); 25701 } 25702 25703 if (rval == 0) { 25704 /* We were given a SCSI packet, continue. 
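 * (When un_partial_dma_supported is set, the transport may move
 * only part of the buffer per packet; pkt_resid is harvested below
 * and blkno/nblk are re-derived from oblkno on the next pass.)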
*/ 25705 break; 25706 } 25707 25708 if (i == 0) { 25709 if (wr_bp->b_flags & B_ERROR) { 25710 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25711 "no resources for dumping; " 25712 "error code: 0x%x, retrying", 25713 geterror(wr_bp)); 25714 } else { 25715 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25716 "no resources for dumping; retrying"); 25717 } 25718 } else if (i != (SD_NDUMP_RETRIES - 1)) { 25719 if (wr_bp->b_flags & B_ERROR) { 25720 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25721 "no resources for dumping; error code: " 25722 "0x%x, retrying\n", geterror(wr_bp)); 25723 } 25724 } else { 25725 if (wr_bp->b_flags & B_ERROR) { 25726 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25727 "no resources for dumping; " 25728 "error code: 0x%x, retries failed, " 25729 "giving up.\n", geterror(wr_bp)); 25730 } else { 25731 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25732 "no resources for dumping; " 25733 "retries failed, giving up.\n"); 25734 } 25735 mutex_enter(SD_MUTEX(un)); 25736 Restore_state(un); 25737 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 25738 mutex_exit(SD_MUTEX(un)); 25739 scsi_free_consistent_buf(wr_bp); 25740 } else { 25741 mutex_exit(SD_MUTEX(un)); 25742 } 25743 return (EIO); 25744 } 25745 drv_usecwait(10000); 25746 } 25747 25748 if (un->un_partial_dma_supported == 1) { 25749 /* 25750 * save the resid from PARTIAL_DMA 25751 */ 25752 dma_resid = wr_pktp->pkt_resid; 25753 if (dma_resid != 0) 25754 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 25755 wr_pktp->pkt_resid = 0; 25756 } else { 25757 dma_resid = 0; 25758 } 25759 25760 /* SunBug 1222170 */ 25761 wr_pktp->pkt_flags = FLAG_NOINTR; 25762 25763 err = EIO; 25764 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25765 25766 /* 25767 * Scsi_poll returns 0 (success) if the command completes and 25768 * the status block is STATUS_GOOD. We should only check 25769 * errors if this condition is not true. Even then we should 25770 * send our own request sense packet only if we have a check 25771 * condition and auto request sense has not been performed by 25772 * the hba. 25773 */ 25774 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 25775 25776 if ((sd_scsi_poll(un, wr_pktp) == 0) && 25777 (wr_pktp->pkt_resid == 0)) { 25778 err = SD_SUCCESS; 25779 break; 25780 } 25781 25782 /* 25783 * Check CMD_DEV_GONE 1st, give up if device is gone. 
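 * (CMD_DEV_GONE means the target no longer answers selection, so
 * neither retries nor resets can help and the dump is abandoned.)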
25784 */ 25785 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 25786 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25787 "Error while dumping state...Device is gone\n"); 25788 break; 25789 } 25790 25791 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 25792 SD_INFO(SD_LOG_DUMP, un, 25793 "sddump: write failed with CHECK, try # %d\n", i); 25794 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 25795 (void) sd_send_polled_RQS(un); 25796 } 25797 25798 continue; 25799 } 25800 25801 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 25802 int reset_retval = 0; 25803 25804 SD_INFO(SD_LOG_DUMP, un, 25805 "sddump: write failed with BUSY, try # %d\n", i); 25806 25807 if (un->un_f_lun_reset_enabled == TRUE) { 25808 reset_retval = scsi_reset(SD_ADDRESS(un), 25809 RESET_LUN); 25810 } 25811 if (reset_retval == 0) { 25812 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25813 } 25814 (void) sd_send_polled_RQS(un); 25815 25816 } else { 25817 SD_INFO(SD_LOG_DUMP, un, 25818 "sddump: write failed with 0x%x, try # %d\n", 25819 SD_GET_PKT_STATUS(wr_pktp), i); 25820 mutex_enter(SD_MUTEX(un)); 25821 sd_reset_target(un, wr_pktp); 25822 mutex_exit(SD_MUTEX(un)); 25823 } 25824 25825 /* 25826 * If we are not getting anywhere with lun/target resets, 25827 * let's reset the bus. 25828 */ 25829 if (i == SD_NDUMP_RETRIES/2) { 25830 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 25831 (void) sd_send_polled_RQS(un); 25832 } 25833 } 25834 } 25835 25836 scsi_destroy_pkt(wr_pktp); 25837 mutex_enter(SD_MUTEX(un)); 25838 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 25839 mutex_exit(SD_MUTEX(un)); 25840 scsi_free_consistent_buf(wr_bp); 25841 } else { 25842 mutex_exit(SD_MUTEX(un)); 25843 } 25844 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 25845 return (err); 25846 } 25847 25848 /* 25849 * Function: sd_scsi_poll() 25850 * 25851 * Description: This is a wrapper for the scsi_poll call. 25852 * 25853 * Arguments: sd_lun - The unit structure 25854 * scsi_pkt - The scsi packet being sent to the device. 25855 * 25856 * Return Code: 0 - Command completed successfully with good status 25857 * -1 - Command failed. This could indicate a check condition 25858 * or other status value requiring recovery action. 25859 * 25860 * NOTE: This code is only called off sddump(). 25861 */ 25862 25863 static int 25864 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 25865 { 25866 int status; 25867 25868 ASSERT(un != NULL); 25869 ASSERT(!mutex_owned(SD_MUTEX(un))); 25870 ASSERT(pktp != NULL); 25871 25872 status = SD_SUCCESS; 25873 25874 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 25875 pktp->pkt_flags |= un->un_tagflags; 25876 pktp->pkt_flags &= ~FLAG_NODISCON; 25877 } 25878 25879 status = sd_ddi_scsi_poll(pktp); 25880 /* 25881 * Scsi_poll returns 0 (success) if the command completes and the 25882 * status block is STATUS_GOOD. We should only check errors if this 25883 * condition is not true. Even then we should send our own request 25884 * sense packet only if we have a check condition and auto 25885 * request sense has not been performed by the hba. 25886 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 25887 */ 25888 if ((status != SD_SUCCESS) && 25889 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 25890 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 25891 (pktp->pkt_reason != CMD_DEV_GONE)) 25892 (void) sd_send_polled_RQS(un); 25893 25894 return (status); 25895 } 25896 25897 /* 25898 * Function: sd_send_polled_RQS() 25899 * 25900 * Description: This sends the request sense command to a device. 
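 * It reuses the unit's pre-allocated un_rqs_pktp/un_rqs_bp
 * (guarded by un_sense_isbusy), so nothing has to be allocated
 * in the polled/dump path.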
25901 * 25902 * Arguments: sd_lun - The unit structure 25903 * 25904 * Return Code: 0 - Command completed successfully with good status 25905 * -1 - Command failed. 25906 * 25907 */ 25908 25909 static int 25910 sd_send_polled_RQS(struct sd_lun *un) 25911 { 25912 int ret_val; 25913 struct scsi_pkt *rqs_pktp; 25914 struct buf *rqs_bp; 25915 25916 ASSERT(un != NULL); 25917 ASSERT(!mutex_owned(SD_MUTEX(un))); 25918 25919 ret_val = SD_SUCCESS; 25920 25921 rqs_pktp = un->un_rqs_pktp; 25922 rqs_bp = un->un_rqs_bp; 25923 25924 mutex_enter(SD_MUTEX(un)); 25925 25926 if (un->un_sense_isbusy) { 25927 ret_val = SD_FAILURE; 25928 mutex_exit(SD_MUTEX(un)); 25929 return (ret_val); 25930 } 25931 25932 /* 25933 * If the request sense buffer (and packet) is not in use, 25934 * let's set the un_sense_isbusy and send our packet 25935 */ 25936 un->un_sense_isbusy = 1; 25937 rqs_pktp->pkt_resid = 0; 25938 rqs_pktp->pkt_reason = 0; 25939 rqs_pktp->pkt_flags |= FLAG_NOINTR; 25940 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 25941 25942 mutex_exit(SD_MUTEX(un)); 25943 25944 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 25945 " 0x%p\n", rqs_bp->b_un.b_addr); 25946 25947 /* 25948 * Can't send this to sd_scsi_poll, we wrap ourselves around the 25949 * axle - it has a call into us! 25950 */ 25951 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 25952 SD_INFO(SD_LOG_COMMON, un, 25953 "sd_send_polled_RQS: RQS failed\n"); 25954 } 25955 25956 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 25957 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 25958 25959 mutex_enter(SD_MUTEX(un)); 25960 un->un_sense_isbusy = 0; 25961 mutex_exit(SD_MUTEX(un)); 25962 25963 return (ret_val); 25964 } 25965 25966 /* 25967 * Defines needed for localized version of the scsi_poll routine. 25968 */ 25969 #define CSEC 10000 /* usecs */ 25970 #define SEC_TO_CSEC (1000000/CSEC) 25971 25972 /* 25973 * Function: sd_ddi_scsi_poll() 25974 * 25975 * Description: Localized version of the scsi_poll routine. The purpose is to 25976 * send a scsi_pkt to a device as a polled command. This version 25977 * is to ensure more robust handling of transport errors. 25978 * Specifically this routine cures not ready, coming ready 25979 * transition for power up and reset of sonoma's. This can take 25980 * up to 45 seconds for power-on and 20 seconds for reset of a 25981 * sonoma lun. 25982 * 25983 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 25984 * 25985 * Return Code: 0 - Command completed successfully with good status 25986 * -1 - Command failed. 25987 * 25988 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 25989 * be fixed (removing this code), we need to determine how to handle the 25990 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 25991 * 25992 * NOTE: This code is only called off sddump(). 25993 */ 25994 static int 25995 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 25996 { 25997 int rval = -1; 25998 int savef; 25999 long savet; 26000 void (*savec)(); 26001 int timeout; 26002 int busy_count; 26003 int poll_delay; 26004 int rc; 26005 uint8_t *sensep; 26006 struct scsi_arq_status *arqstat; 26007 extern int do_polled_io; 26008 26009 ASSERT(pkt->pkt_scbp); 26010 26011 /* 26012 * save old flags.. 
26013 */ 26014 savef = pkt->pkt_flags; 26015 savec = pkt->pkt_comp; 26016 savet = pkt->pkt_time; 26017 26018 pkt->pkt_flags |= FLAG_NOINTR; 26019 26020 /* 26021 * XXX there is nothing in the SCSA spec that states that we should not 26022 * do a callback for polled cmds; however, removing this will break sd 26023 * and probably other target drivers 26024 */ 26025 pkt->pkt_comp = NULL; 26026 26027 /* 26028 * we don't like a polled command without timeout. 26029 * 60 seconds seems long enough. 26030 */ 26031 if (pkt->pkt_time == 0) 26032 pkt->pkt_time = SCSI_POLL_TIMEOUT; 26033 26034 /* 26035 * Send polled cmd. 26036 * 26037 * We do some error recovery for various errors. Tran_busy, 26038 * queue full, and non-dispatched commands are retried every 10 msec. 26039 * as they are typically transient failures. Busy status and Not 26040 * Ready are retried every second as this status takes a while to 26041 * change. 26042 */ 26043 timeout = pkt->pkt_time * SEC_TO_CSEC; 26044 26045 for (busy_count = 0; busy_count < timeout; busy_count++) { 26046 /* 26047 * Initialize pkt status variables. 26048 */ 26049 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 26050 26051 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 26052 if (rc != TRAN_BUSY) { 26053 /* Transport failed - give up. */ 26054 break; 26055 } else { 26056 /* Transport busy - try again. */ 26057 poll_delay = 1 * CSEC; /* 10 msec. */ 26058 } 26059 } else { 26060 /* 26061 * Transport accepted - check pkt status. 26062 */ 26063 rc = (*pkt->pkt_scbp) & STATUS_MASK; 26064 if ((pkt->pkt_reason == CMD_CMPLT) && 26065 (rc == STATUS_CHECK) && 26066 (pkt->pkt_state & STATE_ARQ_DONE)) { 26067 arqstat = 26068 (struct scsi_arq_status *)(pkt->pkt_scbp); 26069 sensep = (uint8_t *)&arqstat->sts_sensedata; 26070 } else { 26071 sensep = NULL; 26072 } 26073 26074 if ((pkt->pkt_reason == CMD_CMPLT) && 26075 (rc == STATUS_GOOD)) { 26076 /* No error - we're done */ 26077 rval = 0; 26078 break; 26079 26080 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 26081 /* Lost connection - give up */ 26082 break; 26083 26084 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 26085 (pkt->pkt_state == 0)) { 26086 /* Pkt not dispatched - try again. */ 26087 poll_delay = 1 * CSEC; /* 10 msec. */ 26088 26089 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26090 (rc == STATUS_QFULL)) { 26091 /* Queue full - try again. */ 26092 poll_delay = 1 * CSEC; /* 10 msec. */ 26093 26094 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26095 (rc == STATUS_BUSY)) { 26096 /* Busy - try again. */ 26097 poll_delay = 100 * CSEC; /* 1 sec. */ 26098 busy_count += (SEC_TO_CSEC - 1); 26099 26100 } else if ((sensep != NULL) && 26101 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 26102 /* 26103 * Unit Attention - try again. 26104 * Pretend it took 1 sec. 26105 * NOTE: 'continue' avoids poll_delay 26106 */ 26107 busy_count += (SEC_TO_CSEC - 1); 26108 continue; 26109 26110 } else if ((sensep != NULL) && 26111 (scsi_sense_key(sensep) == KEY_NOT_READY) && 26112 (scsi_sense_asc(sensep) == 0x04) && 26113 (scsi_sense_ascq(sensep) == 0x01)) { 26114 /* 26115 * Not ready -> ready - try again. 26116 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 26117 * ...same as STATUS_BUSY 26118 */ 26119 poll_delay = 100 * CSEC; /* 1 sec. */ 26120 busy_count += (SEC_TO_CSEC - 1); 26121 26122 } else { 26123 /* BAD status - give up. 
*/ 26124 break; 26125 } 26126 } 26127 26128 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 26129 !do_polled_io) { 26130 delay(drv_usectohz(poll_delay)); 26131 } else { 26132 /* we busy wait during cpr_dump or interrupt threads */ 26133 drv_usecwait(poll_delay); 26134 } 26135 } 26136 26137 pkt->pkt_flags = savef; 26138 pkt->pkt_comp = savec; 26139 pkt->pkt_time = savet; 26140 26141 /* return on error */ 26142 if (rval) 26143 return (rval); 26144 26145 /* 26146 * This is not a performance critical code path. 26147 * 26148 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 26149 * issues associated with looking at DMA memory prior to 26150 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 26151 */ 26152 scsi_sync_pkt(pkt); 26153 return (0); 26154 } 26155 26156 26157 26158 /* 26159 * Function: sd_persistent_reservation_in_read_keys 26160 * 26161 * Description: This routine is the driver entry point for handling CD-ROM 26162 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 26163 * by sending the SCSI-3 PRIN commands to the device. 26164 * Processes the read keys command response by copying the 26165 * reservation key information into the user provided buffer. 26166 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26167 * 26168 * Arguments: un - Pointer to soft state struct for the target. 26169 * usrp - user provided pointer to multihost Persistent In Read 26170 * Keys structure (mhioc_inkeys_t) 26171 * flag - this argument is a pass through to ddi_copyxxx() 26172 * directly from the mode argument of ioctl(). 26173 * 26174 * Return Code: 0 - Success 26175 * EACCES 26176 * ENOTSUP 26177 * errno return code from sd_send_scsi_cmd() 26178 * 26179 * Context: Can sleep. Does not return until command is completed. 26180 */ 26181 26182 static int 26183 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 26184 mhioc_inkeys_t *usrp, int flag) 26185 { 26186 #ifdef _MULTI_DATAMODEL 26187 struct mhioc_key_list32 li32; 26188 #endif 26189 sd_prin_readkeys_t *in; 26190 mhioc_inkeys_t *ptr; 26191 mhioc_key_list_t li; 26192 uchar_t *data_bufp; 26193 int data_len; 26194 int rval = 0; 26195 size_t copysz; 26196 sd_ssc_t *ssc; 26197 26198 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 26199 return (EINVAL); 26200 } 26201 bzero(&li, sizeof (mhioc_key_list_t)); 26202 26203 ssc = sd_ssc_init(un); 26204 26205 /* 26206 * Get the listsize from user 26207 */ 26208 #ifdef _MULTI_DATAMODEL 26209 26210 switch (ddi_model_convert_from(flag & FMODELS)) { 26211 case DDI_MODEL_ILP32: 26212 copysz = sizeof (struct mhioc_key_list32); 26213 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 26214 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26215 "sd_persistent_reservation_in_read_keys: " 26216 "failed ddi_copyin: mhioc_key_list32_t\n"); 26217 rval = EFAULT; 26218 goto done; 26219 } 26220 li.listsize = li32.listsize; 26221 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 26222 break; 26223 26224 case DDI_MODEL_NONE: 26225 copysz = sizeof (mhioc_key_list_t); 26226 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26227 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26228 "sd_persistent_reservation_in_read_keys: " 26229 "failed ddi_copyin: mhioc_key_list_t\n"); 26230 rval = EFAULT; 26231 goto done; 26232 } 26233 break; 26234 } 26235 26236 #else /* ! 
_MULTI_DATAMODEL */ 26237 copysz = sizeof (mhioc_key_list_t); 26238 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26239 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26240 "sd_persistent_reservation_in_read_keys: " 26241 "failed ddi_copyin: mhioc_key_list_t\n"); 26242 rval = EFAULT; 26243 goto done; 26244 } 26245 #endif 26246 26247 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 26248 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 26249 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26250 26251 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 26252 data_len, data_bufp); 26253 if (rval != 0) { 26254 if (rval == EIO) 26255 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 26256 else 26257 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 26258 goto done; 26259 } 26260 in = (sd_prin_readkeys_t *)data_bufp; 26261 ptr->generation = BE_32(in->generation); 26262 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 26263 26264 /* 26265 * Return the min(listsize, listlen) keys 26266 */ 26267 #ifdef _MULTI_DATAMODEL 26268 26269 switch (ddi_model_convert_from(flag & FMODELS)) { 26270 case DDI_MODEL_ILP32: 26271 li32.listlen = li.listlen; 26272 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 26273 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26274 "sd_persistent_reservation_in_read_keys: " 26275 "failed ddi_copyout: mhioc_key_list32_t\n"); 26276 rval = EFAULT; 26277 goto done; 26278 } 26279 break; 26280 26281 case DDI_MODEL_NONE: 26282 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26283 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26284 "sd_persistent_reservation_in_read_keys: " 26285 "failed ddi_copyout: mhioc_key_list_t\n"); 26286 rval = EFAULT; 26287 goto done; 26288 } 26289 break; 26290 } 26291 26292 #else /* ! _MULTI_DATAMODEL */ 26293 26294 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26295 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26296 "sd_persistent_reservation_in_read_keys: " 26297 "failed ddi_copyout: mhioc_key_list_t\n"); 26298 rval = EFAULT; 26299 goto done; 26300 } 26301 26302 #endif /* _MULTI_DATAMODEL */ 26303 26304 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 26305 li.listsize * MHIOC_RESV_KEY_SIZE); 26306 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 26307 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26308 "sd_persistent_reservation_in_read_keys: " 26309 "failed ddi_copyout: keylist\n"); 26310 rval = EFAULT; 26311 } 26312 done: 26313 sd_ssc_fini(ssc); 26314 kmem_free(data_bufp, data_len); 26315 return (rval); 26316 } 26317 26318 26319 /* 26320 * Function: sd_persistent_reservation_in_read_resv 26321 * 26322 * Description: This routine is the driver entry point for handling CD-ROM 26323 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 26324 * by sending the SCSI-3 PRIN commands to the device. 26325 * Process the read persistent reservations command response by 26326 * copying the reservation information into the user provided 26327 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26328 * 26329 * Arguments: un - Pointer to soft state struct for the target. 26330 * usrp - user provided pointer to multihost Persistent In Read 26331 * Reservations structure (mhioc_inresvs_t) 26332 * flag - this argument is a pass through to ddi_copyxxx() 26333 * directly from the mode argument of ioctl(). 26334 * 26335 * Return Code: 0 - Success 26336 * EACCES 26337 * ENOTSUP 26338 * errno return code from sd_send_scsi_cmd() 26339 * 26340 * Context: Can sleep. Does not return until command is completed.
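 *
 * A minimal userland sketch of the MHIOCGRP_INRESV handshake this
 * routine serves (hypothetical illustration; headers and error
 * handling omitted):
 *
 *	mhioc_resv_desc_t descs[4];
 *	mhioc_resv_desc_list_t rl = { .listsize = 4, .list = descs };
 *	mhioc_inresvs_t ir = { .li = &rl };
 *
 *	if (ioctl(fd, MHIOCGRP_INRESV, &ir) == 0) {
 *		(on success, ir.generation and the first
 *		min(rl.listlen, 4) entries of descs are valid)
 *	}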
26341 */ 26342 26343 static int 26344 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 26345 mhioc_inresvs_t *usrp, int flag) 26346 { 26347 #ifdef _MULTI_DATAMODEL 26348 struct mhioc_resv_desc_list32 resvlist32; 26349 #endif 26350 sd_prin_readresv_t *in; 26351 mhioc_inresvs_t *ptr; 26352 sd_readresv_desc_t *readresv_ptr; 26353 mhioc_resv_desc_list_t resvlist; 26354 mhioc_resv_desc_t resvdesc; 26355 uchar_t *data_bufp = NULL; 26356 int data_len; 26357 int rval = 0; 26358 int i; 26359 size_t copysz; 26360 mhioc_resv_desc_t *bufp; 26361 sd_ssc_t *ssc; 26362 26363 if ((ptr = usrp) == NULL) { 26364 return (EINVAL); 26365 } 26366 26367 ssc = sd_ssc_init(un); 26368 26369 /* 26370 * Get the listsize from user 26371 */ 26372 #ifdef _MULTI_DATAMODEL 26373 switch (ddi_model_convert_from(flag & FMODELS)) { 26374 case DDI_MODEL_ILP32: 26375 copysz = sizeof (struct mhioc_resv_desc_list32); 26376 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 26377 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26378 "sd_persistent_reservation_in_read_resv: " 26379 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26380 rval = EFAULT; 26381 goto done; 26382 } 26383 resvlist.listsize = resvlist32.listsize; 26384 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 26385 break; 26386 26387 case DDI_MODEL_NONE: 26388 copysz = sizeof (mhioc_resv_desc_list_t); 26389 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26390 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26391 "sd_persistent_reservation_in_read_resv: " 26392 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26393 rval = EFAULT; 26394 goto done; 26395 } 26396 break; 26397 } 26398 #else /* ! _MULTI_DATAMODEL */ 26399 copysz = sizeof (mhioc_resv_desc_list_t); 26400 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26401 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26402 "sd_persistent_reservation_in_read_resv: " 26403 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26404 rval = EFAULT; 26405 goto done; 26406 } 26407 #endif /* ! _MULTI_DATAMODEL */ 26408 26409 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 26410 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 26411 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26412 26413 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV, 26414 data_len, data_bufp); 26415 if (rval != 0) { 26416 if (rval == EIO) 26417 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 26418 else 26419 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 26420 goto done; 26421 } 26422 in = (sd_prin_readresv_t *)data_bufp; 26423 ptr->generation = BE_32(in->generation); 26424 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 26425 26426 /* 26427 * Return the min(listsize, listlen) keys 26428 */ 26429 #ifdef _MULTI_DATAMODEL 26430 26431 switch (ddi_model_convert_from(flag & FMODELS)) { 26432 case DDI_MODEL_ILP32: 26433 resvlist32.listlen = resvlist.listlen; 26434 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 26435 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26436 "sd_persistent_reservation_in_read_resv: " 26437 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26438 rval = EFAULT; 26439 goto done; 26440 } 26441 break; 26442 26443 case DDI_MODEL_NONE: 26444 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26445 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26446 "sd_persistent_reservation_in_read_resv: " 26447 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26448 rval = EFAULT; 26449 goto done; 26450 } 26451 break; 26452 } 26453 26454 #else /* !
_MULTI_DATAMODEL */ 26455 26456 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26457 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26458 "sd_persistent_reservation_in_read_resv: " 26459 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26460 rval = EFAULT; 26461 goto done; 26462 } 26463 26464 #endif /* ! _MULTI_DATAMODEL */ 26465 26466 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 26467 bufp = resvlist.list; 26468 copysz = sizeof (mhioc_resv_desc_t); 26469 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 26470 i++, readresv_ptr++, bufp++) { 26471 26472 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 26473 MHIOC_RESV_KEY_SIZE); 26474 resvdesc.type = readresv_ptr->type; 26475 resvdesc.scope = readresv_ptr->scope; 26476 resvdesc.scope_specific_addr = 26477 BE_32(readresv_ptr->scope_specific_addr); 26478 26479 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 26480 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26481 "sd_persistent_reservation_in_read_resv: " 26482 "failed ddi_copyout: resvlist\n"); 26483 rval = EFAULT; 26484 goto done; 26485 } 26486 } 26487 done: 26488 sd_ssc_fini(ssc); 26489 /* only if data_bufp is allocated, we need to free it */ 26490 if (data_bufp) { 26491 kmem_free(data_bufp, data_len); 26492 } 26493 return (rval); 26494 } 26495 26496 26497 /* 26498 * Function: sr_change_blkmode() 26499 * 26500 * Description: This routine is the driver entry point for handling CD-ROM 26501 * block mode ioctl requests. Support for returning and changing 26502 * the current block size in use by the device is implemented. The 26503 * LBA size is changed via a MODE SELECT Block Descriptor. 26504 * 26505 * This routine issues a mode sense with an allocation length of 26506 * 12 bytes for the mode page header and a single block descriptor. 26507 * 26508 * Arguments: dev - the device 'dev_t' 26509 * cmd - the request type; one of CDROMGBLKMODE (get) or 26510 * CDROMSBLKMODE (set) 26511 * data - current block size or requested block size 26512 * flag - this argument is a pass through to ddi_copyxxx() directly 26513 * from the mode argument of ioctl(). 26514 * 26515 * Return Code: the code returned by sd_send_scsi_cmd() 26516 * EINVAL if invalid arguments are provided 26517 * EFAULT if ddi_copyxxx() fails 26518 * ENXIO if fail ddi_get_soft_state 26519 * EIO if invalid mode sense block descriptor length 26520 * 26521 */ 26522 26523 static int 26524 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 26525 { 26526 struct sd_lun *un = NULL; 26527 struct mode_header *sense_mhp, *select_mhp; 26528 struct block_descriptor *sense_desc, *select_desc; 26529 int current_bsize; 26530 int rval = EINVAL; 26531 uchar_t *sense = NULL; 26532 uchar_t *select = NULL; 26533 sd_ssc_t *ssc; 26534 26535 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 26536 26537 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26538 return (ENXIO); 26539 } 26540 26541 /* 26542 * The block length is changed via the Mode Select block descriptor, the 26543 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 26544 * required as part of this routine. Therefore the mode sense allocation 26545 * length is specified to be the length of a mode page header and a 26546 * block descriptor. 
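 * (That is, BUFLEN_CHG_BLK_MODE presumably equals MODE_HEADER_LENGTH
 * plus MODE_BLK_DESC_LENGTH, the 12 bytes noted above.)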
26547 */ 26548 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26549 26550 ssc = sd_ssc_init(un); 26551 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 26552 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 26553 sd_ssc_fini(ssc); 26554 if (rval != 0) { 26555 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26556 "sr_change_blkmode: Mode Sense Failed\n"); 26557 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26558 return (rval); 26559 } 26560 26561 /* Check the block descriptor len to handle only 1 block descriptor */ 26562 sense_mhp = (struct mode_header *)sense; 26563 if ((sense_mhp->bdesc_length == 0) || 26564 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 26565 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26566 "sr_change_blkmode: Mode Sense returned invalid block" 26567 " descriptor length\n"); 26568 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26569 return (EIO); 26570 } 26571 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 26572 current_bsize = ((sense_desc->blksize_hi << 16) | 26573 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 26574 26575 /* Process command */ 26576 switch (cmd) { 26577 case CDROMGBLKMODE: 26578 /* Return the block size obtained during the mode sense */ 26579 if (ddi_copyout(&current_bsize, (void *)data, 26580 sizeof (int), flag) != 0) 26581 rval = EFAULT; 26582 break; 26583 case CDROMSBLKMODE: 26584 /* Validate the requested block size */ 26585 switch (data) { 26586 case CDROM_BLK_512: 26587 case CDROM_BLK_1024: 26588 case CDROM_BLK_2048: 26589 case CDROM_BLK_2056: 26590 case CDROM_BLK_2336: 26591 case CDROM_BLK_2340: 26592 case CDROM_BLK_2352: 26593 case CDROM_BLK_2368: 26594 case CDROM_BLK_2448: 26595 case CDROM_BLK_2646: 26596 case CDROM_BLK_2647: 26597 break; 26598 default: 26599 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26600 "sr_change_blkmode: " 26601 "Block Size '%ld' Not Supported\n", data); 26602 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26603 return (EINVAL); 26604 } 26605 26606 /* 26607 * The current block size matches the requested block size so 26608 * there is no need to send the mode select to change the size 26609 */ 26610 if (current_bsize == data) { 26611 break; 26612 } 26613 26614 /* Build the select data for the requested block size */ 26615 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26616 select_mhp = (struct mode_header *)select; 26617 select_desc = 26618 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 26619 /* 26620 * The LBA size is changed via the block descriptor, so the 26621 * descriptor is built according to the user data 26622 */ 26623 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 26624 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 26625 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 26626 select_desc->blksize_lo = (char)((data) & 0x000000ff); 26627 26628 /* Send the mode select for the requested block size */ 26629 ssc = sd_ssc_init(un); 26630 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 26631 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26632 SD_PATH_STANDARD); 26633 sd_ssc_fini(ssc); 26634 if (rval != 0) { 26635 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26636 "sr_change_blkmode: Mode Select Failed\n"); 26637 /* 26638 * The mode select failed for the requested block size, 26639 * so reset the data for the original block size and 26640 * send it to the target. The error is indicated by the 26641 * return value for the failed mode select.
26642 */ 26643 select_desc->blksize_hi = sense_desc->blksize_hi; 26644 select_desc->blksize_mid = sense_desc->blksize_mid; 26645 select_desc->blksize_lo = sense_desc->blksize_lo; 26646 ssc = sd_ssc_init(un); 26647 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 26648 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26649 SD_PATH_STANDARD); 26650 sd_ssc_fini(ssc); 26651 } else { 26652 ASSERT(!mutex_owned(SD_MUTEX(un))); 26653 mutex_enter(SD_MUTEX(un)); 26654 sd_update_block_info(un, (uint32_t)data, 0); 26655 mutex_exit(SD_MUTEX(un)); 26656 } 26657 break; 26658 default: 26659 /* should not reach here, but check anyway */ 26660 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26661 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 26662 rval = EINVAL; 26663 break; 26664 } 26665 26666 if (select) { 26667 kmem_free(select, BUFLEN_CHG_BLK_MODE); 26668 } 26669 if (sense) { 26670 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26671 } 26672 return (rval); 26673 } 26674 26675 26676 /* 26677 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 26678 * implement driver support for getting and setting the CD speed. The command 26679 * set used will be based on the device type. If the device has not been 26680 * identified as MMC the Toshiba vendor specific mode page will be used. If 26681 * the device is MMC but does not support the Real Time Streaming feature 26682 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 26683 * be used to read the speed. 26684 */ 26685 26686 /* 26687 * Function: sr_change_speed() 26688 * 26689 * Description: This routine is the driver entry point for handling CD-ROM 26690 * drive speed ioctl requests for devices supporting the Toshiba 26691 * vendor specific drive speed mode page. Support for returning 26692 * and changing the current drive speed in use by the device is 26693 * implemented. 26694 * 26695 * Arguments: dev - the device 'dev_t' 26696 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26697 * CDROMSDRVSPEED (set) 26698 * data - current drive speed or requested drive speed 26699 * flag - this argument is a pass through to ddi_copyxxx() directly 26700 * from the mode argument of ioctl(). 26701 * 26702 * Return Code: the code returned by sd_send_scsi_cmd() 26703 * EINVAL if invalid arguments are provided 26704 * EFAULT if ddi_copyxxx() fails 26705 * ENXIO if fail ddi_get_soft_state 26706 * EIO if invalid mode sense block descriptor length 26707 */ 26708 26709 static int 26710 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26711 { 26712 struct sd_lun *un = NULL; 26713 struct mode_header *sense_mhp, *select_mhp; 26714 struct mode_speed *sense_page, *select_page; 26715 int current_speed; 26716 int rval = EINVAL; 26717 int bd_len; 26718 uchar_t *sense = NULL; 26719 uchar_t *select = NULL; 26720 sd_ssc_t *ssc; 26721 26722 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26723 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26724 return (ENXIO); 26725 } 26726 26727 /* 26728 * Note: The drive speed is being modified here according to a Toshiba 26729 * vendor specific mode page (0x31). 
*/ 26731 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26732 26733 ssc = sd_ssc_init(un); 26734 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 26735 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 26736 SD_PATH_STANDARD); 26737 sd_ssc_fini(ssc); 26738 if (rval != 0) { 26739 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26740 "sr_change_speed: Mode Sense Failed\n"); 26741 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26742 return (rval); 26743 } 26744 sense_mhp = (struct mode_header *)sense; 26745 26746 /* Check the block descriptor len to handle only 1 block descriptor */ 26747 bd_len = sense_mhp->bdesc_length; 26748 if (bd_len > MODE_BLK_DESC_LENGTH) { 26749 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26750 "sr_change_speed: Mode Sense returned invalid block " 26751 "descriptor length\n"); 26752 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26753 return (EIO); 26754 } 26755 26756 sense_page = (struct mode_speed *) 26757 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 26758 current_speed = sense_page->speed; 26759 26760 /* Process command */ 26761 switch (cmd) { 26762 case CDROMGDRVSPEED: 26763 /* Return the drive speed obtained during the mode sense */ 26764 if (current_speed == 0x2) { 26765 current_speed = CDROM_TWELVE_SPEED; 26766 } 26767 if (ddi_copyout(&current_speed, (void *)data, 26768 sizeof (int), flag) != 0) { 26769 rval = EFAULT; 26770 } 26771 break; 26772 case CDROMSDRVSPEED: 26773 /* Validate the requested drive speed */ 26774 switch ((uchar_t)data) { 26775 case CDROM_TWELVE_SPEED: 26776 data = 0x2; 26777 /*FALLTHROUGH*/ 26778 case CDROM_NORMAL_SPEED: 26779 case CDROM_DOUBLE_SPEED: 26780 case CDROM_QUAD_SPEED: 26781 case CDROM_MAXIMUM_SPEED: 26782 break; 26783 default: 26784 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26785 "sr_change_speed: " 26786 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 26787 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26788 return (EINVAL); 26789 } 26790 26791 /* 26792 * The current drive speed matches the requested drive speed so 26793 * there is no need to send the mode select to change the speed 26794 */ 26795 if (current_speed == data) { 26796 break; 26797 } 26798 26799 /* Build the select data for the requested drive speed */ 26800 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26801 select_mhp = (struct mode_header *)select; 26802 select_mhp->bdesc_length = 0; 26803 select_page = 26804 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 26807 select_page->mode_page.code = CDROM_MODE_SPEED; 26808 select_page->mode_page.length = 2; 26809 select_page->speed = (uchar_t)data; 26810 26811 /* Send the mode select for the requested drive speed */ 26812 ssc = sd_ssc_init(un); 26813 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 26814 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26815 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26816 sd_ssc_fini(ssc); 26817 if (rval != 0) { 26818 /* 26819 * The mode select failed for the requested drive speed, 26820 * so reset the data for the original drive speed and 26821 * send it to the target. The error is indicated by the 26822 * return value for the failed mode select.
26823 */ 26824 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26825 "sr_drive_speed: Mode Select Failed\n"); 26826 select_page->speed = sense_page->speed; 26827 ssc = sd_ssc_init(un); 26828 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 26829 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26830 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26831 sd_ssc_fini(ssc); 26832 } 26833 break; 26834 default: 26835 /* should not reach here, but check anyway */ 26836 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26837 "sr_change_speed: Command '%x' Not Supported\n", cmd); 26838 rval = EINVAL; 26839 break; 26840 } 26841 26842 if (select) { 26843 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 26844 } 26845 if (sense) { 26846 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26847 } 26848 26849 return (rval); 26850 } 26851 26852 26853 /* 26854 * Function: sr_atapi_change_speed() 26855 * 26856 * Description: This routine is the driver entry point for handling CD-ROM 26857 * drive speed ioctl requests for MMC devices that do not support 26858 * the Real Time Streaming feature (0x107). 26859 * 26860 * Note: This routine will use the SET SPEED command which may not 26861 * be supported by all devices. 26862 * 26863 * Arguments: dev- the device 'dev_t' 26864 * cmd- the request type; one of CDROMGDRVSPEED (get) or 26865 * CDROMSDRVSPEED (set) 26866 * data- current drive speed or requested drive speed 26867 * flag- this argument is a pass through to ddi_copyxxx() directly 26868 * from the mode argument of ioctl(). 26869 * 26870 * Return Code: the code returned by sd_send_scsi_cmd() 26871 * EINVAL if invalid arguments are provided 26872 * EFAULT if ddi_copyxxx() fails 26873 * ENXIO if fail ddi_get_soft_state 26874 * EIO if invalid mode sense block descriptor length 26875 */ 26876 26877 static int 26878 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26879 { 26880 struct sd_lun *un; 26881 struct uscsi_cmd *com = NULL; 26882 struct mode_header_grp2 *sense_mhp; 26883 uchar_t *sense_page; 26884 uchar_t *sense = NULL; 26885 char cdb[CDB_GROUP5]; 26886 int bd_len; 26887 int current_speed = 0; 26888 int max_speed = 0; 26889 int rval; 26890 sd_ssc_t *ssc; 26891 26892 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26893 26894 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26895 return (ENXIO); 26896 } 26897 26898 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 26899 26900 ssc = sd_ssc_init(un); 26901 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 26902 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 26903 SD_PATH_STANDARD); 26904 sd_ssc_fini(ssc); 26905 if (rval != 0) { 26906 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26907 "sr_atapi_change_speed: Mode Sense Failed\n"); 26908 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26909 return (rval); 26910 } 26911 26912 /* Check the block descriptor len to handle only 1 block descriptor */ 26913 sense_mhp = (struct mode_header_grp2 *)sense; 26914 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 26915 if (bd_len > MODE_BLK_DESC_LENGTH) { 26916 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26917 "sr_atapi_change_speed: Mode Sense returned invalid " 26918 "block descriptor length\n"); 26919 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26920 return (EIO); 26921 } 26922 26923 /* Calculate the current and maximum drive speeds */ 26924 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26925 current_speed = (sense_page[14] << 8) | sense_page[15]; 26926 max_speed = (sense_page[8] << 8) | sense_page[9]; 26927 26928 
/* Process the command */ 26929 switch (cmd) { 26930 case CDROMGDRVSPEED: 26931 current_speed /= SD_SPEED_1X; 26932 if (ddi_copyout(&current_speed, (void *)data, 26933 sizeof (int), flag) != 0) 26934 rval = EFAULT; 26935 break; 26936 case CDROMSDRVSPEED: 26937 /* Convert the speed code to KB/sec */ 26938 switch ((uchar_t)data) { 26939 case CDROM_NORMAL_SPEED: 26940 current_speed = SD_SPEED_1X; 26941 break; 26942 case CDROM_DOUBLE_SPEED: 26943 current_speed = 2 * SD_SPEED_1X; 26944 break; 26945 case CDROM_QUAD_SPEED: 26946 current_speed = 4 * SD_SPEED_1X; 26947 break; 26948 case CDROM_TWELVE_SPEED: 26949 current_speed = 12 * SD_SPEED_1X; 26950 break; 26951 case CDROM_MAXIMUM_SPEED: 26952 current_speed = 0xffff; 26953 break; 26954 default: 26955 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26956 "sr_atapi_change_speed: invalid drive speed %d\n", 26957 (uchar_t)data); 26958 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26959 return (EINVAL); 26960 } 26961 26962 /* Check the request against the drive's max speed. */ 26963 if (current_speed != 0xffff) { 26964 if (current_speed > max_speed) { 26965 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26966 return (EINVAL); 26967 } 26968 } 26969 26970 /* 26971 * Build and send the SET SPEED command 26972 * 26973 * Note: The SET SPEED (0xBB) command used in this routine is 26974 * obsolete per the SCSI MMC spec but still supported in the 26975 * MT FUJI vendor spec. Most equipment adheres to MT FUJI; 26976 * therefore the command is still implemented in this routine. 26977 */ 26978 bzero(cdb, sizeof (cdb)); 26979 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 26980 cdb[2] = (uchar_t)(current_speed >> 8); 26981 cdb[3] = (uchar_t)current_speed; 26982 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26983 com->uscsi_cdb = (caddr_t)cdb; 26984 com->uscsi_cdblen = CDB_GROUP5; 26985 com->uscsi_bufaddr = NULL; 26986 com->uscsi_buflen = 0; 26987 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26988 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 26989 break; 26990 default: 26991 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26992 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 26993 rval = EINVAL; 26994 } 26995 26996 if (sense) { 26997 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26998 } 26999 if (com) { 27000 kmem_free(com, sizeof (*com)); 27001 } 27002 return (rval); 27003 } 27004 27005 27006 /* 27007 * Function: sr_pause_resume() 27008 * 27009 * Description: This routine is the driver entry point for handling CD-ROM 27010 * pause/resume ioctl requests. This only affects the audio play 27011 * operation. 27012 * 27013 * Arguments: dev - the device 'dev_t' 27014 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 27015 * for setting the resume bit of the cdb.
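 * (Byte 8, bit 0 of the PAUSE/RESUME CDB carries it:
 * 1 = resume, 0 = pause, as set in the switch below.)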
27016 * 27017 * Return Code: the code returned by sd_send_scsi_cmd() 27018 * EINVAL if invalid mode specified 27019 * 27020 */ 27021 27022 static int 27023 sr_pause_resume(dev_t dev, int cmd) 27024 { 27025 struct sd_lun *un; 27026 struct uscsi_cmd *com; 27027 char cdb[CDB_GROUP1]; 27028 int rval; 27029 27030 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27031 return (ENXIO); 27032 } 27033 27034 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27035 bzero(cdb, CDB_GROUP1); 27036 cdb[0] = SCMD_PAUSE_RESUME; 27037 switch (cmd) { 27038 case CDROMRESUME: 27039 cdb[8] = 1; 27040 break; 27041 case CDROMPAUSE: 27042 cdb[8] = 0; 27043 break; 27044 default: 27045 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 27046 " Command '%x' Not Supported\n", cmd); 27047 rval = EINVAL; 27048 goto done; 27049 } 27050 27051 com->uscsi_cdb = cdb; 27052 com->uscsi_cdblen = CDB_GROUP1; 27053 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27054 27055 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27056 SD_PATH_STANDARD); 27057 27058 done: 27059 kmem_free(com, sizeof (*com)); 27060 return (rval); 27061 } 27062 27063 27064 /* 27065 * Function: sr_play_msf() 27066 * 27067 * Description: This routine is the driver entry point for handling CD-ROM 27068 * ioctl requests to output the audio signals at the specified 27069 * starting address and continue the audio play until the specified 27070 * ending address (CDROMPLAYMSF) The address is in Minute Second 27071 * Frame (MSF) format. 27072 * 27073 * Arguments: dev - the device 'dev_t' 27074 * data - pointer to user provided audio msf structure, 27075 * specifying start/end addresses. 27076 * flag - this argument is a pass through to ddi_copyxxx() 27077 * directly from the mode argument of ioctl(). 
27078 * 27079 * Return Code: the code returned by sd_send_scsi_cmd() 27080 * EFAULT if ddi_copyxxx() fails 27081 * ENXIO if fail ddi_get_soft_state 27082 * EINVAL if data pointer is NULL 27083 */ 27084 27085 static int 27086 sr_play_msf(dev_t dev, caddr_t data, int flag) 27087 { 27088 struct sd_lun *un; 27089 struct uscsi_cmd *com; 27090 struct cdrom_msf msf_struct; 27091 struct cdrom_msf *msf = &msf_struct; 27092 char cdb[CDB_GROUP1]; 27093 int rval; 27094 27095 if (data == NULL) { 27096 return (EINVAL); 27097 } 27098 27099 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27100 return (ENXIO); 27101 } 27102 27103 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 27104 return (EFAULT); 27105 } 27106 27107 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27108 bzero(cdb, CDB_GROUP1); 27109 cdb[0] = SCMD_PLAYAUDIO_MSF; 27110 if (un->un_f_cfg_playmsf_bcd == TRUE) { 27111 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 27112 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 27113 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 27114 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 27115 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 27116 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 27117 } else { 27118 cdb[3] = msf->cdmsf_min0; 27119 cdb[4] = msf->cdmsf_sec0; 27120 cdb[5] = msf->cdmsf_frame0; 27121 cdb[6] = msf->cdmsf_min1; 27122 cdb[7] = msf->cdmsf_sec1; 27123 cdb[8] = msf->cdmsf_frame1; 27124 } 27125 com->uscsi_cdb = cdb; 27126 com->uscsi_cdblen = CDB_GROUP1; 27127 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27128 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27129 SD_PATH_STANDARD); 27130 kmem_free(com, sizeof (*com)); 27131 return (rval); 27132 } 27133 27134 27135 /* 27136 * Function: sr_play_trkind() 27137 * 27138 * Description: This routine is the driver entry point for handling CD-ROM 27139 * ioctl requests to output the audio signals at the specified 27140 * starting address and continue the audio play until the specified 27141 * ending address (CDROMPLAYTRKIND). The address is in Track Index 27142 * format. 27143 * 27144 * Arguments: dev - the device 'dev_t' 27145 * data - pointer to user provided audio track/index structure, 27146 * specifying start/end addresses. 27147 * flag - this argument is a pass through to ddi_copyxxx() 27148 * directly from the mode argument of ioctl(). 
27149 * 27150 * Return Code: the code returned by sd_send_scsi_cmd() 27151 * EFAULT if ddi_copyxxx() fails 27152 * ENXIO if fail ddi_get_soft_state 27153 * EINVAL if data pointer is NULL 27154 */ 27155 27156 static int 27157 sr_play_trkind(dev_t dev, caddr_t data, int flag) 27158 { 27159 struct cdrom_ti ti_struct; 27160 struct cdrom_ti *ti = &ti_struct; 27161 struct uscsi_cmd *com = NULL; 27162 char cdb[CDB_GROUP1]; 27163 int rval; 27164 27165 if (data == NULL) { 27166 return (EINVAL); 27167 } 27168 27169 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 27170 return (EFAULT); 27171 } 27172 27173 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27174 bzero(cdb, CDB_GROUP1); 27175 cdb[0] = SCMD_PLAYAUDIO_TI; 27176 cdb[4] = ti->cdti_trk0; 27177 cdb[5] = ti->cdti_ind0; 27178 cdb[7] = ti->cdti_trk1; 27179 cdb[8] = ti->cdti_ind1; 27180 com->uscsi_cdb = cdb; 27181 com->uscsi_cdblen = CDB_GROUP1; 27182 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27183 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27184 SD_PATH_STANDARD); 27185 kmem_free(com, sizeof (*com)); 27186 return (rval); 27187 } 27188 27189 27190 /* 27191 * Function: sr_read_all_subcodes() 27192 * 27193 * Description: This routine is the driver entry point for handling CD-ROM 27194 * ioctl requests to return raw subcode data while the target is 27195 * playing audio (CDROMSUBCODE). 27196 * 27197 * Arguments: dev - the device 'dev_t' 27198 * data - pointer to user provided cdrom subcode structure, 27199 * specifying the transfer length and address. 27200 * flag - this argument is a pass through to ddi_copyxxx() 27201 * directly from the mode argument of ioctl(). 27202 * 27203 * Return Code: the code returned by sd_send_scsi_cmd() 27204 * EFAULT if ddi_copyxxx() fails 27205 * ENXIO if fail ddi_get_soft_state 27206 * EINVAL if data pointer is NULL 27207 */ 27208 27209 static int 27210 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 27211 { 27212 struct sd_lun *un = NULL; 27213 struct uscsi_cmd *com = NULL; 27214 struct cdrom_subcode *subcode = NULL; 27215 int rval; 27216 size_t buflen; 27217 char cdb[CDB_GROUP5]; 27218 27219 #ifdef _MULTI_DATAMODEL 27220 /* To support ILP32 applications in an LP64 world */ 27221 struct cdrom_subcode32 cdrom_subcode32; 27222 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 27223 #endif 27224 if (data == NULL) { 27225 return (EINVAL); 27226 } 27227 27228 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27229 return (ENXIO); 27230 } 27231 27232 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 27233 27234 #ifdef _MULTI_DATAMODEL 27235 switch (ddi_model_convert_from(flag & FMODELS)) { 27236 case DDI_MODEL_ILP32: 27237 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 27238 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27239 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27240 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27241 return (EFAULT); 27242 } 27243 /* Convert the ILP32 uscsi data from the application to LP64 */ 27244 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 27245 break; 27246 case DDI_MODEL_NONE: 27247 if (ddi_copyin(data, subcode, 27248 sizeof (struct cdrom_subcode), flag)) { 27249 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27250 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27251 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27252 return (EFAULT); 27253 } 27254 break; 27255 } 27256 #else /* ! 
_MULTI_DATAMODEL */ 27257 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 27258 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27259 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27260 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27261 return (EFAULT); 27262 } 27263 #endif /* _MULTI_DATAMODEL */ 27264 27265 /* 27266 * Since MMC-2 allows at most 3 bytes for the transfer length, check 27267 * that the length input fits in 3 bytes 27268 */ 27269 if ((subcode->cdsc_length & 0xFF000000) != 0) { 27270 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27271 "sr_read_all_subcodes: " 27272 "cdrom transfer length too large: %d (limit %d)\n", 27273 subcode->cdsc_length, 0xFFFFFF); 27274 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27275 return (EINVAL); 27276 } 27277 27278 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 27279 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27280 bzero(cdb, CDB_GROUP5); 27281 27282 if (un->un_f_mmc_cap == TRUE) { 27283 cdb[0] = (char)SCMD_READ_CD; 27284 cdb[2] = (char)0xff; 27285 cdb[3] = (char)0xff; 27286 cdb[4] = (char)0xff; 27287 cdb[5] = (char)0xff; 27288 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27289 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27290 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 27291 cdb[10] = 1; 27292 } else { 27293 /* 27294 * Note: A vendor specific command (0xDF) is being used here to 27295 * request a read of all subcodes. 27296 */ 27297 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 27298 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 27299 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27300 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27301 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 27302 } 27303 com->uscsi_cdb = cdb; 27304 com->uscsi_cdblen = CDB_GROUP5; 27305 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 27306 com->uscsi_buflen = buflen; 27307 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27308 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27309 SD_PATH_STANDARD); 27310 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27311 kmem_free(com, sizeof (*com)); 27312 return (rval); 27313 } 27314 27315 27316 /* 27317 * Function: sr_read_subchannel() 27318 * 27319 * Description: This routine is the driver entry point for handling CD-ROM 27320 * ioctl requests to return the Q sub-channel data of the CD 27321 * current position block (CDROMSUBCHNL). The data includes the 27322 * track number, index number, absolute CD-ROM address (LBA or MSF 27323 * format per the user), track relative CD-ROM address (LBA or MSF 27324 * format per the user), control data and audio status. 27325 * 27326 * Arguments: dev - the device 'dev_t' 27327 * data - pointer to user provided cdrom sub-channel structure 27328 * flag - this argument is a pass through to ddi_copyxxx() 27329 * directly from the mode argument of ioctl().
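 * (The 16-byte response decoded below packs ADR in the high nibble
 * and CONTROL in the low nibble of byte 5.)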
27330 * 27331 * Return Code: the code returned by sd_send_scsi_cmd() 27332 * EFAULT if ddi_copyxxx() fails 27333 * ENXIO if fail ddi_get_soft_state 27334 * EINVAL if data pointer is NULL 27335 */ 27336 27337 static int 27338 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 27339 { 27340 struct sd_lun *un; 27341 struct uscsi_cmd *com; 27342 struct cdrom_subchnl subchanel; 27343 struct cdrom_subchnl *subchnl = &subchanel; 27344 char cdb[CDB_GROUP1]; 27345 caddr_t buffer; 27346 int rval; 27347 27348 if (data == NULL) { 27349 return (EINVAL); 27350 } 27351 27352 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27353 (un->un_state == SD_STATE_OFFLINE)) { 27354 return (ENXIO); 27355 } 27356 27357 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 27358 return (EFAULT); 27359 } 27360 27361 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 27362 bzero(cdb, CDB_GROUP1); 27363 cdb[0] = SCMD_READ_SUBCHANNEL; 27364 /* Set the MSF bit based on the user requested address format */ 27365 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 27366 /* 27367 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 27368 * returned 27369 */ 27370 cdb[2] = 0x40; 27371 /* 27372 * Set byte 3 to specify the return data format. A value of 0x01 27373 * indicates that the CD-ROM current position should be returned. 27374 */ 27375 cdb[3] = 0x01; 27376 cdb[8] = 0x10; 27377 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27378 com->uscsi_cdb = cdb; 27379 com->uscsi_cdblen = CDB_GROUP1; 27380 com->uscsi_bufaddr = buffer; 27381 com->uscsi_buflen = 16; 27382 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27383 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27384 SD_PATH_STANDARD); 27385 if (rval != 0) { 27386 kmem_free(buffer, 16); 27387 kmem_free(com, sizeof (*com)); 27388 return (rval); 27389 } 27390 27391 /* Process the returned Q sub-channel data */ 27392 subchnl->cdsc_audiostatus = buffer[1]; 27393 subchnl->cdsc_adr = (buffer[5] & 0xF0); 27394 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 27395 subchnl->cdsc_trk = buffer[6]; 27396 subchnl->cdsc_ind = buffer[7]; 27397 if (subchnl->cdsc_format & CDROM_LBA) { 27398 subchnl->cdsc_absaddr.lba = 27399 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27400 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27401 subchnl->cdsc_reladdr.lba = 27402 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 27403 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 27404 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 27405 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 27406 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 27407 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 27408 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 27409 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 27410 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 27411 } else { 27412 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 27413 subchnl->cdsc_absaddr.msf.second = buffer[10]; 27414 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 27415 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 27416 subchnl->cdsc_reladdr.msf.second = buffer[14]; 27417 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 27418 } 27419 kmem_free(buffer, 16); 27420 kmem_free(com, sizeof (*com)); 27421 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 27422 != 0) { 27423 return (EFAULT); 27424 } 27425 return (rval); 27426 } 27427 27428 27429 /* 27430 * Function: sr_read_tocentry() 27431 * 
27432 * Description: This routine is the driver entry point for handling CD-ROM 27433 * ioctl requests to read from the Table of Contents (TOC) 27434 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 27435 * fields, the starting address (LBA or MSF format per the user) 27436 * and the data mode if the user specified track is a data track. 27437 * 27438 * Note: The READ HEADER (0x44) command used in this routine is 27439 * obsolete per the SCSI MMC spec but still supported in the 27440 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI; 27441 * therefore the command is still implemented in this routine. 27442 * 27443 * Arguments: dev - the device 'dev_t' 27444 * data - pointer to user provided toc entry structure, 27445 * specifying the track # and the address format 27446 * (LBA or MSF). 27447 * flag - this argument is a pass through to ddi_copyxxx() 27448 * directly from the mode argument of ioctl(). 27449 * 27450 * Return Code: the code returned by sd_send_scsi_cmd() 27451 * EFAULT if ddi_copyxxx() fails 27452 * ENXIO if fail ddi_get_soft_state 27453 * EINVAL if data pointer is NULL 27454 */ 27455 27456 static int 27457 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 27458 { 27459 struct sd_lun *un = NULL; 27460 struct uscsi_cmd *com; 27461 struct cdrom_tocentry toc_entry; 27462 struct cdrom_tocentry *entry = &toc_entry; 27463 caddr_t buffer; 27464 int rval; 27465 char cdb[CDB_GROUP1]; 27466 27467 if (data == NULL) { 27468 return (EINVAL); 27469 } 27470 27471 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27472 (un->un_state == SD_STATE_OFFLINE)) { 27473 return (ENXIO); 27474 } 27475 27476 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 27477 return (EFAULT); 27478 } 27479 27480 /* Validate the requested track and address format */ 27481 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 27482 return (EINVAL); 27483 } 27484 27485 if (entry->cdte_track == 0) { 27486 return (EINVAL); 27487 } 27488 27489 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 27490 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27491 bzero(cdb, CDB_GROUP1); 27492 27493 cdb[0] = SCMD_READ_TOC; 27494 /* Set the MSF bit based on the user requested address format */ 27495 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 27496 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27497 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 27498 } else { 27499 cdb[6] = entry->cdte_track; 27500 } 27501 27502 /* 27503 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
27504 * (4 byte TOC response header + 8 byte track descriptor) 27505 */ 27506 cdb[8] = 12; 27507 com->uscsi_cdb = cdb; 27508 com->uscsi_cdblen = CDB_GROUP1; 27509 com->uscsi_bufaddr = buffer; 27510 com->uscsi_buflen = 0x0C; 27511 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 27512 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27513 SD_PATH_STANDARD); 27514 if (rval != 0) { 27515 kmem_free(buffer, 12); 27516 kmem_free(com, sizeof (*com)); 27517 return (rval); 27518 } 27519 27520 /* Process the toc entry */ 27521 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 27522 entry->cdte_ctrl = (buffer[5] & 0x0F); 27523 if (entry->cdte_format & CDROM_LBA) { 27524 entry->cdte_addr.lba = 27525 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27526 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27527 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 27528 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 27529 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 27530 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 27531 /* 27532 * Send a READ TOC command using the LBA address format to get 27533 * the LBA for the track requested so it can be used in the 27534 * READ HEADER request 27535 * 27536 * Note: The MSF bit of the READ HEADER command specifies the 27537 * output format. The block address specified in that command 27538 * must be in LBA format. 27539 */ 27540 cdb[1] = 0; 27541 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27542 SD_PATH_STANDARD); 27543 if (rval != 0) { 27544 kmem_free(buffer, 12); 27545 kmem_free(com, sizeof (*com)); 27546 return (rval); 27547 } 27548 } else { 27549 entry->cdte_addr.msf.minute = buffer[9]; 27550 entry->cdte_addr.msf.second = buffer[10]; 27551 entry->cdte_addr.msf.frame = buffer[11]; 27552 /* 27553 * Send a READ TOC command using the LBA address format to get 27554 * the LBA for the track requested so it can be used in the 27555 * READ HEADER request 27556 * 27557 * Note: The MSF bit of the READ HEADER command specifies the 27558 * output format. The block address specified in that command 27559 * must be in LBA format. 27560 */ 27561 cdb[1] = 0; 27562 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27563 SD_PATH_STANDARD); 27564 if (rval != 0) { 27565 kmem_free(buffer, 12); 27566 kmem_free(com, sizeof (*com)); 27567 return (rval); 27568 } 27569 } 27570 27571 /* 27572 * Build and send the READ HEADER command to determine the data mode of 27573 * the user specified track. 27574 */ 27575 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 27576 (entry->cdte_track != CDROM_LEADOUT)) { 27577 bzero(cdb, CDB_GROUP1); 27578 cdb[0] = SCMD_READ_HEADER; 27579 cdb[2] = buffer[8]; 27580 cdb[3] = buffer[9]; 27581 cdb[4] = buffer[10]; 27582 cdb[5] = buffer[11]; 27583 cdb[8] = 0x08; 27584 com->uscsi_buflen = 0x08; 27585 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27586 SD_PATH_STANDARD); 27587 if (rval == 0) { 27588 entry->cdte_datamode = buffer[0]; 27589 } else { 27590 /* 27591 * The READ HEADER command failed; since this is 27592 * obsolete in one spec, it's better to return 27593 * -1 for an invalid track so that we can still 27594 * receive the rest of the TOC data.
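 * (The (uchar_t)-1, i.e. 0xff, value is simply a sentinel meaning
 * "data mode unknown or not applicable"; the same value is assigned
 * below for non-data tracks and for the leadout track.)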
27595 */ 27596 entry->cdte_datamode = (uchar_t)-1; 27597 } 27598 } else { 27599 entry->cdte_datamode = (uchar_t)-1; 27600 } 27601 27602 kmem_free(buffer, 12); 27603 kmem_free(com, sizeof (*com)); 27604 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 27605 return (EFAULT); 27606 27607 return (rval); 27608 } 27609 27610 27611 /* 27612 * Function: sr_read_tochdr() 27613 * 27614 * Description: This routine is the driver entry point for handling CD-ROM 27615 * ioctl requests to read the Table of Contents (TOC) header 27616 * (CDROMREADTOCHDR). The TOC header consists of the disk starting 27617 * and ending track numbers. 27618 * 27619 * Arguments: dev - the device 'dev_t' 27620 * data - pointer to user provided toc header structure, 27621 * specifying the starting and ending track numbers. 27622 * flag - this argument is a pass through to ddi_copyxxx() 27623 * directly from the mode argument of ioctl(). 27624 * 27625 * Return Code: the code returned by sd_send_scsi_cmd() 27626 * EFAULT if ddi_copyxxx() fails 27627 * ENXIO if fail ddi_get_soft_state 27628 * EINVAL if data pointer is NULL 27629 */ 27630 27631 static int 27632 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 27633 { 27634 struct sd_lun *un; 27635 struct uscsi_cmd *com; 27636 struct cdrom_tochdr toc_header; 27637 struct cdrom_tochdr *hdr = &toc_header; 27638 char cdb[CDB_GROUP1]; 27639 int rval; 27640 caddr_t buffer; 27641 27642 if (data == NULL) { 27643 return (EINVAL); 27644 } 27645 27646 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27647 (un->un_state == SD_STATE_OFFLINE)) { 27648 return (ENXIO); 27649 } 27650 27651 buffer = kmem_zalloc(4, KM_SLEEP); 27652 bzero(cdb, CDB_GROUP1); 27653 cdb[0] = SCMD_READ_TOC; 27654 /* 27655 * Specifying a track number of 0x00 in the READ TOC command indicates 27656 * that the TOC header should be returned 27657 */ 27658 cdb[6] = 0x00; 27659 /* 27660 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 27661 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 27662 */ 27663 cdb[8] = 0x04; 27664 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27665 com->uscsi_cdb = cdb; 27666 com->uscsi_cdblen = CDB_GROUP1; 27667 com->uscsi_bufaddr = buffer; 27668 com->uscsi_buflen = 0x04; 27669 com->uscsi_timeout = 300; 27670 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27671 27672 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27673 SD_PATH_STANDARD); 27674 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27675 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 27676 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 27677 } else { 27678 hdr->cdth_trk0 = buffer[2]; 27679 hdr->cdth_trk1 = buffer[3]; 27680 } 27681 kmem_free(buffer, 4); 27682 kmem_free(com, sizeof (*com)); 27683 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 27684 return (EFAULT); 27685 } 27686 return (rval); 27687 } 27688 27689 27690 /* 27691 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 27692 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for 27693 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 27694 * digital audio and extended architecture digital audio. These modes are 27695 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 27696 * MMC specs.
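 *
 * For reference, the per-block sizes used by the buffer length
 * computations in these routines are:
 *
 *	mode 1 user data		2048 bytes/block
 *	mode 2 user data		2336 bytes/block
 *	CD-DA				2352 bytes/block
 *	CD-DA + all subcodes		2448 bytes/block
 *	subcode only			  96 bytes/block
 *
 * so, as a worked example, a 16 block CD-DA request allocates a
 * 16 * 2352 = 37632 byte transfer.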
27697 * 27698 * In addition to support for the various data formats these routines also 27699 * include support for devices that implement only the direct access READ 27700 * commands (0x08, 0x28), devices that implement the READ_CD commands 27701 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 27702 * READ CDXA commands (0xD8, 0xDB) 27703 */ 27704 27705 /* 27706 * Function: sr_read_mode1() 27707 * 27708 * Description: This routine is the driver entry point for handling CD-ROM 27709 * ioctl read mode1 requests (CDROMREADMODE1). 27710 * 27711 * Arguments: dev - the device 'dev_t' 27712 * data - pointer to user provided cd read structure specifying 27713 * the lba buffer address and length. 27714 * flag - this argument is a pass through to ddi_copyxxx() 27715 * directly from the mode argument of ioctl(). 27716 * 27717 * Return Code: the code returned by sd_send_scsi_cmd() 27718 * EFAULT if ddi_copyxxx() fails 27719 * ENXIO if fail ddi_get_soft_state 27720 * EINVAL if data pointer is NULL 27721 */ 27722 27723 static int 27724 sr_read_mode1(dev_t dev, caddr_t data, int flag) 27725 { 27726 struct sd_lun *un; 27727 struct cdrom_read mode1_struct; 27728 struct cdrom_read *mode1 = &mode1_struct; 27729 int rval; 27730 sd_ssc_t *ssc; 27731 27732 #ifdef _MULTI_DATAMODEL 27733 /* To support ILP32 applications in an LP64 world */ 27734 struct cdrom_read32 cdrom_read32; 27735 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27736 #endif /* _MULTI_DATAMODEL */ 27737 27738 if (data == NULL) { 27739 return (EINVAL); 27740 } 27741 27742 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27743 (un->un_state == SD_STATE_OFFLINE)) { 27744 return (ENXIO); 27745 } 27746 27747 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27748 "sd_read_mode1: entry: un:0x%p\n", un); 27749 27750 #ifdef _MULTI_DATAMODEL 27751 switch (ddi_model_convert_from(flag & FMODELS)) { 27752 case DDI_MODEL_ILP32: 27753 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27754 return (EFAULT); 27755 } 27756 /* Convert the ILP32 uscsi data from the application to LP64 */ 27757 cdrom_read32tocdrom_read(cdrd32, mode1); 27758 break; 27759 case DDI_MODEL_NONE: 27760 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27761 return (EFAULT); 27762 } 27763 } 27764 #else /* ! _MULTI_DATAMODEL */ 27765 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27766 return (EFAULT); 27767 } 27768 #endif /* _MULTI_DATAMODEL */ 27769 27770 ssc = sd_ssc_init(un); 27771 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 27772 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 27773 sd_ssc_fini(ssc); 27774 27775 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27776 "sd_read_mode1: exit: un:0x%p\n", un); 27777 27778 return (rval); 27779 } 27780 27781 27782 /* 27783 * Function: sr_read_cd_mode2() 27784 * 27785 * Description: This routine is the driver entry point for handling CD-ROM 27786 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27787 * support the READ CD (0xBE) command or the 1st generation 27788 * READ CD (0xD4) command. 27789 * 27790 * Arguments: dev - the device 'dev_t' 27791 * data - pointer to user provided cd read structure specifying 27792 * the lba buffer address and length. 27793 * flag - this argument is a pass through to ddi_copyxxx() 27794 * directly from the mode argument of ioctl(). 
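 *
 *		The transfer length is computed in 2336 byte mode 2 blocks:
 *		e.g. a cdread_buflen of 23360 bytes yields a 10 block
 *		transfer (23360 / 2336); any partial-block remainder of
 *		the buffer is simply not used.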
27795 * 27796 * Return Code: the code returned by sd_send_scsi_cmd() 27797 * EFAULT if ddi_copyxxx() fails 27798 * ENXIO if fail ddi_get_soft_state 27799 * EINVAL if data pointer is NULL 27800 */ 27801 27802 static int 27803 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 27804 { 27805 struct sd_lun *un; 27806 struct uscsi_cmd *com; 27807 struct cdrom_read mode2_struct; 27808 struct cdrom_read *mode2 = &mode2_struct; 27809 uchar_t cdb[CDB_GROUP5]; 27810 int nblocks; 27811 int rval; 27812 #ifdef _MULTI_DATAMODEL 27813 /* To support ILP32 applications in an LP64 world */ 27814 struct cdrom_read32 cdrom_read32; 27815 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27816 #endif /* _MULTI_DATAMODEL */ 27817 27818 if (data == NULL) { 27819 return (EINVAL); 27820 } 27821 27822 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27823 (un->un_state == SD_STATE_OFFLINE)) { 27824 return (ENXIO); 27825 } 27826 27827 #ifdef _MULTI_DATAMODEL 27828 switch (ddi_model_convert_from(flag & FMODELS)) { 27829 case DDI_MODEL_ILP32: 27830 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27831 return (EFAULT); 27832 } 27833 /* Convert the ILP32 uscsi data from the application to LP64 */ 27834 cdrom_read32tocdrom_read(cdrd32, mode2); 27835 break; 27836 case DDI_MODEL_NONE: 27837 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27838 return (EFAULT); 27839 } 27840 break; 27841 } 27842 27843 #else /* ! _MULTI_DATAMODEL */ 27844 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27845 return (EFAULT); 27846 } 27847 #endif /* _MULTI_DATAMODEL */ 27848 27849 bzero(cdb, sizeof (cdb)); 27850 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 27851 /* Read command supported by 1st generation atapi drives */ 27852 cdb[0] = SCMD_READ_CDD4; 27853 } else { 27854 /* Universal CD Access Command */ 27855 cdb[0] = SCMD_READ_CD; 27856 } 27857 27858 /* 27859 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book 27860 */ 27861 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 27862 27863 /* set the start address */ 27864 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0xFF); 27865 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0xFF); 27866 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 27867 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 27868 27869 /* set the transfer length */ 27870 nblocks = mode2->cdread_buflen / 2336; 27871 cdb[6] = (uchar_t)(nblocks >> 16); 27872 cdb[7] = (uchar_t)(nblocks >> 8); 27873 cdb[8] = (uchar_t)nblocks; 27874 27875 /* set the filter bits */ 27876 cdb[9] = CDROM_READ_CD_USERDATA; 27877 27878 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27879 com->uscsi_cdb = (caddr_t)cdb; 27880 com->uscsi_cdblen = sizeof (cdb); 27881 com->uscsi_bufaddr = mode2->cdread_bufaddr; 27882 com->uscsi_buflen = mode2->cdread_buflen; 27883 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27884 27885 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27886 SD_PATH_STANDARD); 27887 kmem_free(com, sizeof (*com)); 27888 return (rval); 27889 } 27890 27891 27892 /* 27893 * Function: sr_read_mode2() 27894 * 27895 * Description: This routine is the driver entry point for handling CD-ROM 27896 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27897 * do not support the READ CD (0xBE) command. 27898 * 27899 * Arguments: dev - the device 'dev_t' 27900 * data - pointer to user provided cd read structure specifying 27901 * the lba buffer address and length. 27902 * flag - this argument is a pass through to ddi_copyxxx() 27903 * directly from the mode argument of ioctl().
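 *
 *		The overall sequence below is roughly:
 *
 *			(void) sr_sector_mode(dev, SD_MODE2_BLKSIZE);
 *			... issue SCMD_READ against the user buffer ...
 *			(void) sr_sector_mode(dev, restore_blksize);
 *
 *		which is why any other command in progress in the driver
 *		forces an EAGAIN return below.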
27904 * 27905 * Return Code: the code returned by sd_send_scsi_cmd() 27906 * EFAULT if ddi_copyxxx() fails 27907 * ENXIO if fail ddi_get_soft_state 27908 * EINVAL if data pointer is NULL 27909 * EIO if fail to reset block size 27910 * EAGAIN if commands are in progress in the driver 27911 */ 27912 27913 static int 27914 sr_read_mode2(dev_t dev, caddr_t data, int flag) 27915 { 27916 struct sd_lun *un; 27917 struct cdrom_read mode2_struct; 27918 struct cdrom_read *mode2 = &mode2_struct; 27919 int rval; 27920 uint32_t restore_blksize; 27921 struct uscsi_cmd *com; 27922 uchar_t cdb[CDB_GROUP0]; 27923 int nblocks; 27924 27925 #ifdef _MULTI_DATAMODEL 27926 /* To support ILP32 applications in an LP64 world */ 27927 struct cdrom_read32 cdrom_read32; 27928 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27929 #endif /* _MULTI_DATAMODEL */ 27930 27931 if (data == NULL) { 27932 return (EINVAL); 27933 } 27934 27935 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27936 (un->un_state == SD_STATE_OFFLINE)) { 27937 return (ENXIO); 27938 } 27939 27940 /* 27941 * Because this routine will update the device and driver block size 27942 * being used we want to make sure there are no commands in progress. 27943 * If commands are in progress the user will have to try again. 27944 * 27945 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 27946 * in sdioctl to protect commands from sdioctl through to the top of 27947 * sd_uscsi_strategy. See sdioctl for details. 27948 */ 27949 mutex_enter(SD_MUTEX(un)); 27950 if (un->un_ncmds_in_driver != 1) { 27951 mutex_exit(SD_MUTEX(un)); 27952 return (EAGAIN); 27953 } 27954 mutex_exit(SD_MUTEX(un)); 27955 27956 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27957 "sd_read_mode2: entry: un:0x%p\n", un); 27958 27959 #ifdef _MULTI_DATAMODEL 27960 switch (ddi_model_convert_from(flag & FMODELS)) { 27961 case DDI_MODEL_ILP32: 27962 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27963 return (EFAULT); 27964 } 27965 /* Convert the ILP32 uscsi data from the application to LP64 */ 27966 cdrom_read32tocdrom_read(cdrd32, mode2); 27967 break; 27968 case DDI_MODEL_NONE: 27969 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27970 return (EFAULT); 27971 } 27972 break; 27973 } 27974 #else /* ! 
_MULTI_DATAMODEL */ 27975 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 27976 return (EFAULT); 27977 } 27978 #endif /* _MULTI_DATAMODEL */ 27979 27980 /* Store the current target block size for restoration later */ 27981 restore_blksize = un->un_tgt_blocksize; 27982 27983 /* Change the device and soft state target block size to 2336 */ 27984 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 27985 rval = EIO; 27986 goto done; 27987 } 27988 27989 27990 bzero(cdb, sizeof (cdb)); 27991 27992 /* set READ operation */ 27993 cdb[0] = SCMD_READ; 27994 27995 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 27996 mode2->cdread_lba >>= 2; 27997 27998 /* set the start address */ 27999 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F); 28000 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 28001 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 28002 28003 /* set the transfer length */ 28004 nblocks = mode2->cdread_buflen / 2336; 28005 cdb[4] = (uchar_t)nblocks & 0xFF; 28006 28007 /* build command */ 28008 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28009 com->uscsi_cdb = (caddr_t)cdb; 28010 com->uscsi_cdblen = sizeof (cdb); 28011 com->uscsi_bufaddr = mode2->cdread_bufaddr; 28012 com->uscsi_buflen = mode2->cdread_buflen; 28013 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28014 28015 /* 28016 * Issue SCSI command with user space address for read buffer. 28017 * 28018 * This sends the command through the main channel in the driver. 28019 * 28020 * Since this is accessed via an IOCTL call, we go through the 28021 * standard path, so that if the device was powered down, then 28022 * it would be 'awakened' to handle the command. 28023 */ 28024 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28025 SD_PATH_STANDARD); 28026 28027 kmem_free(com, sizeof (*com)); 28028 28029 /* Restore the device and soft state target block size */ 28030 if (sr_sector_mode(dev, restore_blksize) != 0) { 28031 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28032 "failed to switch back to mode 1\n"); 28033 /* 28034 * If the read succeeded we still need to report 28035 * an error because we failed to reset the block size 28036 */ 28037 if (rval == 0) { 28038 rval = EIO; 28039 } 28040 } 28041 28042 done: 28043 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28044 "sd_read_mode2: exit: un:0x%p\n", un); 28045 28046 return (rval); 28047 } 28048 28049 28050 /* 28051 * Function: sr_sector_mode() 28052 * 28053 * Description: This utility function is used by sr_read_mode2 to set the target 28054 * block size based on the user specified size. This is a legacy 28055 * implementation based upon a vendor specific mode page. 28056 * 28057 * Arguments: dev - the device 'dev_t' 28058 * blksize - flag indicating if the block size is being set to 2336 or 28059 * 512.
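 *
 *		As a rough sketch (a reading of the code below, not a spec
 *		reference), the 20 byte mode select buffer is laid out as:
 *
 *			select[3]      = 0x08    block descriptor length
 *			select[10..11] = blksize (block descriptor block
 *					 length, big-endian)
 *			select[12..13]           page code and page length
 *					 for the vendor specific page
 *			select[14]     bit 0 set when selecting 2336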
28060 * 28061 * Return Code: the code returned by sd_send_scsi_MODE_SENSE() or 28062 * sd_send_scsi_MODE_SELECT() 28063 * ENXIO if fail ddi_get_soft_state 28064 * 28065 */ 28066 28067 static int 28068 sr_sector_mode(dev_t dev, uint32_t blksize) 28069 { 28070 struct sd_lun *un; 28071 uchar_t *sense; 28072 uchar_t *select; 28073 int rval; 28074 sd_ssc_t *ssc; 28075 28076 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28077 (un->un_state == SD_STATE_OFFLINE)) { 28078 return (ENXIO); 28079 } 28080 28081 sense = kmem_zalloc(20, KM_SLEEP); 28082 28083 /* Note: This is a vendor specific mode page (0x81) */ 28084 ssc = sd_ssc_init(un); 28085 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81, 28086 SD_PATH_STANDARD); 28087 sd_ssc_fini(ssc); 28088 if (rval != 0) { 28089 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28090 "sr_sector_mode: Mode Sense failed\n"); 28091 kmem_free(sense, 20); 28092 return (rval); 28093 } 28094 select = kmem_zalloc(20, KM_SLEEP); 28095 select[3] = 0x08; 28096 select[10] = ((blksize >> 8) & 0xff); 28097 select[11] = (blksize & 0xff); 28098 select[12] = 0x01; 28099 select[13] = 0x06; 28100 select[14] = sense[14]; 28101 select[15] = sense[15]; 28102 if (blksize == SD_MODE2_BLKSIZE) { 28103 select[14] |= 0x01; 28104 } 28105 28106 ssc = sd_ssc_init(un); 28107 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20, 28108 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28109 sd_ssc_fini(ssc); 28110 if (rval != 0) { 28111 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28112 "sr_sector_mode: Mode Select failed\n"); 28113 } else { 28114 /* 28115 * Only update the softstate block size if we successfully 28116 * changed the device block mode. 28117 */ 28118 mutex_enter(SD_MUTEX(un)); 28119 sd_update_block_info(un, blksize, 0); 28120 mutex_exit(SD_MUTEX(un)); 28121 } 28122 kmem_free(sense, 20); 28123 kmem_free(select, 20); 28124 return (rval); 28125 } 28126 28127 28128 /* 28129 * Function: sr_read_cdda() 28130 * 28131 * Description: This routine is the driver entry point for handling CD-ROM 28132 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 28133 * the target supports CDDA these requests are handled via a vendor 28134 * specific command (0xD8). If the target does not support CDDA 28135 * these requests are handled via the READ CD command (0xBE). 28136 * 28137 * Arguments: dev - the device 'dev_t' 28138 * data - pointer to user provided CD-DA structure specifying 28139 * the track starting address, transfer length, and 28140 * subcode options. 28141 * flag - this argument is a pass through to ddi_copyxxx() 28142 * directly from the mode argument of ioctl().
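 *
 *		As an illustrative (hypothetical) userland sketch, not part
 *		of the driver, where 'fd' and 'buf' are assumed:
 *
 *			struct cdrom_cdda c;
 *
 *			c.cdda_addr = 0;		starting disk address
 *			c.cdda_length = 16;		16 blocks
 *			c.cdda_subcode = CDROM_DA_SUBQ;
 *			c.cdda_data = buf;		at least 16 * 2368 bytes
 *			(void) ioctl(fd, CDROMCDDA, (caddr_t)&c);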
28143 * 28144 * Return Code: the code returned by sd_send_scsi_cmd() 28145 * EFAULT if ddi_copyxxx() fails 28146 * ENXIO if fail ddi_get_soft_state 28147 * EINVAL if invalid arguments are provided 28148 * ENOTTY 28149 */ 28150 28151 static int 28152 sr_read_cdda(dev_t dev, caddr_t data, int flag) 28153 { 28154 struct sd_lun *un; 28155 struct uscsi_cmd *com; 28156 struct cdrom_cdda *cdda; 28157 int rval; 28158 size_t buflen; 28159 char cdb[CDB_GROUP5]; 28160 28161 #ifdef _MULTI_DATAMODEL 28162 /* To support ILP32 applications in an LP64 world */ 28163 struct cdrom_cdda32 cdrom_cdda32; 28164 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 28165 #endif /* _MULTI_DATAMODEL */ 28166 28167 if (data == NULL) { 28168 return (EINVAL); 28169 } 28170 28171 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28172 return (ENXIO); 28173 } 28174 28175 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 28176 28177 #ifdef _MULTI_DATAMODEL 28178 switch (ddi_model_convert_from(flag & FMODELS)) { 28179 case DDI_MODEL_ILP32: 28180 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 28181 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28182 "sr_read_cdda: ddi_copyin Failed\n"); 28183 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28184 return (EFAULT); 28185 } 28186 /* Convert the ILP32 uscsi data from the application to LP64 */ 28187 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 28188 break; 28189 case DDI_MODEL_NONE: 28190 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28191 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28192 "sr_read_cdda: ddi_copyin Failed\n"); 28193 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28194 return (EFAULT); 28195 } 28196 break; 28197 } 28198 #else /* ! _MULTI_DATAMODEL */ 28199 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28200 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28201 "sr_read_cdda: ddi_copyin Failed\n"); 28202 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28203 return (EFAULT); 28204 } 28205 #endif /* _MULTI_DATAMODEL */ 28206 28207 /* 28208 * Since MMC-2 expects max 3 bytes for length, check if the 28209 * length input is greater than 3 bytes 28210 */ 28211 if ((cdda->cdda_length & 0xFF000000) != 0) { 28212 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 28213 "cdrom transfer length too large: %d (limit %d)\n", 28214 cdda->cdda_length, 0xFFFFFF); 28215 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28216 return (EINVAL); 28217 } 28218 28219 switch (cdda->cdda_subcode) { 28220 case CDROM_DA_NO_SUBCODE: 28221 buflen = CDROM_BLK_2352 * cdda->cdda_length; 28222 break; 28223 case CDROM_DA_SUBQ: 28224 buflen = CDROM_BLK_2368 * cdda->cdda_length; 28225 break; 28226 case CDROM_DA_ALL_SUBCODE: 28227 buflen = CDROM_BLK_2448 * cdda->cdda_length; 28228 break; 28229 case CDROM_DA_SUBCODE_ONLY: 28230 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 28231 break; 28232 default: 28233 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28234 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 28235 cdda->cdda_subcode); 28236 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28237 return (EINVAL); 28238 } 28239 28240 /* Build and send the command */ 28241 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28242 bzero(cdb, CDB_GROUP5); 28243 28244 if (un->un_f_cfg_cdda == TRUE) { 28245 cdb[0] = (char)SCMD_READ_CD; 28246 cdb[1] = 0x04; 28247 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28248 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28249 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28250 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28251 
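		/*
		 * Rough layout of the READ CD CDB being built here: bytes
		 * 2-5 carry the starting address (big-endian), bytes 6-8
		 * the transfer length in blocks, byte 9 (0x10) requests
		 * user data, and byte 10 selects the sub-channel data
		 * returned with each block.
		 */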
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28252 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28253 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 28254 cdb[9] = 0x10; 28255 switch (cdda->cdda_subcode) { 28256 case CDROM_DA_NO_SUBCODE : 28257 cdb[10] = 0x0; 28258 break; 28259 case CDROM_DA_SUBQ : 28260 cdb[10] = 0x2; 28261 break; 28262 case CDROM_DA_ALL_SUBCODE : 28263 cdb[10] = 0x1; 28264 break; 28265 case CDROM_DA_SUBCODE_ONLY : 28266 /* FALLTHROUGH */ 28267 default : 28268 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28269 kmem_free(com, sizeof (*com)); 28270 return (ENOTTY); 28271 } 28272 } else { 28273 cdb[0] = (char)SCMD_READ_CDDA; 28274 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28275 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28276 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28277 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28278 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 28279 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28280 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28281 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 28282 cdb[10] = cdda->cdda_subcode; 28283 } 28284 28285 com->uscsi_cdb = cdb; 28286 com->uscsi_cdblen = CDB_GROUP5; 28287 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 28288 com->uscsi_buflen = buflen; 28289 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28290 28291 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28292 SD_PATH_STANDARD); 28293 28294 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28295 kmem_free(com, sizeof (*com)); 28296 return (rval); 28297 } 28298 28299 28300 /* 28301 * Function: sr_read_cdxa() 28302 * 28303 * Description: This routine is the driver entry point for handling CD-ROM 28304 * ioctl requests to return CD-XA (Extended Architecture) data. 28305 * (CDROMCDXA). 28306 * 28307 * Arguments: dev - the device 'dev_t' 28308 * data - pointer to user provided CD-XA structure specifying 28309 * the data starting address, transfer length, and format 28310 * flag - this argument is a pass through to ddi_copyxxx() 28311 * directly from the mode argument of ioctl(). 28312 * 28313 * Return Code: the code returned by sd_send_scsi_cmd() 28314 * EFAULT if ddi_copyxxx() fails 28315 * ENXIO if fail ddi_get_soft_state 28316 * EINVAL if data pointer is NULL 28317 */ 28318 28319 static int 28320 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 28321 { 28322 struct sd_lun *un; 28323 struct uscsi_cmd *com; 28324 struct cdrom_cdxa *cdxa; 28325 int rval; 28326 size_t buflen; 28327 char cdb[CDB_GROUP5]; 28328 uchar_t read_flags; 28329 28330 #ifdef _MULTI_DATAMODEL 28331 /* To support ILP32 applications in an LP64 world */ 28332 struct cdrom_cdxa32 cdrom_cdxa32; 28333 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 28334 #endif /* _MULTI_DATAMODEL */ 28335 28336 if (data == NULL) { 28337 return (EINVAL); 28338 } 28339 28340 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28341 return (ENXIO); 28342 } 28343 28344 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 28345 28346 #ifdef _MULTI_DATAMODEL 28347 switch (ddi_model_convert_from(flag & FMODELS)) { 28348 case DDI_MODEL_ILP32: 28349 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 28350 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28351 return (EFAULT); 28352 } 28353 /* 28354 * Convert the ILP32 uscsi data from the 28355 * application to LP64 for internal use. 
28356 */ 28357 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 28358 break; 28359 case DDI_MODEL_NONE: 28360 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 28361 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28362 return (EFAULT); 28363 } 28364 break; 28365 } 28366 #else /* ! _MULTI_DATAMODEL */ 28367 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 28368 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28369 return (EFAULT); 28370 } 28371 #endif /* _MULTI_DATAMODEL */ 28372 28373 /* 28374 * Since MMC-2 expects max 3 bytes for length, check if the 28375 * length input is greater than 3 bytes 28376 */ 28377 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 28378 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 28379 "cdrom transfer length too large: %d (limit %d)\n", 28380 cdxa->cdxa_length, 0xFFFFFF); 28381 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28382 return (EINVAL); 28383 } 28384 28385 switch (cdxa->cdxa_format) { 28386 case CDROM_XA_DATA: 28387 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 28388 read_flags = 0x10; 28389 break; 28390 case CDROM_XA_SECTOR_DATA: 28391 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 28392 read_flags = 0xf8; 28393 break; 28394 case CDROM_XA_DATA_W_ERROR: 28395 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 28396 read_flags = 0xfc; 28397 break; 28398 default: 28399 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28400 "sr_read_cdxa: Format '0x%x' Not Supported\n", 28401 cdxa->cdxa_format); 28402 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28403 return (EINVAL); 28404 } 28405 28406 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28407 bzero(cdb, CDB_GROUP5); 28408 if (un->un_f_mmc_cap == TRUE) { 28409 cdb[0] = (char)SCMD_READ_CD; 28410 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28411 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28412 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28413 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28414 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28415 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28416 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 28417 cdb[9] = (char)read_flags; 28418 } else { 28419 /* 28420 * Note: A vendor specific command (0xDB) is being used here to 28421 * request a CD-XA read.
28422 */ 28423 cdb[0] = (char)SCMD_READ_CDXA; 28424 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28425 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28426 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28427 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28428 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 28429 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28430 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28431 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 28432 cdb[10] = cdxa->cdxa_format; 28433 } 28434 com->uscsi_cdb = cdb; 28435 com->uscsi_cdblen = CDB_GROUP5; 28436 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 28437 com->uscsi_buflen = buflen; 28438 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28439 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28440 SD_PATH_STANDARD); 28441 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28442 kmem_free(com, sizeof (*com)); 28443 return (rval); 28444 } 28445 28446 28447 /* 28448 * Function: sr_eject() 28449 * 28450 * Description: This routine is the driver entry point for handling CD-ROM 28451 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT). 28452 * 28453 * Arguments: dev - the device 'dev_t' 28454 * 28455 * Return Code: the code returned by sd_send_scsi_cmd() 28456 */ 28457 28458 static int 28459 sr_eject(dev_t dev) 28460 { 28461 struct sd_lun *un; 28462 int rval; 28463 sd_ssc_t *ssc; 28464 28465 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28466 (un->un_state == SD_STATE_OFFLINE)) { 28467 return (ENXIO); 28468 } 28469 28470 /* 28471 * To prevent race conditions with the eject 28472 * command, keep track of an eject command as 28473 * it progresses. If we are already handling 28474 * an eject command in the driver for the given 28475 * unit and another request to eject is received, 28476 * immediately return EAGAIN so we don't lose 28477 * the command if the current eject command fails. 28478 */ 28479 mutex_enter(SD_MUTEX(un)); 28480 if (un->un_f_ejecting == TRUE) { 28481 mutex_exit(SD_MUTEX(un)); 28482 return (EAGAIN); 28483 } 28484 un->un_f_ejecting = TRUE; 28485 mutex_exit(SD_MUTEX(un)); 28486 28487 ssc = sd_ssc_init(un); 28488 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 28489 SD_PATH_STANDARD); 28490 sd_ssc_fini(ssc); 28491 28492 if (rval != 0) { 28493 mutex_enter(SD_MUTEX(un)); 28494 un->un_f_ejecting = FALSE; 28495 mutex_exit(SD_MUTEX(un)); 28496 return (rval); 28497 } 28498 28499 ssc = sd_ssc_init(un); 28500 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 28501 SD_TARGET_EJECT, SD_PATH_STANDARD); 28502 sd_ssc_fini(ssc); 28503 28504 if (rval == 0) { 28505 mutex_enter(SD_MUTEX(un)); 28506 sr_ejected(un); 28507 un->un_mediastate = DKIO_EJECTED; 28508 un->un_f_ejecting = FALSE; 28509 cv_broadcast(&un->un_state_cv); 28510 mutex_exit(SD_MUTEX(un)); 28511 } else { 28512 mutex_enter(SD_MUTEX(un)); 28513 un->un_f_ejecting = FALSE; 28514 mutex_exit(SD_MUTEX(un)); 28515 } 28516 return (rval); 28517 } 28518 28519 28520 /* 28521 * Function: sr_ejected() 28522 * 28523 * Description: This routine updates the soft state structure to invalidate the 28524 * geometry information after the media has been ejected or a 28525 * media eject has been detected.
28526 * 28527 * Arguments: un - driver soft state (unit) structure 28528 */ 28529 28530 static void 28531 sr_ejected(struct sd_lun *un) 28532 { 28533 struct sd_errstats *stp; 28534 28535 ASSERT(un != NULL); 28536 ASSERT(mutex_owned(SD_MUTEX(un))); 28537 28538 un->un_f_blockcount_is_valid = FALSE; 28539 un->un_f_tgt_blocksize_is_valid = FALSE; 28540 mutex_exit(SD_MUTEX(un)); 28541 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 28542 mutex_enter(SD_MUTEX(un)); 28543 28544 if (un->un_errstats != NULL) { 28545 stp = (struct sd_errstats *)un->un_errstats->ks_data; 28546 stp->sd_capacity.value.ui64 = 0; 28547 } 28548 } 28549 28550 28551 /* 28552 * Function: sr_check_wp() 28553 * 28554 * Description: This routine checks the write protection of a removable 28555 * media disk and hotpluggable devices via the write protect bit of 28556 * the Mode Page Header device specific field. Some devices choke 28557 * on an unsupported mode page. To work around this issue, 28558 * this routine has been implemented to use the 0x3f mode page (request 28559 * for all pages) for all device types. 28560 * 28561 * Arguments: dev - the device 'dev_t' 28562 * 28563 * Return Code: int indicating if the device is write protected (1) or not (0) 28564 * 28565 * Context: Kernel thread. 28566 * 28567 */ 28568 28569 static int 28570 sr_check_wp(dev_t dev) 28571 { 28572 struct sd_lun *un; 28573 uchar_t device_specific; 28574 uchar_t *sense; 28575 int hdrlen; 28576 int rval = FALSE; 28577 int status; 28578 sd_ssc_t *ssc; 28579 28580 /* 28581 * Note: The return codes for this routine should be reworked to 28582 * properly handle the case of a NULL softstate. 28583 */ 28584 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28585 return (FALSE); 28586 } 28587 28588 if (un->un_f_cfg_is_atapi == TRUE) { 28589 /* 28590 * The mode page contents are not required; set the allocation 28591 * length for the mode page header only 28592 */ 28593 hdrlen = MODE_HEADER_LENGTH_GRP2; 28594 sense = kmem_zalloc(hdrlen, KM_SLEEP); 28595 ssc = sd_ssc_init(un); 28596 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen, 28597 MODEPAGE_ALLPAGES, SD_PATH_STANDARD); 28598 sd_ssc_fini(ssc); 28599 if (status != 0) 28600 goto err_exit; 28601 device_specific = 28602 ((struct mode_header_grp2 *)sense)->device_specific; 28603 } else { 28604 hdrlen = MODE_HEADER_LENGTH; 28605 sense = kmem_zalloc(hdrlen, KM_SLEEP); 28606 ssc = sd_ssc_init(un); 28607 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen, 28608 MODEPAGE_ALLPAGES, SD_PATH_STANDARD); 28609 sd_ssc_fini(ssc); 28610 if (status != 0) 28611 goto err_exit; 28612 device_specific = 28613 ((struct mode_header *)sense)->device_specific; 28614 } 28615 28616 28617 /* 28618 * If the mode sense failed, we branched to err_exit and return 28619 * FALSE; not all devices understand this query, so such devices 28620 * are assumed to be writable. Otherwise report the write protect bit. 28621 */ 28622 if (device_specific & WRITE_PROTECT) { 28623 rval = TRUE; 28624 } 28625 28626 err_exit: 28627 kmem_free(sense, hdrlen); 28628 return (rval); 28629 } 28630 28631 /* 28632 * Function: sr_volume_ctrl() 28633 * 28634 * Description: This routine is the driver entry point for handling CD-ROM 28635 * audio output volume ioctl requests. (CDROMVOLCTRL) 28636 * 28637 * Arguments: dev - the device 'dev_t' 28638 * data - pointer to user audio volume control structure 28639 * flag - this argument is a pass through to ddi_copyxxx() 28640 * directly from the mode argument of ioctl().
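 *
 *		As an illustrative (hypothetical) userland sketch, not part
 *		of the driver, where 'fd' is assumed:
 *
 *			struct cdrom_volctrl v;
 *
 *			v.channel0 = 0xff;		channel 0 full volume
 *			v.channel1 = 0xff;		channel 1 full volume
 *			(void) ioctl(fd, CDROMVOLCTRL, (caddr_t)&v);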
28641 * 28642 * Return Code: the code returned by sd_send_scsi_cmd() 28643 * EFAULT if ddi_copyxxx() fails 28644 * ENXIO if fail ddi_get_soft_state 28645 * EINVAL if data pointer is NULL 28646 * 28647 */ 28648 28649 static int 28650 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 28651 { 28652 struct sd_lun *un; 28653 struct cdrom_volctrl volume; 28654 struct cdrom_volctrl *vol = &volume; 28655 uchar_t *sense_page; 28656 uchar_t *select_page; 28657 uchar_t *sense; 28658 uchar_t *select; 28659 int sense_buflen; 28660 int select_buflen; 28661 int rval; 28662 sd_ssc_t *ssc; 28663 28664 if (data == NULL) { 28665 return (EINVAL); 28666 } 28667 28668 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28669 (un->un_state == SD_STATE_OFFLINE)) { 28670 return (ENXIO); 28671 } 28672 28673 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 28674 return (EFAULT); 28675 } 28676 28677 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28678 struct mode_header_grp2 *sense_mhp; 28679 struct mode_header_grp2 *select_mhp; 28680 int bd_len; 28681 28682 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 28683 select_buflen = MODE_HEADER_LENGTH_GRP2 + 28684 MODEPAGE_AUDIO_CTRL_LEN; 28685 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28686 select = kmem_zalloc(select_buflen, KM_SLEEP); 28687 ssc = sd_ssc_init(un); 28688 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 28689 sense_buflen, MODEPAGE_AUDIO_CTRL, 28690 SD_PATH_STANDARD); 28691 sd_ssc_fini(ssc); 28692 28693 if (rval != 0) { 28694 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28695 "sr_volume_ctrl: Mode Sense Failed\n"); 28696 kmem_free(sense, sense_buflen); 28697 kmem_free(select, select_buflen); 28698 return (rval); 28699 } 28700 sense_mhp = (struct mode_header_grp2 *)sense; 28701 select_mhp = (struct mode_header_grp2 *)select; 28702 bd_len = (sense_mhp->bdesc_length_hi << 8) | 28703 sense_mhp->bdesc_length_lo; 28704 if (bd_len > MODE_BLK_DESC_LENGTH) { 28705 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28706 "sr_volume_ctrl: Mode Sense returned invalid " 28707 "block descriptor length\n"); 28708 kmem_free(sense, sense_buflen); 28709 kmem_free(select, select_buflen); 28710 return (EIO); 28711 } 28712 sense_page = (uchar_t *) 28713 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 28714 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 28715 select_mhp->length_msb = 0; 28716 select_mhp->length_lsb = 0; 28717 select_mhp->bdesc_length_hi = 0; 28718 select_mhp->bdesc_length_lo = 0; 28719 } else { 28720 struct mode_header *sense_mhp, *select_mhp; 28721 28722 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28723 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28724 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28725 select = kmem_zalloc(select_buflen, KM_SLEEP); 28726 ssc = sd_ssc_init(un); 28727 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 28728 sense_buflen, MODEPAGE_AUDIO_CTRL, 28729 SD_PATH_STANDARD); 28730 sd_ssc_fini(ssc); 28731 28732 if (rval != 0) { 28733 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28734 "sr_volume_ctrl: Mode Sense Failed\n"); 28735 kmem_free(sense, sense_buflen); 28736 kmem_free(select, select_buflen); 28737 return (rval); 28738 } 28739 sense_mhp = (struct mode_header *)sense; 28740 select_mhp = (struct mode_header *)select; 28741 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 28742 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28743 "sr_volume_ctrl: Mode Sense returned invalid " 28744 "block descriptor length\n"); 28745 
kmem_free(sense, sense_buflen); 28746 kmem_free(select, select_buflen); 28747 return (EIO); 28748 } 28749 sense_page = (uchar_t *) 28750 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 28751 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 28752 select_mhp->length = 0; 28753 select_mhp->bdesc_length = 0; 28754 } 28755 /* 28756 * Note: An audio control data structure could be created and overlaid 28757 * on the following in place of the array indexing method implemented. 28758 */ 28759 28760 /* Build the select data for the user volume data */ 28761 select_page[0] = MODEPAGE_AUDIO_CTRL; 28762 select_page[1] = 0xE; 28763 /* Set the immediate bit */ 28764 select_page[2] = 0x04; 28765 /* Zero out reserved fields */ 28766 select_page[3] = 0x00; 28767 select_page[4] = 0x00; 28768 /* Return sense data for fields not to be modified */ 28769 select_page[5] = sense_page[5]; 28770 select_page[6] = sense_page[6]; 28771 select_page[7] = sense_page[7]; 28772 /* Set the user specified volume levels for channels 0 and 1 */ 28773 select_page[8] = 0x01; 28774 select_page[9] = vol->channel0; 28775 select_page[10] = 0x02; 28776 select_page[11] = vol->channel1; 28777 /* Channels 2 and 3 are currently unsupported, so return the sense data */ 28778 select_page[12] = sense_page[12]; 28779 select_page[13] = sense_page[13]; 28780 select_page[14] = sense_page[14]; 28781 select_page[15] = sense_page[15]; 28782 28783 ssc = sd_ssc_init(un); 28784 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28785 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select, 28786 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28787 } else { 28788 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 28789 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28790 } 28791 sd_ssc_fini(ssc); 28792 28793 kmem_free(sense, sense_buflen); 28794 kmem_free(select, select_buflen); 28795 return (rval); 28796 } 28797 28798 28799 /* 28800 * Function: sr_read_sony_session_offset() 28801 * 28802 * Description: This routine is the driver entry point for handling CD-ROM 28803 * ioctl requests for session offset information. (CDROMREADOFFSET) 28804 * The address of the first track in the last session of a 28805 * multi-session CD-ROM is returned. 28806 * 28807 * Note: This routine uses a vendor specific key value in the 28808 * command control field without implementing any vendor check here 28809 * or in the ioctl routine. 28810 * 28811 * Arguments: dev - the device 'dev_t' 28812 * data - pointer to an int to hold the requested address 28813 * flag - this argument is a pass through to ddi_copyxxx() 28814 * directly from the mode argument of ioctl(). 28815 * 28816 * Return Code: the code returned by sd_send_scsi_cmd() 28817 * EFAULT if ddi_copyxxx() fails 28818 * ENXIO if fail ddi_get_soft_state 28819 * EINVAL if data pointer is NULL 28820 */ 28821 28822 static int 28823 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 28824 { 28825 struct sd_lun *un; 28826 struct uscsi_cmd *com; 28827 caddr_t buffer; 28828 char cdb[CDB_GROUP1]; 28829 int session_offset = 0; 28830 int rval; 28831 28832 if (data == NULL) { 28833 return (EINVAL); 28834 } 28835 28836 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28837 (un->un_state == SD_STATE_OFFLINE)) { 28838 return (ENXIO); 28839 } 28840 28841 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 28842 bzero(cdb, CDB_GROUP1); 28843 cdb[0] = SCMD_READ_TOC; 28844 /* 28845 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
28846 * (4 byte TOC response header + 8 byte response data) 28847 */ 28848 cdb[8] = SONY_SESSION_OFFSET_LEN; 28849 /* Byte 9 is the control byte. A vendor specific value is used */ 28850 cdb[9] = SONY_SESSION_OFFSET_KEY; 28851 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28852 com->uscsi_cdb = cdb; 28853 com->uscsi_cdblen = CDB_GROUP1; 28854 com->uscsi_bufaddr = buffer; 28855 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 28856 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28857 28858 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 28859 SD_PATH_STANDARD); 28860 if (rval != 0) { 28861 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 28862 kmem_free(com, sizeof (*com)); 28863 return (rval); 28864 } 28865 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 28866 session_offset = 28867 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 28868 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 28869 /* 28870 * The offset is returned in current lbasize blocks. Convert to 28871 * 2K blocks before returning it to the user. 28872 */ 28873 if (un->un_tgt_blocksize == CDROM_BLK_512) { 28874 session_offset >>= 2; 28875 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 28876 session_offset >>= 1; 28877 } 28878 } 28879 28880 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 28881 rval = EFAULT; 28882 } 28883 28884 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 28885 kmem_free(com, sizeof (*com)); 28886 return (rval); 28887 } 28888 28889 28890 /* 28891 * Function: sd_wm_cache_constructor() 28892 * 28893 * Description: Cache Constructor for the wmap cache for the read/modify/write 28894 * devices. 28895 * 28896 * Arguments: wm - A pointer to the sd_w_map to be initialized. 28897 * un - sd_lun structure for the device. 28898 * flags - the km flags passed to the constructor 28899 * 28900 * Return Code: 0 on success. 28901 * -1 on failure. 28902 */ 28903 28904 /*ARGSUSED*/ 28905 static int 28906 sd_wm_cache_constructor(void *wm, void *un, int flags) 28907 { 28908 bzero(wm, sizeof (struct sd_w_map)); 28909 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL); 28910 return (0); 28911 } 28912 28913 28914 /* 28915 * Function: sd_wm_cache_destructor() 28916 * 28917 * Description: Cache destructor for the wmap cache for the read/modify/write 28918 * devices. 28919 * 28920 * Arguments: wm - A pointer to the sd_w_map to be destroyed. 28921 * un - sd_lun structure for the device. 28922 */ 28923 /*ARGSUSED*/ 28924 static void 28925 sd_wm_cache_destructor(void *wm, void *un) 28926 { 28927 cv_destroy(&((struct sd_w_map *)wm)->wm_avail); 28928 } 28929 28930 28931 /* 28932 * Function: sd_range_lock() 28933 * 28934 * Description: Lock the range of blocks specified as parameter to ensure 28935 * that a read-modify-write is atomic and no other I/O writes 28936 * to the same location. The range is specified in terms 28937 * of start and end blocks. Block numbers are the actual 28938 * media block numbers and not system block numbers. 28939 * 28940 * Arguments: un - sd_lun structure for the device. 28941 * startb - The starting block number 28942 * endb - The end block number 28943 * typ - type of i/o - simple/read_modify_write 28944 * 28945 * Return Code: wm - pointer to the wmap structure. 28946 * 28947 * Context: This routine can sleep.
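 *
 *		The body of this routine is a small state machine; a rough
 *		sketch of the transitions is:
 *
 *			SD_WM_CHK_LIST -> SD_WM_LOCK_RANGE -> SD_WM_DONE
 *			SD_WM_CHK_LIST -> SD_WM_WAIT_MAP   -> SD_WM_CHK_LIST
 *
 *		where SD_WM_LOCK_RANGE can also bounce back to
 *		SD_WM_CHK_LIST if the wmap allocation had to sleep (and
 *		thus drop the sd mutex).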
28948 */ 28949 28950 static struct sd_w_map * 28951 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 28952 { 28953 struct sd_w_map *wmp = NULL; 28954 struct sd_w_map *sl_wmp = NULL; 28955 struct sd_w_map *tmp_wmp; 28956 wm_state state = SD_WM_CHK_LIST; 28957 28958 28959 ASSERT(un != NULL); 28960 ASSERT(!mutex_owned(SD_MUTEX(un))); 28961 28962 mutex_enter(SD_MUTEX(un)); 28963 28964 while (state != SD_WM_DONE) { 28965 28966 switch (state) { 28967 case SD_WM_CHK_LIST: 28968 /* 28969 * This is the starting state. Check the wmap list 28970 * to see if the range is currently available. 28971 */ 28972 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 28973 /* 28974 * If this is a simple write and no rmw 28975 * i/o is pending then try to lock the 28976 * range as the range should be available. 28977 */ 28978 state = SD_WM_LOCK_RANGE; 28979 } else { 28980 tmp_wmp = sd_get_range(un, startb, endb); 28981 if (tmp_wmp != NULL) { 28982 if ((wmp != NULL) && ONLIST(un, wmp)) { 28983 /* 28984 * Should not keep onlist wmps 28985 * while waiting; this macro 28986 * will also do wmp = NULL; 28987 */ 28988 FREE_ONLIST_WMAP(un, wmp); 28989 } 28990 /* 28991 * sl_wmp is the wmap on which the wait 28992 * is done; since tmp_wmp points 28993 * to the in-use wmap, set sl_wmp to 28994 * tmp_wmp and change the state to sleep 28995 */ 28996 sl_wmp = tmp_wmp; 28997 state = SD_WM_WAIT_MAP; 28998 } else { 28999 state = SD_WM_LOCK_RANGE; 29000 } 29001 29002 } 29003 break; 29004 29005 case SD_WM_LOCK_RANGE: 29006 ASSERT(un->un_wm_cache); 29007 /* 29008 * The range needs to be locked, so try to get a wmap. 29009 * First attempt it with KM_NOSLEEP; we want to avoid a sleep 29010 * if possible, as we will have to release the sd mutex 29011 * if we have to sleep. 29012 */ 29013 if (wmp == NULL) 29014 wmp = kmem_cache_alloc(un->un_wm_cache, 29015 KM_NOSLEEP); 29016 if (wmp == NULL) { 29017 mutex_exit(SD_MUTEX(un)); 29018 _NOTE(DATA_READABLE_WITHOUT_LOCK 29019 (sd_lun::un_wm_cache)) 29020 wmp = kmem_cache_alloc(un->un_wm_cache, 29021 KM_SLEEP); 29022 mutex_enter(SD_MUTEX(un)); 29023 /* 29024 * We released the mutex, so recheck and go to 29025 * the check list state. 29026 */ 29027 state = SD_WM_CHK_LIST; 29028 } else { 29029 /* 29030 * We exit out of the state machine since we 29031 * have the wmap. Do the housekeeping first: 29032 * place the wmap on the wmap list if it is not 29033 * on it already, and then set the state to done. 29034 */ 29035 wmp->wm_start = startb; 29036 wmp->wm_end = endb; 29037 wmp->wm_flags = typ | SD_WM_BUSY; 29038 if (typ & SD_WTYPE_RMW) { 29039 un->un_rmw_count++; 29040 } 29041 /* 29042 * If not already on the list, then link it 29043 */ 29044 if (!ONLIST(un, wmp)) { 29045 wmp->wm_next = un->un_wm; 29046 wmp->wm_prev = NULL; 29047 if (wmp->wm_next) 29048 wmp->wm_next->wm_prev = wmp; 29049 un->un_wm = wmp; 29050 } 29051 state = SD_WM_DONE; 29052 } 29053 break; 29054 29055 case SD_WM_WAIT_MAP: 29056 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 29057 /* 29058 * Wait is done on sl_wmp, which is set in the 29059 * check_list state. 29060 */ 29061 sl_wmp->wm_wanted_count++; 29062 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 29063 sl_wmp->wm_wanted_count--; 29064 /* 29065 * We can reuse the memory from the completed sl_wmp 29066 * lock range for our new lock, but only if no one is 29067 * waiting for it.
29068 */ 29069 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 29070 if (sl_wmp->wm_wanted_count == 0) { 29071 if (wmp != NULL) 29072 CHK_N_FREEWMP(un, wmp); 29073 wmp = sl_wmp; 29074 } 29075 sl_wmp = NULL; 29076 /* 29077 * After waking up, we need to recheck for availability of 29078 * the range. 29079 */ 29080 state = SD_WM_CHK_LIST; 29081 break; 29082 29083 default: 29084 panic("sd_range_lock: " 29085 "Unknown state %d in sd_range_lock", state); 29086 /*NOTREACHED*/ 29087 } /* switch(state) */ 29088 29089 } /* while(state != SD_WM_DONE) */ 29090 29091 mutex_exit(SD_MUTEX(un)); 29092 29093 ASSERT(wmp != NULL); 29094 29095 return (wmp); 29096 } 29097 29098 29099 /* 29100 * Function: sd_get_range() 29101 * 29102 * Description: Find if there is any overlapping I/O with this one. 29103 * Returns the write-map of the 1st such I/O, NULL otherwise. 29104 * 29105 * Arguments: un - sd_lun structure for the device. 29106 * startb - The starting block number 29107 * endb - The end block number 29108 * 29109 * Return Code: wm - pointer to the wmap structure. 29110 */ 29111 29112 static struct sd_w_map * 29113 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 29114 { 29115 struct sd_w_map *wmp; 29116 29117 ASSERT(un != NULL); 29118 29119 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 29120 if (!(wmp->wm_flags & SD_WM_BUSY)) { 29121 continue; 29122 } 29123 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 29124 break; 29125 } 29126 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 29127 break; 29128 } 29129 } 29130 29131 return (wmp); 29132 } 29133 29134 29135 /* 29136 * Function: sd_free_inlist_wmap() 29137 * 29138 * Description: Unlink and free a write map struct. 29139 * 29140 * Arguments: un - sd_lun structure for the device. 29141 * wmp - sd_w_map which needs to be unlinked. 29142 */ 29143 29144 static void 29145 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 29146 { 29147 ASSERT(un != NULL); 29148 29149 if (un->un_wm == wmp) { 29150 un->un_wm = wmp->wm_next; 29151 } else { 29152 wmp->wm_prev->wm_next = wmp->wm_next; 29153 } 29154 29155 if (wmp->wm_next) { 29156 wmp->wm_next->wm_prev = wmp->wm_prev; 29157 } 29158 29159 wmp->wm_next = wmp->wm_prev = NULL; 29160 29161 kmem_cache_free(un->un_wm_cache, wmp); 29162 } 29163 29164 29165 /* 29166 * Function: sd_range_unlock() 29167 * 29168 * Description: Unlock the range locked by wm. 29169 * Free the write map if nobody else is waiting on it. 29170 * 29171 * Arguments: un - sd_lun structure for the device. 29172 * wm - sd_w_map which needs to be unlocked. 29173 */ 29174 29175 static void 29176 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 29177 { 29178 ASSERT(un != NULL); 29179 ASSERT(wm != NULL); 29180 ASSERT(!mutex_owned(SD_MUTEX(un))); 29181 29182 mutex_enter(SD_MUTEX(un)); 29183 29184 if (wm->wm_flags & SD_WTYPE_RMW) { 29185 un->un_rmw_count--; 29186 } 29187 29188 if (wm->wm_wanted_count) { 29189 wm->wm_flags = 0; 29190 /* 29191 * Broadcast that the wmap is available now. 29192 */ 29193 cv_broadcast(&wm->wm_avail); 29194 } else { 29195 /* 29196 * If no one is waiting on the map, it should be freed. 29197 */ 29198 sd_free_inlist_wmap(un, wm); 29199 } 29200 29201 mutex_exit(SD_MUTEX(un)); 29202 } 29203 29204 29205 /* 29206 * Function: sd_read_modify_write_task 29207 * 29208 * Description: Called from a taskq thread to initiate the write phase of 29209 * a read-modify-write request. This is used for targets where 29210 * un->un_sys_blocksize != un->un_tgt_blocksize.
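 *
 *		As a worked example (assuming a 512 byte system block size
 *		and a 2048 byte target block size): a 512 byte write first
 *		reads the enclosing 2048 byte target block, the new 512
 *		bytes are merged into it, and this routine then sends the
 *		merged 2048 byte block back down the iostart chain as the
 *		write phase.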
29211  *
29212  * Arguments: arg - a pointer to the buf(9S) struct for the write command.
29213  *
29214  * Context: Called in taskq thread context.
29215  */
29216
29217 static void
29218 sd_read_modify_write_task(void *arg)
29219 {
29220     struct sd_mapblocksize_info *bsp;
29221     struct buf *bp;
29222     struct sd_xbuf *xp;
29223     struct sd_lun *un;
29224
29225     bp = arg;    /* The bp is given in arg */
29226     ASSERT(bp != NULL);
29227
29228     /* Get the pointer to the layer-private data struct */
29229     xp = SD_GET_XBUF(bp);
29230     ASSERT(xp != NULL);
29231     bsp = xp->xb_private;
29232     ASSERT(bsp != NULL);
29233
29234     un = SD_GET_UN(bp);
29235     ASSERT(un != NULL);
29236     ASSERT(!mutex_owned(SD_MUTEX(un)));
29237
29238     SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29239         "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
29240
29241     /*
29242      * This is the write phase of a read-modify-write request, called
29243      * in the context of a taskq thread in response to the read portion
29244      * of the RMW request completing in interrupt
29245      * context. The write request must be sent from here down the iostart
29246      * chain as if it were being sent from sd_mapblocksize_iostart(), so
29247      * we use the layer index saved in the layer-private data area.
29248      */
29249     SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
29250
29251     SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29252         "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
29253 }
29254
29255
29256 /*
29257  * Function: sddump_do_read_of_rmw()
29258  *
29259  * Description: This routine is called from sddump. If sddump is called
29260  *      with an I/O that is not aligned on a device blocksize boundary,
29261  *      then the write has to be converted to a read-modify-write.
29262  *      Do the read part here in order to keep sddump simple.
29263  *      Note that the sd_mutex is held across the call to this
29264  *      routine.
29265  *
29266  * Arguments: un - sd_lun
29267  *      blkno - block number in terms of media block size.
29268  *      nblk - number of blocks.
29269  *      bpp - pointer to pointer to the buf structure. On return
29270  *      from this function, *bpp points to the valid buffer
29271  *      to which the write has to be done.
29272  *
29273  * Return Code: 0 for success or errno-type return code
29274  */
29275
29276 static int
29277 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
29278     struct buf **bpp)
29279 {
29280     int err;
29281     int i;
29282     int rval;
29283     struct buf *bp;
29284     struct scsi_pkt *pkt = NULL;
29285     uint32_t target_blocksize;
29286
29287     ASSERT(un != NULL);
29288     ASSERT(mutex_owned(SD_MUTEX(un)));
29289
29290     target_blocksize = un->un_tgt_blocksize;
29291
29292     mutex_exit(SD_MUTEX(un));
29293
29294     bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
29295         (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
29296     if (bp == NULL) {
29297         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29298             "no resources for dumping; giving up");
29299         err = ENOMEM;
29300         goto done;
29301     }
29302
29303     rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
29304         blkno, nblk);
29305     if (rval != 0) {
29306         scsi_free_consistent_buf(bp);
29307         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29308             "no resources for dumping; giving up");
29309         err = ENOMEM;
29310         goto done;
29311     }
29312
29313     pkt->pkt_flags |= FLAG_NOINTR;
29314
29315     err = EIO;
29316     for (i = 0; i < SD_NDUMP_RETRIES; i++) {
29317
29318         /*
29319          * sd_scsi_poll() returns 0 (success) if the command completes and
29320          * the status block is STATUS_GOOD.
We should only check 29321 * errors if this condition is not true. Even then we should 29322 * send our own request sense packet only if we have a check 29323 * condition and auto request sense has not been performed by 29324 * the hba. 29325 */ 29326 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 29327 29328 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 29329 err = 0; 29330 break; 29331 } 29332 29333 /* 29334 * Check CMD_DEV_GONE 1st, give up if device is gone, 29335 * no need to read RQS data. 29336 */ 29337 if (pkt->pkt_reason == CMD_DEV_GONE) { 29338 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29339 "Error while dumping state with rmw..." 29340 "Device is gone\n"); 29341 break; 29342 } 29343 29344 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 29345 SD_INFO(SD_LOG_DUMP, un, 29346 "sddump: read failed with CHECK, try # %d\n", i); 29347 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 29348 (void) sd_send_polled_RQS(un); 29349 } 29350 29351 continue; 29352 } 29353 29354 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 29355 int reset_retval = 0; 29356 29357 SD_INFO(SD_LOG_DUMP, un, 29358 "sddump: read failed with BUSY, try # %d\n", i); 29359 29360 if (un->un_f_lun_reset_enabled == TRUE) { 29361 reset_retval = scsi_reset(SD_ADDRESS(un), 29362 RESET_LUN); 29363 } 29364 if (reset_retval == 0) { 29365 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 29366 } 29367 (void) sd_send_polled_RQS(un); 29368 29369 } else { 29370 SD_INFO(SD_LOG_DUMP, un, 29371 "sddump: read failed with 0x%x, try # %d\n", 29372 SD_GET_PKT_STATUS(pkt), i); 29373 mutex_enter(SD_MUTEX(un)); 29374 sd_reset_target(un, pkt); 29375 mutex_exit(SD_MUTEX(un)); 29376 } 29377 29378 /* 29379 * If we are not getting anywhere with lun/target resets, 29380 * let's reset the bus. 29381 */ 29382 if (i > SD_NDUMP_RETRIES/2) { 29383 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 29384 (void) sd_send_polled_RQS(un); 29385 } 29386 29387 } 29388 scsi_destroy_pkt(pkt); 29389 29390 if (err != 0) { 29391 scsi_free_consistent_buf(bp); 29392 *bpp = NULL; 29393 } else { 29394 *bpp = bp; 29395 } 29396 29397 done: 29398 mutex_enter(SD_MUTEX(un)); 29399 return (err); 29400 } 29401 29402 29403 /* 29404 * Function: sd_failfast_flushq 29405 * 29406 * Description: Take all bp's on the wait queue that have B_FAILFAST set 29407 * in b_flags and move them onto the failfast queue, then kick 29408 * off a thread to return all bp's on the failfast queue to 29409 * their owners with an error set. 29410 * 29411 * Arguments: un - pointer to the soft state struct for the instance. 29412 * 29413 * Context: may execute in interrupt context. 29414 */ 29415 29416 static void 29417 sd_failfast_flushq(struct sd_lun *un) 29418 { 29419 struct buf *bp; 29420 struct buf *next_waitq_bp; 29421 struct buf *prev_waitq_bp = NULL; 29422 29423 ASSERT(un != NULL); 29424 ASSERT(mutex_owned(SD_MUTEX(un))); 29425 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 29426 ASSERT(un->un_failfast_bp == NULL); 29427 29428 SD_TRACE(SD_LOG_IO_FAILFAST, un, 29429 "sd_failfast_flushq: entry: un:0x%p\n", un); 29430 29431 /* 29432 * Check if we should flush all bufs when entering failfast state, or 29433 * just those with B_FAILFAST set. 29434 */ 29435 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 29436 /* 29437 * Move *all* bp's on the wait queue to the failfast flush 29438 * queue, including those that do NOT have B_FAILFAST set. 
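     *
     * This amounts to a constant-time splice of the entire wait queue
     * onto the tail of the failfast queue; in essence (a sketch of
     * the pointer manipulation performed below):
     *
     *     failfast_tailp->av_forw = waitq_headp;   append whole chain
     *     failfast_tailp = waitq_tailp;
     *     waitq_headp = waitq_tailp = NULL;        wait queue now empty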
29439 */ 29440 if (un->un_failfast_headp == NULL) { 29441 ASSERT(un->un_failfast_tailp == NULL); 29442 un->un_failfast_headp = un->un_waitq_headp; 29443 } else { 29444 ASSERT(un->un_failfast_tailp != NULL); 29445 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 29446 } 29447 29448 un->un_failfast_tailp = un->un_waitq_tailp; 29449 29450 /* update kstat for each bp moved out of the waitq */ 29451 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 29452 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 29453 } 29454 29455 /* empty the waitq */ 29456 un->un_waitq_headp = un->un_waitq_tailp = NULL; 29457 29458 } else { 29459 /* 29460 * Go thru the wait queue, pick off all entries with 29461 * B_FAILFAST set, and move these onto the failfast queue. 29462 */ 29463 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 29464 /* 29465 * Save the pointer to the next bp on the wait queue, 29466 * so we get to it on the next iteration of this loop. 29467 */ 29468 next_waitq_bp = bp->av_forw; 29469 29470 /* 29471 * If this bp from the wait queue does NOT have 29472 * B_FAILFAST set, just move on to the next element 29473 * in the wait queue. Note, this is the only place 29474 * where it is correct to set prev_waitq_bp. 29475 */ 29476 if ((bp->b_flags & B_FAILFAST) == 0) { 29477 prev_waitq_bp = bp; 29478 continue; 29479 } 29480 29481 /* 29482 * Remove the bp from the wait queue. 29483 */ 29484 if (bp == un->un_waitq_headp) { 29485 /* The bp is the first element of the waitq. */ 29486 un->un_waitq_headp = next_waitq_bp; 29487 if (un->un_waitq_headp == NULL) { 29488 /* The wait queue is now empty */ 29489 un->un_waitq_tailp = NULL; 29490 } 29491 } else { 29492 /* 29493 * The bp is either somewhere in the middle 29494 * or at the end of the wait queue. 29495 */ 29496 ASSERT(un->un_waitq_headp != NULL); 29497 ASSERT(prev_waitq_bp != NULL); 29498 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 29499 == 0); 29500 if (bp == un->un_waitq_tailp) { 29501 /* bp is the last entry on the waitq. */ 29502 ASSERT(next_waitq_bp == NULL); 29503 un->un_waitq_tailp = prev_waitq_bp; 29504 } 29505 prev_waitq_bp->av_forw = next_waitq_bp; 29506 } 29507 bp->av_forw = NULL; 29508 29509 /* 29510 * update kstat since the bp is moved out of 29511 * the waitq 29512 */ 29513 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 29514 29515 /* 29516 * Now put the bp onto the failfast queue. 29517 */ 29518 if (un->un_failfast_headp == NULL) { 29519 /* failfast queue is currently empty */ 29520 ASSERT(un->un_failfast_tailp == NULL); 29521 un->un_failfast_headp = 29522 un->un_failfast_tailp = bp; 29523 } else { 29524 /* Add the bp to the end of the failfast q */ 29525 ASSERT(un->un_failfast_tailp != NULL); 29526 ASSERT(un->un_failfast_tailp->b_flags & 29527 B_FAILFAST); 29528 un->un_failfast_tailp->av_forw = bp; 29529 un->un_failfast_tailp = bp; 29530 } 29531 } 29532 } 29533 29534 /* 29535 * Now return all bp's on the failfast queue to their owners. 29536 */ 29537 while ((bp = un->un_failfast_headp) != NULL) { 29538 29539 un->un_failfast_headp = bp->av_forw; 29540 if (un->un_failfast_headp == NULL) { 29541 un->un_failfast_tailp = NULL; 29542 } 29543 29544 /* 29545 * We want to return the bp with a failure error code, but 29546 * we do not want a call to sd_start_cmds() to occur here, 29547 * so use sd_return_failed_command_no_restart() instead of 29548 * sd_return_failed_command(). 29549 */ 29550 sd_return_failed_command_no_restart(un, bp, EIO); 29551 } 29552 29553 /* Flush the xbuf queues if required. 
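 * ddi_xbuf_flushq(9F) is expected to walk the xbuf queue associated
 * with un_xbuf_attr and remove each buf for which the callback
 * returns TRUE (see sd_failfast_flushq_callback() below).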
 */
29554     if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
29555         ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
29556     }
29557
29558     SD_TRACE(SD_LOG_IO_FAILFAST, un,
29559         "sd_failfast_flushq: exit: un:0x%p\n", un);
29560 }
29561
29562
29563 /*
29564  * Function: sd_failfast_flushq_callback
29565  *
29566  * Description: Return TRUE if the given bp meets the criteria for failfast
29567  *      flushing. Used with ddi_xbuf_flushq(9F).
29568  *
29569  * Arguments: bp - ptr to buf struct to be examined.
29570  *
29571  * Context: Any
29572  */
29573
29574 static int
29575 sd_failfast_flushq_callback(struct buf *bp)
29576 {
29577     /*
29578      * Return TRUE if (1) we want to flush ALL bufs when the failfast
29579      * state is entered; OR (2) the given bp has B_FAILFAST set.
29580      */
29581     return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
29582         (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
29583 }
29584
29585
29586
29587 /*
29588  * Function: sd_setup_next_xfer
29589  *
29590  * Description: Prepare the next I/O operation using DMA_PARTIAL.
29591  *
29592  */
29593
29594 static int
29595 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
29596     struct scsi_pkt *pkt, struct sd_xbuf *xp)
29597 {
29598     ssize_t num_blks_not_xfered;
29599     daddr_t strt_blk_num;
29600     ssize_t bytes_not_xfered;
29601     int rval;
29602
29603     ASSERT(pkt->pkt_resid == 0);
29604
29605     /*
29606      * Calculate the next block number and the amount to be transferred.
29607      *
29608      * How much data has NOT been transferred to the HBA yet.
29609      */
29610     bytes_not_xfered = xp->xb_dma_resid;
29611
29612     /*
29613      * Figure out how many blocks have NOT been transferred to the HBA yet.
29614      */
29615     num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);
29616
29617     /*
29618      * Set the starting block number to the end of what WAS transferred.
29619      */
29620     strt_blk_num = xp->xb_blkno +
29621         SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);
29622
29623     /*
29624      * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
29625      * will call scsi_init_pkt with NULL_FUNC so we do not have to release
29626      * the disk mutex here.
29627      */
29628     rval = sd_setup_next_rw_pkt(un, pkt, bp,
29629         strt_blk_num, num_blks_not_xfered);
29630
29631     if (rval == 0) {
29632
29633         /*
29634          * Success.
29635          *
29636          * Adjust things if there are still more blocks to be
29637          * transferred.
29638          */
29639         xp->xb_dma_resid = pkt->pkt_resid;
29640         pkt->pkt_resid = 0;
29641
29642         return (1);
29643     }
29644
29645     /*
29646      * There is really only one possible failure return value from
29647      * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
29648      * returns NULL.
29649      */
29650     ASSERT(rval == SD_PKT_ALLOC_FAILURE);
29651
29652     bp->b_resid = bp->b_bcount;
29653     bp->b_flags |= B_ERROR;
29654
29655     scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29656         "Error setting up next portion of DMA transfer\n");
29657
29658     return (0);
29659 }
29660
29661 /*
29662  * Function: sd_panic_for_res_conflict
29663  *
29664  * Description: Call panic with a string formatted with "Reservation Conflict"
29665  *      and a human readable identifier indicating the SD instance
29666  *      that experienced the reservation conflict.
29667  *
29668  * Arguments: un - pointer to the soft state struct for the instance.
29669  *
29670  * Context: may execute in interrupt context.
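 *
 * The resulting panic string has the form (the device path shown is
 * illustrative):
 *
 *      Reservation Conflict
 *      Disk: /pci@0,0/pci1028,1f5c@2/sd@0,0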
29671  */
29672
29673 #define SD_RESV_CONFLICT_FMT_LEN 40
29674 void
29675 sd_panic_for_res_conflict(struct sd_lun *un)
29676 {
29677     char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
29678     char path_str[MAXPATHLEN];
29679
29680     (void) snprintf(panic_str, sizeof (panic_str),
29681         "Reservation Conflict\nDisk: %s",
29682         ddi_pathname(SD_DEVINFO(un), path_str));
29683
29684     panic(panic_str);
29685 }
29686
29687 /*
29688  * Note: The following sd_faultinjection_ioctl() routines implement
29689  * driver support for handling fault injection for error analysis,
29690  * causing faults in multiple layers of the driver.
29691  *
29692  */
29693
29694 #ifdef SD_FAULT_INJECTION
29695 static uint_t sd_fault_injection_on = 0;
29696
29697 /*
29698  * Function: sd_faultinjection_ioctl()
29699  *
29700  * Description: This routine is the driver entry point for handling
29701  *      fault-injection ioctls to inject errors into the
29702  *      layer model.
29703  *
29704  * Arguments: cmd - the ioctl cmd received
29705  *      arg - the arguments from the user
29706  */
29707
29708 static void
29709 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) {
29710
29711     uint_t i = 0;
29712     uint_t rval;
29713
29714     SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");
29715
29716     mutex_enter(SD_MUTEX(un));
29717
29718     switch (cmd) {
29719     case SDIOCRUN:
29720         /* Allow pushed faults to be injected */
29721         SD_INFO(SD_LOG_SDTEST, un,
29722             "sd_faultinjection_ioctl: Injecting Fault Run\n");
29723
29724         sd_fault_injection_on = 1;
29725
29726         SD_INFO(SD_LOG_IOERR, un,
29727             "sd_faultinjection_ioctl: run finished\n");
29728         break;
29729
29730     case SDIOCSTART:
29731         /* Start Injection Session */
29732         SD_INFO(SD_LOG_SDTEST, un,
29733             "sd_faultinjection_ioctl: Injecting Fault Start\n");
29734
29735         sd_fault_injection_on = 0;
29736         un->sd_injection_mask = 0xFFFFFFFF;
29737         for (i = 0; i < SD_FI_MAX_ERROR; i++) {
29738             un->sd_fi_fifo_pkt[i] = NULL;
29739             un->sd_fi_fifo_xb[i] = NULL;
29740             un->sd_fi_fifo_un[i] = NULL;
29741             un->sd_fi_fifo_arq[i] = NULL;
29742         }
29743         un->sd_fi_fifo_start = 0;
29744         un->sd_fi_fifo_end = 0;
29745
29746         mutex_enter(&(un->un_fi_mutex));
29747         un->sd_fi_log[0] = '\0';
29748         un->sd_fi_buf_len = 0;
29749         mutex_exit(&(un->un_fi_mutex));
29750
29751         SD_INFO(SD_LOG_IOERR, un,
29752             "sd_faultinjection_ioctl: start finished\n");
29753         break;
29754
29755     case SDIOCSTOP:
29756         /* Stop Injection Session */
29757         SD_INFO(SD_LOG_SDTEST, un,
29758             "sd_faultinjection_ioctl: Injecting Fault Stop\n");
29759         sd_fault_injection_on = 0;
29760         un->sd_injection_mask = 0x0;
29761
29762         /* Empty stray or unused structs from fifo */
29763         for (i = 0; i < SD_FI_MAX_ERROR; i++) {
29764             if (un->sd_fi_fifo_pkt[i] != NULL) {
29765                 kmem_free(un->sd_fi_fifo_pkt[i],
29766                     sizeof (struct sd_fi_pkt));
29767             }
29768             if (un->sd_fi_fifo_xb[i] != NULL) {
29769                 kmem_free(un->sd_fi_fifo_xb[i],
29770                     sizeof (struct sd_fi_xb));
29771             }
29772             if (un->sd_fi_fifo_un[i] != NULL) {
29773                 kmem_free(un->sd_fi_fifo_un[i],
29774                     sizeof (struct sd_fi_un));
29775             }
29776             if (un->sd_fi_fifo_arq[i] != NULL) {
29777                 kmem_free(un->sd_fi_fifo_arq[i],
29778                     sizeof (struct sd_fi_arq));
29779             }
29780             un->sd_fi_fifo_pkt[i] = NULL;
29781             un->sd_fi_fifo_un[i] = NULL;
29782             un->sd_fi_fifo_xb[i] = NULL;
29783             un->sd_fi_fifo_arq[i] = NULL;
29784         }
29785         un->sd_fi_fifo_start = 0;
29786         un->sd_fi_fifo_end = 0;
29787
29788         SD_INFO(SD_LOG_IOERR, un,
29789             "sd_faultinjection_ioctl: stop finished\n");
29790         break;
29791
29792     case SDIOCINSERTPKT:
29793         /* Store a packet struct to be pushed onto the fifo */
29794         SD_INFO(SD_LOG_SDTEST, un,
29795             "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");
29796
29797         i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
29798
29799         sd_fault_injection_on = 0;
29800
29801         /* No more than SD_FI_MAX_ERROR allowed in the queue */
29802         if (un->sd_fi_fifo_pkt[i] != NULL) {
29803             kmem_free(un->sd_fi_fifo_pkt[i],
29804                 sizeof (struct sd_fi_pkt));
29805         }
29806         if (arg != NULL) {
29807             un->sd_fi_fifo_pkt[i] =
29808                 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
29809             if (un->sd_fi_fifo_pkt[i] == NULL) {
29810                 /* Alloc failed; don't store anything */
29811                 break;
29812             }
29813             rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
29814                 sizeof (struct sd_fi_pkt), 0);
29815             if (rval == -1) {
29816                 kmem_free(un->sd_fi_fifo_pkt[i],
29817                     sizeof (struct sd_fi_pkt));
29818                 un->sd_fi_fifo_pkt[i] = NULL;
29819             }
29820         } else {
29821             SD_INFO(SD_LOG_IOERR, un,
29822                 "sd_faultinjection_ioctl: pkt null\n");
29823         }
29824         break;
29825
29826     case SDIOCINSERTXB:
29827         /* Store an xb struct to be pushed onto the fifo */
29828         SD_INFO(SD_LOG_SDTEST, un,
29829             "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");
29830
29831         i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
29832
29833         sd_fault_injection_on = 0;
29834
29835         if (un->sd_fi_fifo_xb[i] != NULL) {
29836             kmem_free(un->sd_fi_fifo_xb[i],
29837                 sizeof (struct sd_fi_xb));
29838             un->sd_fi_fifo_xb[i] = NULL;
29839         }
29840         if (arg != NULL) {
29841             un->sd_fi_fifo_xb[i] =
29842                 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
29843             if (un->sd_fi_fifo_xb[i] == NULL) {
29844                 /* Alloc failed; don't store anything */
29845                 break;
29846             }
29847             rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
29848                 sizeof (struct sd_fi_xb), 0);
29849
29850             if (rval == -1) {
29851                 kmem_free(un->sd_fi_fifo_xb[i],
29852                     sizeof (struct sd_fi_xb));
29853                 un->sd_fi_fifo_xb[i] = NULL;
29854             }
29855         } else {
29856             SD_INFO(SD_LOG_IOERR, un,
29857                 "sd_faultinjection_ioctl: xb null\n");
29858         }
29859         break;
29860
29861     case SDIOCINSERTUN:
29862         /* Store a un struct to be pushed onto the fifo */
29863         SD_INFO(SD_LOG_SDTEST, un,
29864             "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");
29865
29866         i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
29867
29868         sd_fault_injection_on = 0;
29869
29870         if (un->sd_fi_fifo_un[i] != NULL) {
29871             kmem_free(un->sd_fi_fifo_un[i],
29872                 sizeof (struct sd_fi_un));
29873             un->sd_fi_fifo_un[i] = NULL;
29874         }
29875         if (arg != NULL) {
29876             un->sd_fi_fifo_un[i] =
29877                 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
29878             if (un->sd_fi_fifo_un[i] == NULL) {
29879                 /* Alloc failed; don't store anything */
29880                 break;
29881             }
29882             rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
29883                 sizeof (struct sd_fi_un), 0);
29884             if (rval == -1) {
29885                 kmem_free(un->sd_fi_fifo_un[i],
29886                     sizeof (struct sd_fi_un));
29887                 un->sd_fi_fifo_un[i] = NULL;
29888             }
29889
29890         } else {
29891             SD_INFO(SD_LOG_IOERR, un,
29892                 "sd_faultinjection_ioctl: un null\n");
29893         }
29894
29895         break;
29896
29897     case SDIOCINSERTARQ:
29898         /* Store an arq struct to be pushed onto the fifo */
29899         SD_INFO(SD_LOG_SDTEST, un,
29900             "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
29901         i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
29902
29903         sd_fault_injection_on = 0;
29904
29905         if (un->sd_fi_fifo_arq[i] != NULL) {
29906             kmem_free(un->sd_fi_fifo_arq[i],
29907                 sizeof (struct sd_fi_arq));
29908             un->sd_fi_fifo_arq[i] = NULL;
29909         }
29910         if (arg != NULL) {
29911             un->sd_fi_fifo_arq[i] =
29912                 kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
29913             if (un->sd_fi_fifo_arq[i] == NULL) {
29914                 /* Alloc failed; don't store anything */
29915                 break;
29916             }
29917             rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
29918                 sizeof (struct sd_fi_arq), 0);
29919             if (rval == -1) {
29920                 kmem_free(un->sd_fi_fifo_arq[i],
29921                     sizeof (struct sd_fi_arq));
29922                 un->sd_fi_fifo_arq[i] = NULL;
29923             }
29924
29925         } else {
29926             SD_INFO(SD_LOG_IOERR, un,
29927                 "sd_faultinjection_ioctl: arq null\n");
29928         }
29929
29930         break;
29931
29932     case SDIOCPUSH:
29933         /* Push stored xb, pkt, un, and arq onto fifo */
29934         sd_fault_injection_on = 0;
29935
29936         if (arg != NULL) {
29937             rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
29938             if (rval != -1 &&
29939                 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
29940                 un->sd_fi_fifo_end += i;
29941             }
29942         } else {
29943             SD_INFO(SD_LOG_IOERR, un,
29944                 "sd_faultinjection_ioctl: push arg null\n");
29945             if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
29946                 un->sd_fi_fifo_end++;
29947             }
29948         }
29949         SD_INFO(SD_LOG_IOERR, un,
29950             "sd_faultinjection_ioctl: push to end=%d\n",
29951             un->sd_fi_fifo_end);
29952         break;
29953
29954     case SDIOCRETRIEVE:
29955         /* Return buffer of log from Injection session */
29956         SD_INFO(SD_LOG_SDTEST, un,
29957             "sd_faultinjection_ioctl: Injecting Fault Retrieve");
29958
29959         sd_fault_injection_on = 0;
29960
29961         mutex_enter(&(un->un_fi_mutex));
29962         rval = ddi_copyout(un->sd_fi_log, (void *)arg,
29963             un->sd_fi_buf_len+1, 0);
29964         mutex_exit(&(un->un_fi_mutex));
29965
29966         if (rval == -1) {
29967             /*
29968              * arg is possibly invalid; set
29969              * it to NULL for the return.
29970              */
29971             arg = NULL;
29972         }
29973         break;
29974     }
29975
29976     mutex_exit(SD_MUTEX(un));
29977     SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
29978         " exit\n");
29979 }
29980
29981
29982 /*
29983  * Function: sd_injection_log()
29984  *
29985  * Description: This routine appends buf to the existing injection log,
29986  *      for retrieval via sd_faultinjection_ioctl() for use in fault
29987  *      detection and recovery.
29988  *
29989  * Arguments: buf - the string to add to the log
29990  */
29991
29992 static void
29993 sd_injection_log(char *buf, struct sd_lun *un)
29994 {
29995     uint_t len;
29996
29997     ASSERT(un != NULL);
29998     ASSERT(buf != NULL);
29999
30000     mutex_enter(&(un->un_fi_mutex));
30001
30002     len = min(strlen(buf), 255);
30003     /* Add logged value to Injection log to be returned later */
30004     if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
30005         uint_t offset = strlen((char *)un->sd_fi_log);
30006         char *destp = (char *)un->sd_fi_log + offset;
30007         int i;
30008         for (i = 0; i < len; i++) {
30009             *destp++ = *buf++;
30010         }
30011         un->sd_fi_buf_len += len;
30012         un->sd_fi_log[un->sd_fi_buf_len] = '\0';
30013     }
30014
30015     mutex_exit(&(un->un_fi_mutex));
30016 }
30017
30018
30019 /*
30020  * Function: sd_faultinjection()
30021  *
30022  * Description: This routine takes the pkt and changes its
30023  *      content based on the error injection scenario.
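 *
 *      A typical injection session, as driven through the SDIOC
 *      ioctls handled above (a hypothetical user-level sketch):
 *
 *          ioctl(fd, SDIOCSTART);               open session, clear fifo
 *          ioctl(fd, SDIOCINSERTPKT, &fi_pkt);  stage a pkt-level fault
 *          ioctl(fd, SDIOCPUSH, &cnt);          advance fifo end by cnt
 *          ioctl(fd, SDIOCRUN);                 arm injection (checked here)
 *          ioctl(fd, SDIOCRETRIEVE, logbuf);    copy out the injection log
 *          ioctl(fd, SDIOCSTOP);                drain fifo, close session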
30024 * 30025 * Arguments: pktp - packet to be changed 30026 */ 30027 30028 static void 30029 sd_faultinjection(struct scsi_pkt *pktp) 30030 { 30031 uint_t i; 30032 struct sd_fi_pkt *fi_pkt; 30033 struct sd_fi_xb *fi_xb; 30034 struct sd_fi_un *fi_un; 30035 struct sd_fi_arq *fi_arq; 30036 struct buf *bp; 30037 struct sd_xbuf *xb; 30038 struct sd_lun *un; 30039 30040 ASSERT(pktp != NULL); 30041 30042 /* pull bp xb and un from pktp */ 30043 bp = (struct buf *)pktp->pkt_private; 30044 xb = SD_GET_XBUF(bp); 30045 un = SD_GET_UN(bp); 30046 30047 ASSERT(un != NULL); 30048 30049 mutex_enter(SD_MUTEX(un)); 30050 30051 SD_TRACE(SD_LOG_SDTEST, un, 30052 "sd_faultinjection: entry Injection from sdintr\n"); 30053 30054 /* if injection is off return */ 30055 if (sd_fault_injection_on == 0 || 30056 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 30057 mutex_exit(SD_MUTEX(un)); 30058 return; 30059 } 30060 30061 SD_INFO(SD_LOG_SDTEST, un, 30062 "sd_faultinjection: is working for copying\n"); 30063 30064 /* take next set off fifo */ 30065 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 30066 30067 fi_pkt = un->sd_fi_fifo_pkt[i]; 30068 fi_xb = un->sd_fi_fifo_xb[i]; 30069 fi_un = un->sd_fi_fifo_un[i]; 30070 fi_arq = un->sd_fi_fifo_arq[i]; 30071 30072 30073 /* set variables accordingly */ 30074 /* set pkt if it was on fifo */ 30075 if (fi_pkt != NULL) { 30076 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 30077 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 30078 if (fi_pkt->pkt_cdbp != 0xff) 30079 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 30080 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 30081 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 30082 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 30083 30084 } 30085 /* set xb if it was on fifo */ 30086 if (fi_xb != NULL) { 30087 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 30088 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 30089 if (fi_xb->xb_retry_count != 0) 30090 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 30091 SD_CONDSET(xb, xb, xb_victim_retry_count, 30092 "xb_victim_retry_count"); 30093 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 30094 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 30095 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 30096 30097 /* copy in block data from sense */ 30098 /* 30099 * if (fi_xb->xb_sense_data[0] != -1) { 30100 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 30101 * SENSE_LENGTH); 30102 * } 30103 */ 30104 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH); 30105 30106 /* copy in extended sense codes */ 30107 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30108 xb, es_code, "es_code"); 30109 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30110 xb, es_key, "es_key"); 30111 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30112 xb, es_add_code, "es_add_code"); 30113 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30114 xb, es_qual_code, "es_qual_code"); 30115 struct scsi_extended_sense *esp; 30116 esp = (struct scsi_extended_sense *)xb->xb_sense_data; 30117 esp->es_class = CLASS_EXTENDED_SENSE; 30118 } 30119 30120 /* set un if it was on fifo */ 30121 if (fi_un != NULL) { 30122 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 30123 SD_CONDSET(un, un, un_ctype, "un_ctype"); 30124 SD_CONDSET(un, un, un_reset_retry_count, 30125 "un_reset_retry_count"); 30126 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 30127 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 30128 SD_CONDSET(un, un, 
un_f_arq_enabled, "un_f_arq_enabled");
30129         SD_CONDSET(un, un, un_f_allow_bus_device_reset,
30130             "un_f_allow_bus_device_reset");
30131         SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");
30132
30133     }
30134
30135     /* copy in auto request sense if it was on fifo */
30136     if (fi_arq != NULL) {
30137         bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
30138     }
30139
30140     /* free structs */
30141     if (un->sd_fi_fifo_pkt[i] != NULL) {
30142         kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
30143     }
30144     if (un->sd_fi_fifo_xb[i] != NULL) {
30145         kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
30146     }
30147     if (un->sd_fi_fifo_un[i] != NULL) {
30148         kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
30149     }
30150     if (un->sd_fi_fifo_arq[i] != NULL) {
30151         kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
30152     }
30153
30154     /*
30155      * kmem_free does not guarantee that the pointer is set to NULL.
30156      * Since we use these pointers to determine whether we set
30157      * values or not, make sure they are always
30158      * NULL after the free.
30159      */
30160     un->sd_fi_fifo_pkt[i] = NULL;
30161     un->sd_fi_fifo_un[i] = NULL;
30162     un->sd_fi_fifo_xb[i] = NULL;
30163     un->sd_fi_fifo_arq[i] = NULL;
30164
30165     un->sd_fi_fifo_start++;
30166
30167     mutex_exit(SD_MUTEX(un));
30168
30169     SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
30170 }
30171
30172 #endif /* SD_FAULT_INJECTION */
30173
30174 /*
30175  * This routine is invoked in sd_unit_attach(). Before calling it, the
30176  * properties in the conf file should already have been processed, and the
30177  * "hotpluggable" property processed as well.
30178  *
30179  * The sd driver distinguishes three different types of devices: removable
30180  * media, non-removable media, and hotpluggable. The differences are defined below:
30181  *
30182  * 1. Device ID
30183  *
30184  *    The device ID of a device is used to identify this device. Refer to
30185  *    ddi_devid_register(9F).
30186  *
30187  *    For a non-removable media disk device which can provide 0x80 or 0x83
30188  *    VPD page (refer to INQUIRY command of SCSI SPC specification), a unique
30189  *    device ID is created to identify this device. For other non-removable
30190  *    media devices, a default device ID is created only if this device has
30191  *    at least 2 alternate cylinders. Otherwise, this device has no devid.
30192  *
30193  *    -------------------------------------------------------
30194  *    removable media  hotpluggable  | Can Have Device ID
30195  *    -------------------------------------------------------
30196  *        false            false     |     Yes
30197  *        false            true      |     Yes
30198  *        true               x       |     No
30199  *    -------------------------------------------------------
30200  *
30201  *
30202  * 2. SCSI group 4 commands
30203  *
30204  *    In the SCSI specs, only some commands in the group 4 command set can use
30205  *    8-byte addresses, which are needed to access storage spaces larger than 2TB.
30206  *    Other commands have no such capability. Without supporting group 4,
30207  *    it is impossible to make full use of the storage space of a disk with a
30208  *    capacity larger than 2TB.
30209  *
30210  *    -----------------------------------------------
30211  *    removable media  hotpluggable  LP64  |  Group
30212  *    -----------------------------------------------
30213  *        false            false     false |    1
30214  *        false            false     true  |    4
30215  *        false            true      false |    1
30216  *        false            true      true  |    4
30217  *        true               x         x   |    5
30218  *    -----------------------------------------------
30219  *
30220  *
30221  * 3. Check for VTOC Label
30222  *
30223  *    If a direct-access disk has no EFI label, sd will check whether it has
30224  *    a valid VTOC label. Now, sd also does that check for removable media
30225  *    and hotpluggable devices.
30226  *
30227  *    --------------------------------------------------------------
30228  *    Direct-Access  removable media  hotpluggable |  Check Label
30229  *    -------------------------------------------------------------
30230  *        false          false            false    |   No
30231  *        false          false            true     |   No
30232  *        false          true             false    |   Yes
30233  *        false          true             true     |   Yes
30234  *        true             x                x      |   Yes
30235  *    --------------------------------------------------------------
30236  *
30237  *
30238  * 4. Building default VTOC label
30239  *
30240  *    As section 3 says, sd checks whether certain kinds of devices have a
30241  *    VTOC label. If those devices have no valid VTOC label, sd(7d) will
30242  *    attempt to create a default VTOC label for them. Currently sd creates a
30243  *    default VTOC label for all devices on the x86 platform (VTOC_16), but
30244  *    only for removable media devices on SPARC (VTOC_8).
30245  *
30246  *    -----------------------------------------------------------
30247  *    removable media  hotpluggable  platform  |  Default Label
30248  *    -----------------------------------------------------------
30249  *        false            false      sparc    |     No
 *        false            false      x86      |     Yes
30250  *        false            true       x86      |     Yes
30251  *        false            true       sparc    |     Yes
30252  *        true               x          x      |     Yes
30253  *    ----------------------------------------------------------
30254  *
30255  *
30256  * 5. Supported blocksizes of target devices
30257  *
30258  *    sd supports a non-512-byte blocksize for removable media devices only.
30259  *    For other devices, only a 512-byte blocksize is supported. This may be
30260  *    changed in the near future because some RAID devices require a
30261  *    non-512-byte blocksize.
30262  *
30263  *    -----------------------------------------------------------
30264  *    removable media  hotpluggable  | non-512-byte blocksize
30265  *    -----------------------------------------------------------
30266  *        false            false     |   No
30267  *        false            true      |   No
30268  *        true               x       |   Yes
30269  *    -----------------------------------------------------------
30270  *
30271  *
30272  * 6. Automatic mount & unmount
30273  *
30274  *    The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used to
30275  *    query whether a device is a removable media device. It returns 1 for
30276  *    removable media devices, and 0 for others.
30277  *
30278  *    The automatic mounting subsystem should distinguish between the types
30279  *    of devices and apply automounting policies to each.
30280  *
30281  *
30282  * 7. fdisk partition management
30283  *
30284  *    Fdisk is the traditional partitioning method on the x86 platform. The
30285  *    sd(7d) driver supports fdisk partitions only on the x86 platform; on
30286  *    SPARC, sd doesn't support fdisk partitions at all. Note: pcfs(7fs) can
30287  *    recognize fdisk partitions on both the x86 and SPARC platforms.
30288  *
30289  *    -----------------------------------------------------------
30290  *    platform  removable media  USB/1394  |  fdisk supported
30291  *    -----------------------------------------------------------
30292  *     x86             X             X     |       true
30293  *    ------------------------------------------------------------
30294  *     sparc           X             X     |       false
30295  *    ------------------------------------------------------------
30296  *
30297  *
30298  * 8. MBOOT/MBR
30299  *
30300  *    Although sd(7d) doesn't support fdisk on the SPARC platform, it does
30301  *    support reading/writing the mboot for removable media devices on SPARC.
30302  *
30303  *    -----------------------------------------------------------
30304  *    platform  removable media  USB/1394  |  mboot supported
30305  *    -----------------------------------------------------------
30306  *     x86             X             X     |       true
30307  *    ------------------------------------------------------------
30308  *     sparc         false         false   |       false
30309  *     sparc         false         true    |       true
30310  *     sparc         true          false   |       true
30311  *     sparc         true          true    |       true
30312  *    ------------------------------------------------------------
30313  *
30314  *
30315  * 9. Error handling when opening a device
30316  *
30317  *    If opening a disk device fails, an errno is returned. For some kinds
30318  *    of errors, a different errno is returned depending on whether the
30319  *    device is a removable media device. This brings USB/1394 hard disks in
30320  *    line with expected hard disk behavior. It is not expected that this
30321  *    breaks any application.
30322  *
30323  *    ------------------------------------------------------
30324  *    removable media  hotpluggable  |  errno
30325  *    ------------------------------------------------------
30326  *        false            false     |   EIO
30327  *        false            true      |   EIO
30328  *        true               x       |   ENXIO
30329  *    ------------------------------------------------------
30330  *
30331  *
30332  * 10. ioctls: DKIOCEJECT, CDROMEJECT
30333  *
30334  *    These ioctls are applicable only to removable media devices.
30335  *
30336  *    -----------------------------------------------------------
30337  *    removable media  hotpluggable  | DKIOCEJECT, CDROMEJECT
30338  *    -----------------------------------------------------------
30339  *        false            false     |   No
30340  *        false            true      |   No
30341  *        true               x       |   Yes
30342  *    -----------------------------------------------------------
30343  *
30344  *
30345  * 11. Kstats for partitions
30346  *
30347  *    sd creates partition kstats for non-removable media devices. USB and
30348  *    FireWire hard disks now have partition kstats.
30349  *
30350  *    ------------------------------------------------------
30351  *    removable media  hotpluggable  |  kstat
30352  *    ------------------------------------------------------
30353  *        false            false     |   Yes
30354  *        false            true      |   Yes
30355  *        true               x       |   No
30356  *    ------------------------------------------------------
30357  *
30358  *
30359  * 12. Removable media & hotpluggable properties
30360  *
30361  *    The sd driver creates a "removable-media" property for removable media
30362  *    devices. A parent nexus driver creates a "hotpluggable" property if
30363  *    it supports hotplugging.
30364  *
30365  *    ---------------------------------------------------------------------
30366  *    removable media  hotpluggable  | "removable-media"  "hotpluggable"
30367  *    ---------------------------------------------------------------------
30368  *        false            false     |        No                No
30369  *        false            true      |        No                Yes
30370  *        true             false     |        Yes               No
30371  *        true             true      |        Yes               Yes
30372  *    ---------------------------------------------------------------------
30373  *
30374  *
30375  * 13. Power Management
30376  *
30377  *    sd only power manages removable media devices, devices that support
30378  *    LOG_SENSE, or devices that have a "pm-capable" property (PSARC/2002/250).
30379  *
30380  *    A parent nexus that supports hotplugging can also set "pm-capable"
30381  *    if the disk can be power managed.
30382  *
30383  *    ------------------------------------------------------------
30384  *    removable media  hotpluggable  pm-capable  |  power manage
30385  *    ------------------------------------------------------------
30386  *        false            false        false    |     No
30387  *        false            false        true     |     Yes
30388  *        false            true         false    |     No
30389  *        false            true         true     |     Yes
30390  *        true               x            x      |     Yes
30391  *    ------------------------------------------------------------
30392  *
30393  *    USB and FireWire hard disks can now be power managed independently
30394  *    of the framebuffer.
30395  *
30396  *
30397  * 14. Support for USB disks with capacity larger than 1TB
30398  *
30399  *    Currently, sd doesn't permit a fixed disk device with a capacity
30400  *    larger than 1TB to be used in a 32-bit operating system environment.
30401  *    However, sd doesn't apply that restriction to removable media devices;
30402  *    instead, it assumes that removable media devices cannot have a capacity
30403  *    larger than 1TB. Therefore, using such devices on a 32-bit system is
30404  *    only partially supported, which can cause unexpected results.
30405  *
30406  *    ---------------------------------------------------------------------
30407  *    removable media  USB/1394  |  Capacity > 1TB  |  Used in 32-bit env
30408  *    ---------------------------------------------------------------------
30409  *        false          false   |      true        |        No
30410  *        false          true    |      true        |        No
30411  *        true           false   |      true        |        Yes
30412  *        true           true    |      true        |        Yes
30413  *    ---------------------------------------------------------------------
30414  *
30415  *
30416  * 15. Check write-protection at open time
30417  *
30418  *    When a removable media device is opened for writing without the NDELAY
30419  *    flag, sd will check whether the device is writable. An attempt to open
30420  *    a write-protected device for writing without the NDELAY flag will fail.
30421  *
30422  *    ------------------------------------------------------------
30423  *    removable media  USB/1394  |  WP Check
30424  *    ------------------------------------------------------------
30425  *        false          false   |    No
30426  *        false          true    |    No
30427  *        true           false   |    Yes
30428  *        true           true    |    Yes
30429  *    ------------------------------------------------------------
30430  *
30431  *
30432  * 16. Syslog when a corrupted VTOC is encountered
30433  *
30434  *    Currently, if an invalid VTOC is encountered, sd only logs a syslog
30435  *    message for fixed SCSI disks.
30436  *    ------------------------------------------------------------
30437  *    removable media  USB/1394  |  print syslog
30438  *    ------------------------------------------------------------
30439  *        false          false   |    Yes
30440  *        false          true    |    No
30441  *        true           false   |    No
30442  *        true           true    |    No
30443  *    ------------------------------------------------------------
30444  */
30445 static void
30446 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
30447 {
30448     int pm_cap;
30449
30450     ASSERT(un->un_sd);
30451     ASSERT(un->un_sd->sd_inq);
30452
30453     /*
30454      * Enable SYNC CACHE support for all devices.
30455      */
30456     un->un_f_sync_cache_supported = TRUE;
30457
30458     /*
30459      * Set the sync cache required flag to false.
30460      * This ensures that no SYNC CACHE is
30461      * sent when there have been no writes.
30462      */
30463     un->un_f_sync_cache_required = FALSE;
30464
30465     if (un->un_sd->sd_inq->inq_rmb) {
30466         /*
30467          * The media of this device is removable, and for this kind
30468          * of device it is possible to change the medium after the
30469          * device is opened. Thus we should support this operation.
30470          */
30471         un->un_f_has_removable_media = TRUE;
30472
30473         /*
30474          * Support non-512-byte blocksizes for removable media devices.
30475          */
30476         un->un_f_non_devbsize_supported = TRUE;
30477
30478         /*
30479          * Assume that all removable media devices support DOOR_LOCK.
30480          */
30481         un->un_f_doorlock_supported = TRUE;
30482
30483         /*
30484          * A removable media device may be opened with the NDELAY
30485          * flag when there is no media in the drive; in that case we
30486          * don't care whether the device is writable. Without the
30487          * NDELAY flag, however, we need to check whether the media
30488          * is write-protected.
30489          */
30490         un->un_f_chk_wp_open = TRUE;
30491
30492         /*
30493          * We need to start a SCSI watch thread to monitor the media
30494          * state; when media is inserted or ejected, notify syseventd.
30495          */
30496         un->un_f_monitor_media_state = TRUE;
30497
30498         /*
30499          * Some devices don't support the START_STOP_UNIT command,
30500          * so we check whether a device supports it
30501          * before sending it.
30502          */
30503         un->un_f_check_start_stop = TRUE;
30504
30505         /*
30506          * Support the eject media ioctls:
30507          * FDEJECT, DKIOCEJECT, CDROMEJECT
30508          */
30509         un->un_f_eject_media_supported = TRUE;
30510
30511         /*
30512          * Because many removable-media devices don't support
30513          * LOG_SENSE, we cannot use that command to check whether
30514          * a removable media device supports power management.
30515          * We assume that they support power management via the
30516          * START_STOP_UNIT command and can be spun up and down
30517          * without limitations.
30518          */
30519         un->un_f_pm_supported = TRUE;
30520
30521         /*
30522          * We need to create a zero-length (Boolean) property,
30523          * "removable-media", for removable media devices.
30524          * Note that the return value is not checked: if the property
30525          * cannot be created, we do not want the attach to fail
30526          * altogether. This is consistent
30527          * with other property creation in attach.
30528          */
30529         (void) ddi_prop_create(DDI_DEV_T_NONE, devi,
30530             DDI_PROP_CANSLEEP, "removable-media", NULL, 0);
30531
30532     } else {
30533         /*
30534          * Create a device ID for the device.
30535          */
30536         un->un_f_devid_supported = TRUE;
30537
30538         /*
30539          * Spin up non-removable media devices once they are attached.
30540          */
30541         un->un_f_attach_spinup = TRUE;
30542
30543         /*
30544          * According to the SCSI specification, sense data comes in
30545          * two formats: fixed format and descriptor format. At
30546          * present, we don't support descriptor-format sense data
30547          * for removable media.
30548          */
30549         if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
30550             un->un_f_descr_format_supported = TRUE;
30551         }
30552
30553         /*
30554          * Kstats are created only for non-removable media devices.
30555          *
30556          * Set this in sd.conf to 0 in order to disable kstats. The
30557          * default is 1, so they are enabled by default.
30558          */
30559         un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
30560             SD_DEVINFO(un), DDI_PROP_DONTPASS,
30561             "enable-partition-kstats", 1));
30562
30563         /*
30564          * Check whether the HBA has set the "pm-capable" property.
30565          * If "pm-capable" exists and is non-zero, we can
30566          * power manage the device without checking the start/stop
30567          * cycle count log sense page.
30568          *
30569          * If "pm-capable" exists and is set to false (0),
30570          * then we should not power manage the device.
30571          *
30572          * If "pm-capable" doesn't exist, pm_cap will
 *         be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case,
30573          * sd will check the start/stop cycle count log sense page
30574          * and power manage the device if the cycle count limit has
30575          * not been exceeded.
30576          */
30577         pm_cap = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
30578             DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
30579         if (SD_PM_CAPABLE_IS_UNDEFINED(pm_cap)) {
30580             un->un_f_log_sense_supported = TRUE;
30581             if (!un->un_f_power_condition_disabled &&
30582                 SD_INQUIRY(un)->inq_ansi == 6) {
30583                 un->un_f_power_condition_supported = TRUE;
30584             }
30585         } else {
30586             /*
30587              * The pm-capable property exists.
30588              *
30589              * Convert "TRUE" values for pm_cap to
30590              * SD_PM_CAPABLE_IS_TRUE to make it easier to check
30591              * later. "TRUE" values are any values defined in
30592              * inquiry.h.
30593              */
30594             if (SD_PM_CAPABLE_IS_FALSE(pm_cap)) {
30595                 un->un_f_log_sense_supported = FALSE;
30596             } else {
30597                 /* SD_PM_CAPABLE_IS_TRUE case */
30598                 un->un_f_pm_supported = TRUE;
30599                 if (!un->un_f_power_condition_disabled &&
30600                     SD_PM_CAPABLE_IS_SPC_4(pm_cap)) {
30601                     un->un_f_power_condition_supported =
30602                         TRUE;
30603                 }
30604                 if (SD_PM_CAP_LOG_SUPPORTED(pm_cap)) {
30605                     un->un_f_log_sense_supported = TRUE;
30606                     un->un_f_pm_log_sense_smart =
30607                         SD_PM_CAP_SMART_LOG(pm_cap);
30608                 }
30609             }
30610
30611             SD_INFO(SD_LOG_ATTACH_DETACH, un,
30612                 "sd_unit_attach: un:0x%p pm-capable "
30613                 "property set to %d.\n", un, un->un_f_pm_supported);
30614         }
30615     }
30616
30617     if (un->un_f_is_hotpluggable) {
30618
30619         /*
30620          * We have to watch hotpluggable devices as well, since
30621          * that is the only way for userland applications to
30622          * detect hot removal while the device is busy/mounted.
30623          */
30624         un->un_f_monitor_media_state = TRUE;
30625
30626         un->un_f_check_start_stop = TRUE;
30627
30628     }
30629 }
30630
30631 /*
30632  * sd_tg_rdwr:
30633  *    Provides rdwr access for cmlb via sd_tgops. start_block is
30634  *    in units of the system block size; reqlength is in bytes.
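 *
 *    Worked example of the re-blocking math below, assuming
 *    un_sys_blocksize = 512 and un_tgt_blocksize = 4096 (illustrative
 *    values): for start_block = 9 and reqlength = 1024,
 *
 *        first_byte  = 9 * 512 = 4608
 *        real_addr   = 4608 / 4096 = 1               (target block)
 *        end_block   = (4608 + 1024 + 4095) / 4096 = 2
 *        buffer_size = (2 - 1) * 4096 = 4096
 *
 *    Since 4608 is not a multiple of 4096, the request is unaligned,
 *    so a bounce buffer (dkl) is allocated and the caller's data is
 *    copied to/from offset 4608 % 4096 = 512 within it.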
30635 * 30636 */ 30637 static int 30638 sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 30639 diskaddr_t start_block, size_t reqlength, void *tg_cookie) 30640 { 30641 struct sd_lun *un; 30642 int path_flag = (int)(uintptr_t)tg_cookie; 30643 char *dkl = NULL; 30644 diskaddr_t real_addr = start_block; 30645 diskaddr_t first_byte, end_block; 30646 30647 size_t buffer_size = reqlength; 30648 int rval = 0; 30649 diskaddr_t cap; 30650 uint32_t lbasize; 30651 sd_ssc_t *ssc; 30652 30653 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 30654 if (un == NULL) 30655 return (ENXIO); 30656 30657 if (cmd != TG_READ && cmd != TG_WRITE) 30658 return (EINVAL); 30659 30660 ssc = sd_ssc_init(un); 30661 mutex_enter(SD_MUTEX(un)); 30662 if (un->un_f_tgt_blocksize_is_valid == FALSE) { 30663 mutex_exit(SD_MUTEX(un)); 30664 rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 30665 &lbasize, path_flag); 30666 if (rval != 0) 30667 goto done1; 30668 mutex_enter(SD_MUTEX(un)); 30669 sd_update_block_info(un, lbasize, cap); 30670 if ((un->un_f_tgt_blocksize_is_valid == FALSE)) { 30671 mutex_exit(SD_MUTEX(un)); 30672 rval = EIO; 30673 goto done; 30674 } 30675 } 30676 30677 if (NOT_DEVBSIZE(un)) { 30678 /* 30679 * sys_blocksize != tgt_blocksize, need to re-adjust 30680 * blkno and save the index to beginning of dk_label 30681 */ 30682 first_byte = SD_SYSBLOCKS2BYTES(start_block); 30683 real_addr = first_byte / un->un_tgt_blocksize; 30684 30685 end_block = (first_byte + reqlength + 30686 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize; 30687 30688 /* round up buffer size to multiple of target block size */ 30689 buffer_size = (end_block - real_addr) * un->un_tgt_blocksize; 30690 30691 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr", 30692 "label_addr: 0x%x allocation size: 0x%x\n", 30693 real_addr, buffer_size); 30694 30695 if (((first_byte % un->un_tgt_blocksize) != 0) || 30696 (reqlength % un->un_tgt_blocksize) != 0) 30697 /* the request is not aligned */ 30698 dkl = kmem_zalloc(buffer_size, KM_SLEEP); 30699 } 30700 30701 /* 30702 * The MMC standard allows READ CAPACITY to be 30703 * inaccurate by a bounded amount (in the interest of 30704 * response latency). As a result, failed READs are 30705 * commonplace (due to the reading of metadata and not 30706 * data). Depending on the per-Vendor/drive Sense data, 30707 * the failed READ can cause many (unnecessary) retries. 30708 */ 30709 30710 if (ISCD(un) && (cmd == TG_READ) && 30711 (un->un_f_blockcount_is_valid == TRUE) && 30712 ((start_block == (un->un_blockcount - 1))|| 30713 (start_block == (un->un_blockcount - 2)))) { 30714 path_flag = SD_PATH_DIRECT_PRIORITY; 30715 } 30716 30717 mutex_exit(SD_MUTEX(un)); 30718 if (cmd == TG_READ) { 30719 rval = sd_send_scsi_READ(ssc, (dkl != NULL)? dkl: bufaddr, 30720 buffer_size, real_addr, path_flag); 30721 if (dkl != NULL) 30722 bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block, 30723 real_addr), bufaddr, reqlength); 30724 } else { 30725 if (dkl) { 30726 rval = sd_send_scsi_READ(ssc, dkl, buffer_size, 30727 real_addr, path_flag); 30728 if (rval) { 30729 goto done1; 30730 } 30731 bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block, 30732 real_addr), reqlength); 30733 } 30734 rval = sd_send_scsi_WRITE(ssc, (dkl != NULL)? 
dkl: bufaddr, 30735 buffer_size, real_addr, path_flag); 30736 } 30737 30738 done1: 30739 if (dkl != NULL) 30740 kmem_free(dkl, buffer_size); 30741 30742 if (rval != 0) { 30743 if (rval == EIO) 30744 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 30745 else 30746 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 30747 } 30748 done: 30749 sd_ssc_fini(ssc); 30750 return (rval); 30751 } 30752 30753 30754 static int 30755 sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie) 30756 { 30757 30758 struct sd_lun *un; 30759 diskaddr_t cap; 30760 uint32_t lbasize; 30761 int path_flag = (int)(uintptr_t)tg_cookie; 30762 int ret = 0; 30763 30764 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 30765 if (un == NULL) 30766 return (ENXIO); 30767 30768 switch (cmd) { 30769 case TG_GETPHYGEOM: 30770 case TG_GETVIRTGEOM: 30771 case TG_GETCAPACITY: 30772 case TG_GETBLOCKSIZE: 30773 mutex_enter(SD_MUTEX(un)); 30774 30775 if ((un->un_f_blockcount_is_valid == TRUE) && 30776 (un->un_f_tgt_blocksize_is_valid == TRUE)) { 30777 cap = un->un_blockcount; 30778 lbasize = un->un_tgt_blocksize; 30779 mutex_exit(SD_MUTEX(un)); 30780 } else { 30781 sd_ssc_t *ssc; 30782 mutex_exit(SD_MUTEX(un)); 30783 ssc = sd_ssc_init(un); 30784 ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 30785 &lbasize, path_flag); 30786 if (ret != 0) { 30787 if (ret == EIO) 30788 sd_ssc_assessment(ssc, 30789 SD_FMT_STATUS_CHECK); 30790 else 30791 sd_ssc_assessment(ssc, 30792 SD_FMT_IGNORE); 30793 sd_ssc_fini(ssc); 30794 return (ret); 30795 } 30796 sd_ssc_fini(ssc); 30797 mutex_enter(SD_MUTEX(un)); 30798 sd_update_block_info(un, lbasize, cap); 30799 if ((un->un_f_blockcount_is_valid == FALSE) || 30800 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 30801 mutex_exit(SD_MUTEX(un)); 30802 return (EIO); 30803 } 30804 mutex_exit(SD_MUTEX(un)); 30805 } 30806 30807 if (cmd == TG_GETCAPACITY) { 30808 *(diskaddr_t *)arg = cap; 30809 return (0); 30810 } 30811 30812 if (cmd == TG_GETBLOCKSIZE) { 30813 *(uint32_t *)arg = lbasize; 30814 return (0); 30815 } 30816 30817 if (cmd == TG_GETPHYGEOM) 30818 ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg, 30819 cap, lbasize, path_flag); 30820 else 30821 /* TG_GETVIRTGEOM */ 30822 ret = sd_get_virtual_geometry(un, 30823 (cmlb_geom_t *)arg, cap, lbasize); 30824 30825 return (ret); 30826 30827 case TG_GETATTR: 30828 mutex_enter(SD_MUTEX(un)); 30829 ((tg_attribute_t *)arg)->media_is_writable = 30830 un->un_f_mmc_writable_media; 30831 mutex_exit(SD_MUTEX(un)); 30832 return (0); 30833 default: 30834 return (ENOTTY); 30835 30836 } 30837 } 30838 30839 /* 30840 * Function: sd_ssc_ereport_post 30841 * 30842 * Description: Will be called when SD driver need to post an ereport. 30843 * 30844 * Context: Kernel thread or interrupt context. 
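 *
 *      Summary of the ereport class selection implemented below:
 *
 *        drv_assess == SD_FM_DRV_RECOVERY     ereport.io.scsi.cmd.disk.recovered
 *        SSC_FLAGS_INVALID_* flags set        ereport.io.scsi.cmd.disk.dev.uderr
 *        pkt-reason != CMD_CMPLT, TRAN_ABORT  ereport.io.scsi.cmd.disk.tran
 *        sense data, sense-key == 0x3         ereport.io.scsi.cmd.disk.dev.rqs.merr
 *        sense data, other sense-key          ereport.io.scsi.cmd.disk.dev.rqs.derr
 *        no sense data, bad stat-code         ereport.io.scsi.cmd.disk.dev.serr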
30845 */ 30846 static void 30847 sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess) 30848 { 30849 int uscsi_path_instance = 0; 30850 uchar_t uscsi_pkt_reason; 30851 uint32_t uscsi_pkt_state; 30852 uint32_t uscsi_pkt_statistics; 30853 uint64_t uscsi_ena; 30854 uchar_t op_code; 30855 uint8_t *sensep; 30856 union scsi_cdb *cdbp; 30857 uint_t cdblen = 0; 30858 uint_t senlen = 0; 30859 struct sd_lun *un; 30860 dev_info_t *dip; 30861 char *devid; 30862 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON | 30863 SSC_FLAGS_INVALID_STATUS | 30864 SSC_FLAGS_INVALID_SENSE | 30865 SSC_FLAGS_INVALID_DATA; 30866 char assessment[16]; 30867 30868 ASSERT(ssc != NULL); 30869 ASSERT(ssc->ssc_uscsi_cmd != NULL); 30870 ASSERT(ssc->ssc_uscsi_info != NULL); 30871 30872 un = ssc->ssc_un; 30873 ASSERT(un != NULL); 30874 30875 dip = un->un_sd->sd_dev; 30876 30877 /* 30878 * Get the devid: 30879 * devid will only be passed to non-transport error reports. 30880 */ 30881 devid = DEVI(dip)->devi_devid_str; 30882 30883 /* 30884 * If we are syncing or dumping, the command will not be executed 30885 * so we bypass this situation. 30886 */ 30887 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 30888 (un->un_state == SD_STATE_DUMPING)) 30889 return; 30890 30891 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason; 30892 uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance; 30893 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state; 30894 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics; 30895 uscsi_ena = ssc->ssc_uscsi_info->ui_ena; 30896 30897 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 30898 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb; 30899 30900 /* In rare cases, EG:DOORLOCK, the cdb could be NULL */ 30901 if (cdbp == NULL) { 30902 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 30903 "sd_ssc_ereport_post meet empty cdb\n"); 30904 return; 30905 } 30906 30907 op_code = cdbp->scc_cmd; 30908 30909 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen; 30910 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 30911 ssc->ssc_uscsi_cmd->uscsi_rqresid); 30912 30913 if (senlen > 0) 30914 ASSERT(sensep != NULL); 30915 30916 /* 30917 * Initialize drv_assess to corresponding values. 30918 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending 30919 * on the sense-key returned back. 30920 */ 30921 switch (drv_assess) { 30922 case SD_FM_DRV_RECOVERY: 30923 (void) sprintf(assessment, "%s", "recovered"); 30924 break; 30925 case SD_FM_DRV_RETRY: 30926 (void) sprintf(assessment, "%s", "retry"); 30927 break; 30928 case SD_FM_DRV_NOTICE: 30929 (void) sprintf(assessment, "%s", "info"); 30930 break; 30931 case SD_FM_DRV_FATAL: 30932 default: 30933 (void) sprintf(assessment, "%s", "unknown"); 30934 } 30935 /* 30936 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered 30937 * command, we will post ereport.io.scsi.cmd.disk.recovered. 30938 * driver-assessment will always be "recovered" here. 
30939 */ 30940 if (drv_assess == SD_FM_DRV_RECOVERY) { 30941 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30942 "cmd.disk.recovered", uscsi_ena, devid, DDI_NOSLEEP, 30943 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30944 "driver-assessment", DATA_TYPE_STRING, assessment, 30945 "op-code", DATA_TYPE_UINT8, op_code, 30946 "cdb", DATA_TYPE_UINT8_ARRAY, 30947 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30948 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30949 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30950 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 30951 NULL); 30952 return; 30953 } 30954 30955 /* 30956 * If there is un-expected/un-decodable data, we should post 30957 * ereport.io.scsi.cmd.disk.dev.uderr. 30958 * driver-assessment will be set based on parameter drv_assess. 30959 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back. 30960 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered. 30961 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered. 30962 * SSC_FLAGS_INVALID_DATA - invalid data sent back. 30963 */ 30964 if (ssc->ssc_flags & ssc_invalid_flags) { 30965 if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) { 30966 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30967 "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP, 30968 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30969 "driver-assessment", DATA_TYPE_STRING, 30970 drv_assess == SD_FM_DRV_FATAL ? 30971 "fail" : assessment, 30972 "op-code", DATA_TYPE_UINT8, op_code, 30973 "cdb", DATA_TYPE_UINT8_ARRAY, 30974 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30975 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30976 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30977 "pkt-stats", DATA_TYPE_UINT32, 30978 uscsi_pkt_statistics, 30979 "stat-code", DATA_TYPE_UINT8, 30980 ssc->ssc_uscsi_cmd->uscsi_status, 30981 "un-decode-info", DATA_TYPE_STRING, 30982 ssc->ssc_info, 30983 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 30984 senlen, sensep, 30985 NULL); 30986 } else { 30987 /* 30988 * For other type of invalid data, the 30989 * un-decode-value field would be empty because the 30990 * un-decodable content could be seen from upper 30991 * level payload or inside un-decode-info. 30992 */ 30993 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30994 "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP, 30995 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30996 "driver-assessment", DATA_TYPE_STRING, 30997 drv_assess == SD_FM_DRV_FATAL ? 30998 "fail" : assessment, 30999 "op-code", DATA_TYPE_UINT8, op_code, 31000 "cdb", DATA_TYPE_UINT8_ARRAY, 31001 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31002 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 31003 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 31004 "pkt-stats", DATA_TYPE_UINT32, 31005 uscsi_pkt_statistics, 31006 "stat-code", DATA_TYPE_UINT8, 31007 ssc->ssc_uscsi_cmd->uscsi_status, 31008 "un-decode-info", DATA_TYPE_STRING, 31009 ssc->ssc_info, 31010 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 31011 0, NULL, 31012 NULL); 31013 } 31014 ssc->ssc_flags &= ~ssc_invalid_flags; 31015 return; 31016 } 31017 31018 if (uscsi_pkt_reason != CMD_CMPLT || 31019 (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) { 31020 /* 31021 * pkt-reason != CMD_CMPLT or SSC_FLAGS_TRAN_ABORT was 31022 * set inside sd_start_cmds due to errors(bad packet or 31023 * fatal transport error), we should take it as a 31024 * transport error, so we post ereport.io.scsi.cmd.disk.tran. 31025 * driver-assessment will be set based on drv_assess. 
	if (uscsi_pkt_reason != CMD_CMPLT ||
	    (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) {
		/*
		 * pkt-reason != CMD_CMPLT, or SSC_FLAGS_TRAN_ABORT was
		 * set inside sd_start_cmds due to errors (bad packet or
		 * fatal transport error). Treat it as a transport error
		 * and post ereport.io.scsi.cmd.disk.tran.
		 * driver-assessment is set based on drv_assess.
		 * devid is set to NULL because this is a transport error.
		 */
		if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
			ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;

		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
		    "cmd.disk.tran", uscsi_ena, NULL, DDI_NOSLEEP, FM_VERSION,
		    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    "driver-assessment", DATA_TYPE_STRING,
		    drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
	} else {
		/*
		 * If we got here, we have a completed command and need
		 * to investigate the sense data further to decide what
		 * kind of ereport to post.
		 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr
		 * if sense-key == 0x3.
		 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise.
		 * driver-assessment is set based on the drv_assess
		 * parameter.
		 */
		if (senlen > 0) {
			/*
			 * Here we have sense data available.
			 */
			uint8_t sense_key;
			sense_key = scsi_sense_key(sensep);
			if (sense_key == 0x3) {
				/*
				 * sense-key == 0x3 (medium error);
				 * driver-assessment should be "fatal" if
				 * drv_assess is SD_FM_DRV_FATAL.
				 */
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance,
				    "cmd.disk.dev.rqs.merr",
				    uscsi_ena, devid, DDI_NOSLEEP, FM_VERSION,
				    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    "fatal" : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT32, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    "lba",
				    DATA_TYPE_UINT64,
				    ssc->ssc_uscsi_info->ui_lba,
				    NULL);
			} else {
				/*
				 * If sense-key == 0x4 (hardware error),
				 * driver-assessment should be "fatal" if
				 * drv_assess is SD_FM_DRV_FATAL.
				 */
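				/*
				 * Illustrative note: key/asc/ascq below
				 * form the standard SCSI sense triplet;
				 * e.g. key 0x4 with asc 0x44, ascq 0x0
				 * ("internal target failure") is a typical
				 * hardware-error refinement.
				 */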
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance,
				    "cmd.disk.dev.rqs.derr",
				    uscsi_ena, devid, DDI_NOSLEEP,
				    FM_VERSION,
				    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    (sense_key == 0x4 ?
				    "fatal" : "fail") : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT32, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    NULL);
			}
		} else {
			/*
			 * For stat_code == STATUS_GOOD, this is not a
			 * hardware error, so there is nothing to post.
			 */
			if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD)
				return;

			/*
			 * Post ereport.io.scsi.cmd.disk.dev.serr if we got
			 * a bad stat-code but sense data is unavailable.
			 * driver-assessment is set based on the drv_assess
			 * parameter.
			 */
			scsi_fm_ereport_post(un->un_sd,
			    uscsi_path_instance, "cmd.disk.dev.serr",
			    uscsi_ena, devid, DDI_NOSLEEP, FM_VERSION,
			    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb",
			    DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason",
			    DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state",
			    DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats",
			    DATA_TYPE_UINT32, uscsi_pkt_statistics,
			    "stat-code",
			    DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    NULL);
		}
	}
}
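
/*
 * Illustrative usage sketch (hypothetical, not actual driver code): a
 * command-completion path captures the packet state under SD_MUTEX and
 * then posts the ereport with the driver's own assessment:
 *
 *	mutex_enter(SD_MUTEX(un));
 *	sd_ssc_extract_info(ssc, un, pktp, bp, xp);
 *	mutex_exit(SD_MUTEX(un));
 *	sd_ssc_ereport_post(ssc, SD_FM_DRV_RETRY);
 *
 * sd_ssc_extract_info() must be called with SD_MUTEX held (see the
 * mutex_owned() assertion below), while sd_ssc_ereport_post() takes the
 * assessment chosen by the caller.
 */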
/*
 *    Function: sd_ssc_extract_info
 *
 * Description: Extract information available to help generate ereports.
 *
 *     Context: Kernel thread or interrupt context.
 */
static void
sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, struct sd_xbuf *xp)
{
	size_t senlen = 0;
	union scsi_cdb *cdbp;
	int path_instance;
	/*
	 * Need the scsi_cdb_size array to determine the cdb length.
	 */
	extern uchar_t scsi_cdb_size[];

	ASSERT(un != NULL);
	ASSERT(pktp != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(ssc != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/*
	 * Transfer the cdb buffer pointer here.
	 */
	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;

	ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
	ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;

	/*
	 * Transfer the sense data buffer pointer if sense data is available;
	 * calculate the sense data length first.
	 */
	if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
	    (xp->xb_sense_state & STATE_ARQ_DONE)) {
		/*
		 * For the arq case, we enter here.
		 */
		if (xp->xb_sense_state & STATE_XARQ_DONE) {
			senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		} else {
			senlen = SENSE_LENGTH;
		}
	} else {
		/*
		 * For the non-arq case, we enter this branch.
		 */
		if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
		    (xp->xb_sense_state & STATE_XFERRED_DATA)) {
			senlen = SENSE_LENGTH - xp->xb_sense_resid;
		}
	}

	ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
	ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
	ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;

	ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);

	/*
	 * Only transfer path_instance when the scsi_pkt was properly
	 * allocated.
	 */
	path_instance = pktp->pkt_path_instance;
	if (scsi_pkt_allocated_correctly(pktp) && path_instance)
		ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
	else
		ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;

	/*
	 * Copy in the other fields we may need when posting an ereport.
	 */
	ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
	ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
	ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
	ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

	/*
	 * For a partial read/write command, we do not create an ena, so that
	 * a successful command is not mistakenly recognized as recovered.
	 */
	if ((pktp->pkt_reason == CMD_CMPLT) &&
	    (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
	    (senlen == 0)) {
		return;
	}

	/*
	 * To associate the ereports of a single command execution flow, we
	 * need a shared ena for this specific command.
	 */
	if (xp->xb_ena == 0)
		xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
	ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
}
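
/*
 * Illustrative note on ena sharing (a summary of the logic above, not new
 * driver behavior): a command that is retried and finally fails produces
 * several ereports that all carry the same ena:
 *
 *	first failure:	xb_ena == 0, so fm_ena_generate(0, FM_ENA_FMT1)
 *			creates a fresh ena that is cached in the xbuf;
 *	each retry:	sd_ssc_extract_info() copies the cached xb_ena
 *			into ui_ena, so every ereport posted for this
 *			command reuses the same ena;
 *	diagnosis:	FMA consumers can therefore group the "retry" and
 *			"fail"/"fatal" ereports of one command by ena.
 */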