1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * SCSI disk target driver. 29 */ 30 #include <sys/scsi/scsi.h> 31 #include <sys/dkbad.h> 32 #include <sys/dklabel.h> 33 #include <sys/dkio.h> 34 #include <sys/fdio.h> 35 #include <sys/cdio.h> 36 #include <sys/mhd.h> 37 #include <sys/vtoc.h> 38 #include <sys/dktp/fdisk.h> 39 #include <sys/kstat.h> 40 #include <sys/vtrace.h> 41 #include <sys/note.h> 42 #include <sys/thread.h> 43 #include <sys/proc.h> 44 #include <sys/efi_partition.h> 45 #include <sys/var.h> 46 #include <sys/aio_req.h> 47 48 #ifdef __lock_lint 49 #define _LP64 50 #define __amd64 51 #endif 52 53 #if (defined(__fibre)) 54 /* Note: is there a leadville version of the following? 
*/ 55 #include <sys/fc4/fcal_linkapp.h> 56 #endif 57 #include <sys/taskq.h> 58 #include <sys/uuid.h> 59 #include <sys/byteorder.h> 60 #include <sys/sdt.h> 61 62 #include "sd_xbuf.h" 63 64 #include <sys/scsi/targets/sddef.h> 65 #include <sys/cmlb.h> 66 #include <sys/sysevent/eventdefs.h> 67 #include <sys/sysevent/dev.h> 68 69 #include <sys/fm/protocol.h> 70 71 /* 72 * Loadable module info. 73 */ 74 #if (defined(__fibre)) 75 #define SD_MODULE_NAME "SCSI SSA/FCAL Disk Driver" 76 char _depends_on[] = "misc/scsi misc/cmlb drv/fcp"; 77 #else /* !__fibre */ 78 #define SD_MODULE_NAME "SCSI Disk Driver" 79 char _depends_on[] = "misc/scsi misc/cmlb"; 80 #endif /* !__fibre */ 81 82 /* 83 * Define the interconnect type, to allow the driver to distinguish 84 * between parallel SCSI (sd) and fibre channel (ssd) behaviors. 85 * 86 * This is really for backward compatibility. In the future, the driver 87 * should actually check the "interconnect-type" property as reported by 88 * the HBA; however at present this property is not defined by all HBAs, 89 * so we will use this #define (1) to permit the driver to run in 90 * backward-compatibility mode; and (2) to print a notification message 91 * if an FC HBA does not support the "interconnect-type" property. The 92 * behavior of the driver will be to assume parallel SCSI behaviors unless 93 * the "interconnect-type" property is defined by the HBA **AND** has a 94 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or 95 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre 96 * Channel behaviors (as per the old ssd). (Note that the 97 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and 98 * will result in the driver assuming parallel SCSI behaviors.) 99 * 100 * (see common/sys/scsi/impl/services.h) 101 * 102 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default 103 * since some FC HBAs may already support that, and there is some code in 104 * the driver that already looks for it. 
Using INTERCONNECT_FABRIC as the 105 * default would confuse that code, and besides things should work fine 106 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the 107 * "interconnect_type" property. 108 * 109 */ 110 #if (defined(__fibre)) 111 #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_FIBRE 112 #else 113 #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_PARALLEL 114 #endif 115 116 /* 117 * The name of the driver, established from the module name in _init. 118 */ 119 static char *sd_label = NULL; 120 121 /* 122 * Driver name is unfortunately prefixed on some driver.conf properties. 123 */ 124 #if (defined(__fibre)) 125 #define sd_max_xfer_size ssd_max_xfer_size 126 #define sd_config_list ssd_config_list 127 static char *sd_max_xfer_size = "ssd_max_xfer_size"; 128 static char *sd_config_list = "ssd-config-list"; 129 #else 130 static char *sd_max_xfer_size = "sd_max_xfer_size"; 131 static char *sd_config_list = "sd-config-list"; 132 #endif 133 134 /* 135 * Driver global variables 136 */ 137 138 #if (defined(__fibre)) 139 /* 140 * These #defines are to avoid namespace collisions that occur because this 141 * code is currently used to compile two separate driver modules: sd and ssd. 142 * All global variables need to be treated this way (even if declared static) 143 * in order to allow the debugger to resolve the names properly. 144 * It is anticipated that in the near future the ssd module will be obsoleted, 145 * at which time this namespace issue should go away. 
146 */ 147 #define sd_state ssd_state 148 #define sd_io_time ssd_io_time 149 #define sd_failfast_enable ssd_failfast_enable 150 #define sd_ua_retry_count ssd_ua_retry_count 151 #define sd_report_pfa ssd_report_pfa 152 #define sd_max_throttle ssd_max_throttle 153 #define sd_min_throttle ssd_min_throttle 154 #define sd_rot_delay ssd_rot_delay 155 156 #define sd_retry_on_reservation_conflict \ 157 ssd_retry_on_reservation_conflict 158 #define sd_reinstate_resv_delay ssd_reinstate_resv_delay 159 #define sd_resv_conflict_name ssd_resv_conflict_name 160 161 #define sd_component_mask ssd_component_mask 162 #define sd_level_mask ssd_level_mask 163 #define sd_debug_un ssd_debug_un 164 #define sd_error_level ssd_error_level 165 166 #define sd_xbuf_active_limit ssd_xbuf_active_limit 167 #define sd_xbuf_reserve_limit ssd_xbuf_reserve_limit 168 169 #define sd_tr ssd_tr 170 #define sd_reset_throttle_timeout ssd_reset_throttle_timeout 171 #define sd_qfull_throttle_timeout ssd_qfull_throttle_timeout 172 #define sd_qfull_throttle_enable ssd_qfull_throttle_enable 173 #define sd_check_media_time ssd_check_media_time 174 #define sd_wait_cmds_complete ssd_wait_cmds_complete 175 #define sd_label_mutex ssd_label_mutex 176 #define sd_detach_mutex ssd_detach_mutex 177 #define sd_log_buf ssd_log_buf 178 #define sd_log_mutex ssd_log_mutex 179 180 #define sd_disk_table ssd_disk_table 181 #define sd_disk_table_size ssd_disk_table_size 182 #define sd_sense_mutex ssd_sense_mutex 183 #define sd_cdbtab ssd_cdbtab 184 185 #define sd_cb_ops ssd_cb_ops 186 #define sd_ops ssd_ops 187 #define sd_additional_codes ssd_additional_codes 188 #define sd_tgops ssd_tgops 189 190 #define sd_minor_data ssd_minor_data 191 #define sd_minor_data_efi ssd_minor_data_efi 192 193 #define sd_tq ssd_tq 194 #define sd_wmr_tq ssd_wmr_tq 195 #define sd_taskq_name ssd_taskq_name 196 #define sd_wmr_taskq_name ssd_wmr_taskq_name 197 #define sd_taskq_minalloc ssd_taskq_minalloc 198 #define sd_taskq_maxalloc ssd_taskq_maxalloc 
199 200 #define sd_dump_format_string ssd_dump_format_string 201 202 #define sd_iostart_chain ssd_iostart_chain 203 #define sd_iodone_chain ssd_iodone_chain 204 205 #define sd_pm_idletime ssd_pm_idletime 206 207 #define sd_force_pm_supported ssd_force_pm_supported 208 209 #define sd_dtype_optical_bind ssd_dtype_optical_bind 210 211 #define sd_ssc_init ssd_ssc_init 212 #define sd_ssc_send ssd_ssc_send 213 #define sd_ssc_fini ssd_ssc_fini 214 #define sd_ssc_assessment ssd_ssc_assessment 215 #define sd_ssc_post ssd_ssc_post 216 #define sd_ssc_print ssd_ssc_print 217 #define sd_ssc_ereport_post ssd_ssc_ereport_post 218 #define sd_ssc_set_info ssd_ssc_set_info 219 #define sd_ssc_extract_info ssd_ssc_extract_info 220 221 #endif 222 223 #ifdef SDDEBUG 224 int sd_force_pm_supported = 0; 225 #endif /* SDDEBUG */ 226 227 void *sd_state = NULL; 228 int sd_io_time = SD_IO_TIME; 229 int sd_failfast_enable = 1; 230 int sd_ua_retry_count = SD_UA_RETRY_COUNT; 231 int sd_report_pfa = 1; 232 int sd_max_throttle = SD_MAX_THROTTLE; 233 int sd_min_throttle = SD_MIN_THROTTLE; 234 int sd_rot_delay = 4; /* Default 4ms Rotation delay */ 235 int sd_qfull_throttle_enable = TRUE; 236 237 int sd_retry_on_reservation_conflict = 1; 238 int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 239 _NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay)) 240 241 static int sd_dtype_optical_bind = -1; 242 243 /* Note: the following is not a bug, it really is "sd_" and not "ssd_" */ 244 static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict"; 245 246 /* 247 * Global data for debug logging. To enable debug printing, sd_component_mask 248 * and sd_level_mask should be set to the desired bit patterns as outlined in 249 * sddef.h. 250 */ 251 uint_t sd_component_mask = 0x0; 252 uint_t sd_level_mask = 0x0; 253 struct sd_lun *sd_debug_un = NULL; 254 uint_t sd_error_level = SCSI_ERR_RETRYABLE; 255 256 /* Note: these may go away in the future... 
*/ 257 static uint32_t sd_xbuf_active_limit = 512; 258 static uint32_t sd_xbuf_reserve_limit = 16; 259 260 static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 }; 261 262 /* 263 * Timer value used to reset the throttle after it has been reduced 264 * (typically in response to TRAN_BUSY or STATUS_QFULL) 265 */ 266 static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT; 267 static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT; 268 269 /* 270 * Interval value associated with the media change scsi watch. 271 */ 272 static int sd_check_media_time = 3000000; 273 274 /* 275 * Wait value used for in progress operations during a DDI_SUSPEND 276 */ 277 static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE; 278 279 /* 280 * sd_label_mutex protects a static buffer used in the disk label 281 * component of the driver 282 */ 283 static kmutex_t sd_label_mutex; 284 285 /* 286 * sd_detach_mutex protects un_layer_count, un_detach_count, and 287 * un_opens_in_progress in the sd_lun structure. 288 */ 289 static kmutex_t sd_detach_mutex; 290 291 _NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex, 292 sd_lun::{un_layer_count un_detach_count un_opens_in_progress})) 293 294 /* 295 * Global buffer and mutex for debug logging 296 */ 297 static char sd_log_buf[1024]; 298 static kmutex_t sd_log_mutex; 299 300 /* 301 * Structs and globals for recording attached lun information. 302 * This maintains a chain. Each node in the chain represents a SCSI controller. 303 * The structure records the number of luns attached to each target connected 304 * with the controller. 305 * For parallel scsi device only. 
306 */ 307 struct sd_scsi_hba_tgt_lun { 308 struct sd_scsi_hba_tgt_lun *next; 309 dev_info_t *pdip; 310 int nlun[NTARGETS_WIDE]; 311 }; 312 313 /* 314 * Flag to indicate the lun is attached or detached 315 */ 316 #define SD_SCSI_LUN_ATTACH 0 317 #define SD_SCSI_LUN_DETACH 1 318 319 static kmutex_t sd_scsi_target_lun_mutex; 320 static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL; 321 322 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex, 323 sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip)) 324 325 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex, 326 sd_scsi_target_lun_head)) 327 328 /* 329 * "Smart" Probe Caching structs, globals, #defines, etc. 330 * For parallel scsi and non-self-identify device only. 331 */ 332 333 /* 334 * The following resources and routines are implemented to support 335 * "smart" probing, which caches the scsi_probe() results in an array, 336 * in order to help avoid long probe times. 337 */ 338 struct sd_scsi_probe_cache { 339 struct sd_scsi_probe_cache *next; 340 dev_info_t *pdip; 341 int cache[NTARGETS_WIDE]; 342 }; 343 344 static kmutex_t sd_scsi_probe_cache_mutex; 345 static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL; 346 347 /* 348 * Really we only need protection on the head of the linked list, but 349 * better safe than sorry. 
350 */ 351 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex, 352 sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip)) 353 354 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex, 355 sd_scsi_probe_cache_head)) 356 357 /* 358 * Power attribute table 359 */ 360 static sd_power_attr_ss sd_pwr_ss = { 361 { "NAME=spindle-motor", "0=off", "1=on", NULL }, 362 {0, 100}, 363 {30, 0}, 364 {20000, 0} 365 }; 366 367 static sd_power_attr_pc sd_pwr_pc = { 368 { "NAME=spindle-motor", "0=stopped", "1=standby", "2=idle", 369 "3=active", NULL }, 370 {0, 0, 0, 100}, 371 {90, 90, 20, 0}, 372 {15000, 15000, 1000, 0} 373 }; 374 375 /* 376 * Power level to power condition 377 */ 378 static int sd_pl2pc[] = { 379 SD_TARGET_START_VALID, 380 SD_TARGET_STANDBY, 381 SD_TARGET_IDLE, 382 SD_TARGET_ACTIVE 383 }; 384 385 /* 386 * Vendor specific data name property declarations 387 */ 388 389 #if defined(__fibre) || defined(__i386) ||defined(__amd64) 390 391 static sd_tunables seagate_properties = { 392 SEAGATE_THROTTLE_VALUE, 393 0, 394 0, 395 0, 396 0, 397 0, 398 0, 399 0, 400 0 401 }; 402 403 404 static sd_tunables fujitsu_properties = { 405 FUJITSU_THROTTLE_VALUE, 406 0, 407 0, 408 0, 409 0, 410 0, 411 0, 412 0, 413 0 414 }; 415 416 static sd_tunables ibm_properties = { 417 IBM_THROTTLE_VALUE, 418 0, 419 0, 420 0, 421 0, 422 0, 423 0, 424 0, 425 0 426 }; 427 428 static sd_tunables purple_properties = { 429 PURPLE_THROTTLE_VALUE, 430 0, 431 0, 432 PURPLE_BUSY_RETRIES, 433 PURPLE_RESET_RETRY_COUNT, 434 PURPLE_RESERVE_RELEASE_TIME, 435 0, 436 0, 437 0 438 }; 439 440 static sd_tunables sve_properties = { 441 SVE_THROTTLE_VALUE, 442 0, 443 0, 444 SVE_BUSY_RETRIES, 445 SVE_RESET_RETRY_COUNT, 446 SVE_RESERVE_RELEASE_TIME, 447 SVE_MIN_THROTTLE_VALUE, 448 SVE_DISKSORT_DISABLED_FLAG, 449 0 450 }; 451 452 static sd_tunables maserati_properties = { 453 0, 454 0, 455 0, 456 0, 457 0, 458 0, 459 0, 460 MASERATI_DISKSORT_DISABLED_FLAG, 461 MASERATI_LUN_RESET_ENABLED_FLAG 462 }; 463 464 static sd_tunables 
pirus_properties = { 465 PIRUS_THROTTLE_VALUE, 466 0, 467 PIRUS_NRR_COUNT, 468 PIRUS_BUSY_RETRIES, 469 PIRUS_RESET_RETRY_COUNT, 470 0, 471 PIRUS_MIN_THROTTLE_VALUE, 472 PIRUS_DISKSORT_DISABLED_FLAG, 473 PIRUS_LUN_RESET_ENABLED_FLAG 474 }; 475 476 #endif 477 478 #if (defined(__sparc) && !defined(__fibre)) || \ 479 (defined(__i386) || defined(__amd64)) 480 481 482 static sd_tunables elite_properties = { 483 ELITE_THROTTLE_VALUE, 484 0, 485 0, 486 0, 487 0, 488 0, 489 0, 490 0, 491 0 492 }; 493 494 static sd_tunables st31200n_properties = { 495 ST31200N_THROTTLE_VALUE, 496 0, 497 0, 498 0, 499 0, 500 0, 501 0, 502 0, 503 0 504 }; 505 506 #endif /* Fibre or not */ 507 508 static sd_tunables lsi_properties_scsi = { 509 LSI_THROTTLE_VALUE, 510 0, 511 LSI_NOTREADY_RETRIES, 512 0, 513 0, 514 0, 515 0, 516 0, 517 0 518 }; 519 520 static sd_tunables symbios_properties = { 521 SYMBIOS_THROTTLE_VALUE, 522 0, 523 SYMBIOS_NOTREADY_RETRIES, 524 0, 525 0, 526 0, 527 0, 528 0, 529 0 530 }; 531 532 static sd_tunables lsi_properties = { 533 0, 534 0, 535 LSI_NOTREADY_RETRIES, 536 0, 537 0, 538 0, 539 0, 540 0, 541 0 542 }; 543 544 static sd_tunables lsi_oem_properties = { 545 0, 546 0, 547 LSI_OEM_NOTREADY_RETRIES, 548 0, 549 0, 550 0, 551 0, 552 0, 553 0, 554 1 555 }; 556 557 558 559 #if (defined(SD_PROP_TST)) 560 561 #define SD_TST_CTYPE_VAL CTYPE_CDROM 562 #define SD_TST_THROTTLE_VAL 16 563 #define SD_TST_NOTREADY_VAL 12 564 #define SD_TST_BUSY_VAL 60 565 #define SD_TST_RST_RETRY_VAL 36 566 #define SD_TST_RSV_REL_TIME 60 567 568 static sd_tunables tst_properties = { 569 SD_TST_THROTTLE_VAL, 570 SD_TST_CTYPE_VAL, 571 SD_TST_NOTREADY_VAL, 572 SD_TST_BUSY_VAL, 573 SD_TST_RST_RETRY_VAL, 574 SD_TST_RSV_REL_TIME, 575 0, 576 0, 577 0 578 }; 579 #endif 580 581 /* This is similar to the ANSI toupper implementation */ 582 #define SD_TOUPPER(C) (((C) >= 'a' && (C) <= 'z') ? 
(C) - 'a' + 'A' : (C)) 583 584 /* 585 * Static Driver Configuration Table 586 * 587 * This is the table of disks which need throttle adjustment (or, perhaps 588 * something else as defined by the flags at a future time.) device_id 589 * is a string consisting of concatenated vid (vendor), pid (product/model) 590 * and revision strings as defined in the scsi_inquiry structure. Offsets of 591 * the parts of the string are as defined by the sizes in the scsi_inquiry 592 * structure. Device type is searched as far as the device_id string is 593 * defined. Flags defines which values are to be set in the driver from the 594 * properties list. 595 * 596 * Entries below which begin and end with a "*" are a special case. 597 * These do not have a specific vendor, and the string which follows 598 * can appear anywhere in the 16 byte PID portion of the inquiry data. 599 * 600 * Entries below which begin and end with a " " (blank) are a special 601 * case. The comparison function will treat multiple consecutive blanks 602 * as equivalent to a single blank. For example, this causes a 603 * sd_disk_table entry of " NEC CDROM " to match a device's id string 604 * of "NEC CDROM". 605 * 606 * Note: The MD21 controller type has been obsoleted. 607 * ST318202F is a Legacy device 608 * MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been 609 * made with an FC connection. The entries here are a legacy. 
610 */ 611 static sd_disk_config_t sd_disk_table[] = { 612 #if defined(__fibre) || defined(__i386) || defined(__amd64) 613 { "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties }, 614 { "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties }, 615 { "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties }, 616 { "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties }, 617 { "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 618 { "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 619 { "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 620 { "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 621 { "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 622 { "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 623 { "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 624 { "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 625 { "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 626 { "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 627 { "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 628 { "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 629 { "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 630 { "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 631 { "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 632 { "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 633 { "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 634 { "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 635 { "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 636 { "IBM DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties }, 637 { "IBM DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties }, 638 { "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties }, 639 { "IBM 
IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties }, 640 { "IBM 1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 641 { "IBM 1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 642 { "IBM 1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 643 { "IBM 1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 644 { "IBM 1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 645 { "IBM 1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 646 { "IBM 3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 647 { "IBM 3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 648 { "IBM 3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 649 { "IBM 1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 650 { "IBM 1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 651 { "IBM 1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 652 { "IBM FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 653 { "IBM 1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 654 { "IBM 1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 655 { "IBM 1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 656 { "DELL MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 657 { "DELL MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 658 { "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 659 { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 660 { "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 661 { "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 662 { "*CSM100_*", SD_CONF_BSET_NRR_COUNT | 663 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties }, 664 { "*CSM200_*", SD_CONF_BSET_NRR_COUNT | 665 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties }, 666 { "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties }, 667 { "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties }, 668 { "SUN T3", SD_CONF_BSET_THROTTLE | 669 SD_CONF_BSET_BSY_RETRY_COUNT| 670 SD_CONF_BSET_RST_RETRIES| 671 SD_CONF_BSET_RSV_REL_TIME, 672 &purple_properties }, 
673 { "SUN SESS01", SD_CONF_BSET_THROTTLE | 674 SD_CONF_BSET_BSY_RETRY_COUNT| 675 SD_CONF_BSET_RST_RETRIES| 676 SD_CONF_BSET_RSV_REL_TIME| 677 SD_CONF_BSET_MIN_THROTTLE| 678 SD_CONF_BSET_DISKSORT_DISABLED, 679 &sve_properties }, 680 { "SUN T4", SD_CONF_BSET_THROTTLE | 681 SD_CONF_BSET_BSY_RETRY_COUNT| 682 SD_CONF_BSET_RST_RETRIES| 683 SD_CONF_BSET_RSV_REL_TIME, 684 &purple_properties }, 685 { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED | 686 SD_CONF_BSET_LUN_RESET_ENABLED, 687 &maserati_properties }, 688 { "SUN SE6920", SD_CONF_BSET_THROTTLE | 689 SD_CONF_BSET_NRR_COUNT| 690 SD_CONF_BSET_BSY_RETRY_COUNT| 691 SD_CONF_BSET_RST_RETRIES| 692 SD_CONF_BSET_MIN_THROTTLE| 693 SD_CONF_BSET_DISKSORT_DISABLED| 694 SD_CONF_BSET_LUN_RESET_ENABLED, 695 &pirus_properties }, 696 { "SUN SE6940", SD_CONF_BSET_THROTTLE | 697 SD_CONF_BSET_NRR_COUNT| 698 SD_CONF_BSET_BSY_RETRY_COUNT| 699 SD_CONF_BSET_RST_RETRIES| 700 SD_CONF_BSET_MIN_THROTTLE| 701 SD_CONF_BSET_DISKSORT_DISABLED| 702 SD_CONF_BSET_LUN_RESET_ENABLED, 703 &pirus_properties }, 704 { "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE | 705 SD_CONF_BSET_NRR_COUNT| 706 SD_CONF_BSET_BSY_RETRY_COUNT| 707 SD_CONF_BSET_RST_RETRIES| 708 SD_CONF_BSET_MIN_THROTTLE| 709 SD_CONF_BSET_DISKSORT_DISABLED| 710 SD_CONF_BSET_LUN_RESET_ENABLED, 711 &pirus_properties }, 712 { "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE | 713 SD_CONF_BSET_NRR_COUNT| 714 SD_CONF_BSET_BSY_RETRY_COUNT| 715 SD_CONF_BSET_RST_RETRIES| 716 SD_CONF_BSET_MIN_THROTTLE| 717 SD_CONF_BSET_DISKSORT_DISABLED| 718 SD_CONF_BSET_LUN_RESET_ENABLED, 719 &pirus_properties }, 720 { "SUN PSX1000", SD_CONF_BSET_THROTTLE | 721 SD_CONF_BSET_NRR_COUNT| 722 SD_CONF_BSET_BSY_RETRY_COUNT| 723 SD_CONF_BSET_RST_RETRIES| 724 SD_CONF_BSET_MIN_THROTTLE| 725 SD_CONF_BSET_DISKSORT_DISABLED| 726 SD_CONF_BSET_LUN_RESET_ENABLED, 727 &pirus_properties }, 728 { "SUN SE6330", SD_CONF_BSET_THROTTLE | 729 SD_CONF_BSET_NRR_COUNT| 730 SD_CONF_BSET_BSY_RETRY_COUNT| 731 SD_CONF_BSET_RST_RETRIES| 732 
SD_CONF_BSET_MIN_THROTTLE| 733 SD_CONF_BSET_DISKSORT_DISABLED| 734 SD_CONF_BSET_LUN_RESET_ENABLED, 735 &pirus_properties }, 736 { "SUN STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 737 { "SUN SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 738 { "STK OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 739 { "STK OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 740 { "STK BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 741 { "STK FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 742 { "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties }, 743 #endif /* fibre or NON-sparc platforms */ 744 #if ((defined(__sparc) && !defined(__fibre)) ||\ 745 (defined(__i386) || defined(__amd64))) 746 { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties }, 747 { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties }, 748 { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL }, 749 { "CONNER CP30540", SD_CONF_BSET_NOCACHE, NULL }, 750 { "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL }, 751 { "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL }, 752 { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL }, 753 { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL }, 754 { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL }, 755 { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL }, 756 { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL }, 757 { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL }, 758 { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT, 759 &symbios_properties }, 760 { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT, 761 &lsi_properties_scsi }, 762 #if defined(__i386) || defined(__amd64) 763 { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD 764 | SD_CONF_BSET_READSUB_BCD 765 | SD_CONF_BSET_READ_TOC_ADDR_BCD 766 | SD_CONF_BSET_NO_READ_HEADER 767 | SD_CONF_BSET_READ_CD_XD4), NULL }, 768 769 { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD 770 | SD_CONF_BSET_READSUB_BCD 771 | SD_CONF_BSET_READ_TOC_ADDR_BCD 772 | 
SD_CONF_BSET_NO_READ_HEADER 773 | SD_CONF_BSET_READ_CD_XD4), NULL }, 774 #endif /* __i386 || __amd64 */ 775 #endif /* sparc NON-fibre or NON-sparc platforms */ 776 777 #if (defined(SD_PROP_TST)) 778 { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE 779 | SD_CONF_BSET_CTYPE 780 | SD_CONF_BSET_NRR_COUNT 781 | SD_CONF_BSET_FAB_DEVID 782 | SD_CONF_BSET_NOCACHE 783 | SD_CONF_BSET_BSY_RETRY_COUNT 784 | SD_CONF_BSET_PLAYMSF_BCD 785 | SD_CONF_BSET_READSUB_BCD 786 | SD_CONF_BSET_READ_TOC_TRK_BCD 787 | SD_CONF_BSET_READ_TOC_ADDR_BCD 788 | SD_CONF_BSET_NO_READ_HEADER 789 | SD_CONF_BSET_READ_CD_XD4 790 | SD_CONF_BSET_RST_RETRIES 791 | SD_CONF_BSET_RSV_REL_TIME 792 | SD_CONF_BSET_TUR_CHECK), &tst_properties}, 793 #endif 794 }; 795 796 static const int sd_disk_table_size = 797 sizeof (sd_disk_table)/ sizeof (sd_disk_config_t); 798 799 800 801 #define SD_INTERCONNECT_PARALLEL 0 802 #define SD_INTERCONNECT_FABRIC 1 803 #define SD_INTERCONNECT_FIBRE 2 804 #define SD_INTERCONNECT_SSA 3 805 #define SD_INTERCONNECT_SATA 4 806 #define SD_INTERCONNECT_SAS 5 807 808 #define SD_IS_PARALLEL_SCSI(un) \ 809 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL) 810 #define SD_IS_SERIAL(un) \ 811 (((un)->un_interconnect_type == SD_INTERCONNECT_SATA) ||\ 812 ((un)->un_interconnect_type == SD_INTERCONNECT_SAS)) 813 814 /* 815 * Definitions used by device id registration routines 816 */ 817 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */ 818 #define VPD_PAGE_LENGTH 3 /* offset for pge length data */ 819 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */ 820 821 static kmutex_t sd_sense_mutex = {0}; 822 823 /* 824 * Macros for updates of the driver state 825 */ 826 #define New_state(un, s) \ 827 (un)->un_last_state = (un)->un_state, (un)->un_state = (s) 828 #define Restore_state(un) \ 829 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); } 830 831 static struct sd_cdbinfo sd_cdbtab[] = { 832 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, }, 833 { CDB_GROUP1, SCMD_GROUP1, 
0xFFFFFFFF, 0xFFFF, }, 834 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, }, 835 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, }, 836 }; 837 838 /* 839 * Specifies the number of seconds that must have elapsed since the last 840 * cmd. has completed for a device to be declared idle to the PM framework. 841 */ 842 static int sd_pm_idletime = 1; 843 844 /* 845 * Internal function prototypes 846 */ 847 848 #if (defined(__fibre)) 849 /* 850 * These #defines are to avoid namespace collisions that occur because this 851 * code is currently used to compile two separate driver modules: sd and ssd. 852 * All function names need to be treated this way (even if declared static) 853 * in order to allow the debugger to resolve the names properly. 854 * It is anticipated that in the near future the ssd module will be obsoleted, 855 * at which time this ugliness should go away. 856 */ 857 #define sd_log_trace ssd_log_trace 858 #define sd_log_info ssd_log_info 859 #define sd_log_err ssd_log_err 860 #define sdprobe ssdprobe 861 #define sdinfo ssdinfo 862 #define sd_prop_op ssd_prop_op 863 #define sd_scsi_probe_cache_init ssd_scsi_probe_cache_init 864 #define sd_scsi_probe_cache_fini ssd_scsi_probe_cache_fini 865 #define sd_scsi_clear_probe_cache ssd_scsi_clear_probe_cache 866 #define sd_scsi_probe_with_cache ssd_scsi_probe_with_cache 867 #define sd_scsi_target_lun_init ssd_scsi_target_lun_init 868 #define sd_scsi_target_lun_fini ssd_scsi_target_lun_fini 869 #define sd_scsi_get_target_lun_count ssd_scsi_get_target_lun_count 870 #define sd_scsi_update_lun_on_target ssd_scsi_update_lun_on_target 871 #define sd_spin_up_unit ssd_spin_up_unit 872 #define sd_enable_descr_sense ssd_enable_descr_sense 873 #define sd_reenable_dsense_task ssd_reenable_dsense_task 874 #define sd_set_mmc_caps ssd_set_mmc_caps 875 #define sd_read_unit_properties ssd_read_unit_properties 876 #define sd_process_sdconf_file ssd_process_sdconf_file 877 #define sd_process_sdconf_table 
ssd_process_sdconf_table 878 #define sd_sdconf_id_match ssd_sdconf_id_match 879 #define sd_blank_cmp ssd_blank_cmp 880 #define sd_chk_vers1_data ssd_chk_vers1_data 881 #define sd_set_vers1_properties ssd_set_vers1_properties 882 883 #define sd_get_physical_geometry ssd_get_physical_geometry 884 #define sd_get_virtual_geometry ssd_get_virtual_geometry 885 #define sd_update_block_info ssd_update_block_info 886 #define sd_register_devid ssd_register_devid 887 #define sd_get_devid ssd_get_devid 888 #define sd_create_devid ssd_create_devid 889 #define sd_write_deviceid ssd_write_deviceid 890 #define sd_check_vpd_page_support ssd_check_vpd_page_support 891 #define sd_setup_pm ssd_setup_pm 892 #define sd_create_pm_components ssd_create_pm_components 893 #define sd_ddi_suspend ssd_ddi_suspend 894 #define sd_ddi_resume ssd_ddi_resume 895 #define sd_pm_state_change ssd_pm_state_change 896 #define sdpower ssdpower 897 #define sdattach ssdattach 898 #define sddetach ssddetach 899 #define sd_unit_attach ssd_unit_attach 900 #define sd_unit_detach ssd_unit_detach 901 #define sd_set_unit_attributes ssd_set_unit_attributes 902 #define sd_create_errstats ssd_create_errstats 903 #define sd_set_errstats ssd_set_errstats 904 #define sd_set_pstats ssd_set_pstats 905 #define sddump ssddump 906 #define sd_scsi_poll ssd_scsi_poll 907 #define sd_send_polled_RQS ssd_send_polled_RQS 908 #define sd_ddi_scsi_poll ssd_ddi_scsi_poll 909 #define sd_init_event_callbacks ssd_init_event_callbacks 910 #define sd_event_callback ssd_event_callback 911 #define sd_cache_control ssd_cache_control 912 #define sd_get_write_cache_enabled ssd_get_write_cache_enabled 913 #define sd_get_nv_sup ssd_get_nv_sup 914 #define sd_make_device ssd_make_device 915 #define sdopen ssdopen 916 #define sdclose ssdclose 917 #define sd_ready_and_valid ssd_ready_and_valid 918 #define sdmin ssdmin 919 #define sdread ssdread 920 #define sdwrite ssdwrite 921 #define sdaread ssdaread 922 #define sdawrite ssdawrite 923 #define 
sdstrategy ssdstrategy 924 #define sdioctl ssdioctl 925 #define sd_mapblockaddr_iostart ssd_mapblockaddr_iostart 926 #define sd_mapblocksize_iostart ssd_mapblocksize_iostart 927 #define sd_checksum_iostart ssd_checksum_iostart 928 #define sd_checksum_uscsi_iostart ssd_checksum_uscsi_iostart 929 #define sd_pm_iostart ssd_pm_iostart 930 #define sd_core_iostart ssd_core_iostart 931 #define sd_mapblockaddr_iodone ssd_mapblockaddr_iodone 932 #define sd_mapblocksize_iodone ssd_mapblocksize_iodone 933 #define sd_checksum_iodone ssd_checksum_iodone 934 #define sd_checksum_uscsi_iodone ssd_checksum_uscsi_iodone 935 #define sd_pm_iodone ssd_pm_iodone 936 #define sd_initpkt_for_buf ssd_initpkt_for_buf 937 #define sd_destroypkt_for_buf ssd_destroypkt_for_buf 938 #define sd_setup_rw_pkt ssd_setup_rw_pkt 939 #define sd_setup_next_rw_pkt ssd_setup_next_rw_pkt 940 #define sd_buf_iodone ssd_buf_iodone 941 #define sd_uscsi_strategy ssd_uscsi_strategy 942 #define sd_initpkt_for_uscsi ssd_initpkt_for_uscsi 943 #define sd_destroypkt_for_uscsi ssd_destroypkt_for_uscsi 944 #define sd_uscsi_iodone ssd_uscsi_iodone 945 #define sd_xbuf_strategy ssd_xbuf_strategy 946 #define sd_xbuf_init ssd_xbuf_init 947 #define sd_pm_entry ssd_pm_entry 948 #define sd_pm_exit ssd_pm_exit 949 950 #define sd_pm_idletimeout_handler ssd_pm_idletimeout_handler 951 #define sd_pm_timeout_handler ssd_pm_timeout_handler 952 953 #define sd_add_buf_to_waitq ssd_add_buf_to_waitq 954 #define sdintr ssdintr 955 #define sd_start_cmds ssd_start_cmds 956 #define sd_send_scsi_cmd ssd_send_scsi_cmd 957 #define sd_bioclone_alloc ssd_bioclone_alloc 958 #define sd_bioclone_free ssd_bioclone_free 959 #define sd_shadow_buf_alloc ssd_shadow_buf_alloc 960 #define sd_shadow_buf_free ssd_shadow_buf_free 961 #define sd_print_transport_rejected_message \ 962 ssd_print_transport_rejected_message 963 #define sd_retry_command ssd_retry_command 964 #define sd_set_retry_bp ssd_set_retry_bp 965 #define sd_send_request_sense_command 
ssd_send_request_sense_command 966 #define sd_start_retry_command ssd_start_retry_command 967 #define sd_start_direct_priority_command \ 968 ssd_start_direct_priority_command 969 #define sd_return_failed_command ssd_return_failed_command 970 #define sd_return_failed_command_no_restart \ 971 ssd_return_failed_command_no_restart 972 #define sd_return_command ssd_return_command 973 #define sd_sync_with_callback ssd_sync_with_callback 974 #define sdrunout ssdrunout 975 #define sd_mark_rqs_busy ssd_mark_rqs_busy 976 #define sd_mark_rqs_idle ssd_mark_rqs_idle 977 #define sd_reduce_throttle ssd_reduce_throttle 978 #define sd_restore_throttle ssd_restore_throttle 979 #define sd_print_incomplete_msg ssd_print_incomplete_msg 980 #define sd_init_cdb_limits ssd_init_cdb_limits 981 #define sd_pkt_status_good ssd_pkt_status_good 982 #define sd_pkt_status_check_condition ssd_pkt_status_check_condition 983 #define sd_pkt_status_busy ssd_pkt_status_busy 984 #define sd_pkt_status_reservation_conflict \ 985 ssd_pkt_status_reservation_conflict 986 #define sd_pkt_status_qfull ssd_pkt_status_qfull 987 #define sd_handle_request_sense ssd_handle_request_sense 988 #define sd_handle_auto_request_sense ssd_handle_auto_request_sense 989 #define sd_print_sense_failed_msg ssd_print_sense_failed_msg 990 #define sd_validate_sense_data ssd_validate_sense_data 991 #define sd_decode_sense ssd_decode_sense 992 #define sd_print_sense_msg ssd_print_sense_msg 993 #define sd_sense_key_no_sense ssd_sense_key_no_sense 994 #define sd_sense_key_recoverable_error ssd_sense_key_recoverable_error 995 #define sd_sense_key_not_ready ssd_sense_key_not_ready 996 #define sd_sense_key_medium_or_hardware_error \ 997 ssd_sense_key_medium_or_hardware_error 998 #define sd_sense_key_illegal_request ssd_sense_key_illegal_request 999 #define sd_sense_key_unit_attention ssd_sense_key_unit_attention 1000 #define sd_sense_key_fail_command ssd_sense_key_fail_command 1001 #define sd_sense_key_blank_check 
ssd_sense_key_blank_check 1002 #define sd_sense_key_aborted_command ssd_sense_key_aborted_command 1003 #define sd_sense_key_default ssd_sense_key_default 1004 #define sd_print_retry_msg ssd_print_retry_msg 1005 #define sd_print_cmd_incomplete_msg ssd_print_cmd_incomplete_msg 1006 #define sd_pkt_reason_cmd_incomplete ssd_pkt_reason_cmd_incomplete 1007 #define sd_pkt_reason_cmd_tran_err ssd_pkt_reason_cmd_tran_err 1008 #define sd_pkt_reason_cmd_reset ssd_pkt_reason_cmd_reset 1009 #define sd_pkt_reason_cmd_aborted ssd_pkt_reason_cmd_aborted 1010 #define sd_pkt_reason_cmd_timeout ssd_pkt_reason_cmd_timeout 1011 #define sd_pkt_reason_cmd_unx_bus_free ssd_pkt_reason_cmd_unx_bus_free 1012 #define sd_pkt_reason_cmd_tag_reject ssd_pkt_reason_cmd_tag_reject 1013 #define sd_pkt_reason_default ssd_pkt_reason_default 1014 #define sd_reset_target ssd_reset_target 1015 #define sd_start_stop_unit_callback ssd_start_stop_unit_callback 1016 #define sd_start_stop_unit_task ssd_start_stop_unit_task 1017 #define sd_taskq_create ssd_taskq_create 1018 #define sd_taskq_delete ssd_taskq_delete 1019 #define sd_target_change_task ssd_target_change_task 1020 #define sd_log_lun_expansion_event ssd_log_lun_expansion_event 1021 #define sd_media_change_task ssd_media_change_task 1022 #define sd_handle_mchange ssd_handle_mchange 1023 #define sd_send_scsi_DOORLOCK ssd_send_scsi_DOORLOCK 1024 #define sd_send_scsi_READ_CAPACITY ssd_send_scsi_READ_CAPACITY 1025 #define sd_send_scsi_READ_CAPACITY_16 ssd_send_scsi_READ_CAPACITY_16 1026 #define sd_send_scsi_GET_CONFIGURATION ssd_send_scsi_GET_CONFIGURATION 1027 #define sd_send_scsi_feature_GET_CONFIGURATION \ 1028 sd_send_scsi_feature_GET_CONFIGURATION 1029 #define sd_send_scsi_START_STOP_UNIT ssd_send_scsi_START_STOP_UNIT 1030 #define sd_send_scsi_INQUIRY ssd_send_scsi_INQUIRY 1031 #define sd_send_scsi_TEST_UNIT_READY ssd_send_scsi_TEST_UNIT_READY 1032 #define sd_send_scsi_PERSISTENT_RESERVE_IN \ 1033 ssd_send_scsi_PERSISTENT_RESERVE_IN 1034 #define 
sd_send_scsi_PERSISTENT_RESERVE_OUT \ 1035 ssd_send_scsi_PERSISTENT_RESERVE_OUT 1036 #define sd_send_scsi_SYNCHRONIZE_CACHE ssd_send_scsi_SYNCHRONIZE_CACHE 1037 #define sd_send_scsi_SYNCHRONIZE_CACHE_biodone \ 1038 ssd_send_scsi_SYNCHRONIZE_CACHE_biodone 1039 #define sd_send_scsi_MODE_SENSE ssd_send_scsi_MODE_SENSE 1040 #define sd_send_scsi_MODE_SELECT ssd_send_scsi_MODE_SELECT 1041 #define sd_send_scsi_RDWR ssd_send_scsi_RDWR 1042 #define sd_send_scsi_LOG_SENSE ssd_send_scsi_LOG_SENSE 1043 #define sd_alloc_rqs ssd_alloc_rqs 1044 #define sd_free_rqs ssd_free_rqs 1045 #define sd_dump_memory ssd_dump_memory 1046 #define sd_get_media_info ssd_get_media_info 1047 #define sd_get_media_info_ext ssd_get_media_info_ext 1048 #define sd_dkio_ctrl_info ssd_dkio_ctrl_info 1049 #define sd_nvpair_str_decode ssd_nvpair_str_decode 1050 #define sd_strtok_r ssd_strtok_r 1051 #define sd_set_properties ssd_set_properties 1052 #define sd_get_tunables_from_conf ssd_get_tunables_from_conf 1053 #define sd_setup_next_xfer ssd_setup_next_xfer 1054 #define sd_dkio_get_temp ssd_dkio_get_temp 1055 #define sd_check_mhd ssd_check_mhd 1056 #define sd_mhd_watch_cb ssd_mhd_watch_cb 1057 #define sd_mhd_watch_incomplete ssd_mhd_watch_incomplete 1058 #define sd_sname ssd_sname 1059 #define sd_mhd_resvd_recover ssd_mhd_resvd_recover 1060 #define sd_resv_reclaim_thread ssd_resv_reclaim_thread 1061 #define sd_take_ownership ssd_take_ownership 1062 #define sd_reserve_release ssd_reserve_release 1063 #define sd_rmv_resv_reclaim_req ssd_rmv_resv_reclaim_req 1064 #define sd_mhd_reset_notify_cb ssd_mhd_reset_notify_cb 1065 #define sd_persistent_reservation_in_read_keys \ 1066 ssd_persistent_reservation_in_read_keys 1067 #define sd_persistent_reservation_in_read_resv \ 1068 ssd_persistent_reservation_in_read_resv 1069 #define sd_mhdioc_takeown ssd_mhdioc_takeown 1070 #define sd_mhdioc_failfast ssd_mhdioc_failfast 1071 #define sd_mhdioc_release ssd_mhdioc_release 1072 #define sd_mhdioc_register_devid 
ssd_mhdioc_register_devid 1073 #define sd_mhdioc_inkeys ssd_mhdioc_inkeys 1074 #define sd_mhdioc_inresv ssd_mhdioc_inresv 1075 #define sr_change_blkmode ssr_change_blkmode 1076 #define sr_change_speed ssr_change_speed 1077 #define sr_atapi_change_speed ssr_atapi_change_speed 1078 #define sr_pause_resume ssr_pause_resume 1079 #define sr_play_msf ssr_play_msf 1080 #define sr_play_trkind ssr_play_trkind 1081 #define sr_read_all_subcodes ssr_read_all_subcodes 1082 #define sr_read_subchannel ssr_read_subchannel 1083 #define sr_read_tocentry ssr_read_tocentry 1084 #define sr_read_tochdr ssr_read_tochdr 1085 #define sr_read_cdda ssr_read_cdda 1086 #define sr_read_cdxa ssr_read_cdxa 1087 #define sr_read_mode1 ssr_read_mode1 1088 #define sr_read_mode2 ssr_read_mode2 1089 #define sr_read_cd_mode2 ssr_read_cd_mode2 1090 #define sr_sector_mode ssr_sector_mode 1091 #define sr_eject ssr_eject 1092 #define sr_ejected ssr_ejected 1093 #define sr_check_wp ssr_check_wp 1094 #define sd_check_media ssd_check_media 1095 #define sd_media_watch_cb ssd_media_watch_cb 1096 #define sd_delayed_cv_broadcast ssd_delayed_cv_broadcast 1097 #define sr_volume_ctrl ssr_volume_ctrl 1098 #define sr_read_sony_session_offset ssr_read_sony_session_offset 1099 #define sd_log_page_supported ssd_log_page_supported 1100 #define sd_check_for_writable_cd ssd_check_for_writable_cd 1101 #define sd_wm_cache_constructor ssd_wm_cache_constructor 1102 #define sd_wm_cache_destructor ssd_wm_cache_destructor 1103 #define sd_range_lock ssd_range_lock 1104 #define sd_get_range ssd_get_range 1105 #define sd_free_inlist_wmap ssd_free_inlist_wmap 1106 #define sd_range_unlock ssd_range_unlock 1107 #define sd_read_modify_write_task ssd_read_modify_write_task 1108 #define sddump_do_read_of_rmw ssddump_do_read_of_rmw 1109 1110 #define sd_iostart_chain ssd_iostart_chain 1111 #define sd_iodone_chain ssd_iodone_chain 1112 #define sd_initpkt_map ssd_initpkt_map 1113 #define sd_destroypkt_map ssd_destroypkt_map 1114 #define 
sd_chain_type_map ssd_chain_type_map 1115 #define sd_chain_index_map ssd_chain_index_map 1116 1117 #define sd_failfast_flushctl ssd_failfast_flushctl 1118 #define sd_failfast_flushq ssd_failfast_flushq 1119 #define sd_failfast_flushq_callback ssd_failfast_flushq_callback 1120 1121 #define sd_is_lsi ssd_is_lsi 1122 #define sd_tg_rdwr ssd_tg_rdwr 1123 #define sd_tg_getinfo ssd_tg_getinfo 1124 #define sd_rmw_msg_print_handler ssd_rmw_msg_print_handler 1125 1126 #endif /* #if (defined(__fibre)) */ 1127 1128 1129 int _init(void); 1130 int _fini(void); 1131 int _info(struct modinfo *modinfop); 1132 1133 /*PRINTFLIKE3*/ 1134 static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...); 1135 /*PRINTFLIKE3*/ 1136 static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...); 1137 /*PRINTFLIKE3*/ 1138 static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...); 1139 1140 static int sdprobe(dev_info_t *devi); 1141 static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, 1142 void **result); 1143 static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, 1144 int mod_flags, char *name, caddr_t valuep, int *lengthp); 1145 1146 /* 1147 * Smart probe for parallel scsi 1148 */ 1149 static void sd_scsi_probe_cache_init(void); 1150 static void sd_scsi_probe_cache_fini(void); 1151 static void sd_scsi_clear_probe_cache(void); 1152 static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)()); 1153 1154 /* 1155 * Attached luns on target for parallel scsi 1156 */ 1157 static void sd_scsi_target_lun_init(void); 1158 static void sd_scsi_target_lun_fini(void); 1159 static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target); 1160 static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag); 1161 1162 static int sd_spin_up_unit(sd_ssc_t *ssc); 1163 1164 /* 1165 * Using sd_ssc_init to establish sd_ssc_t struct 1166 * Using sd_ssc_send to send uscsi internal command 1167 * 
 * Using sd_ssc_fini to free sd_ssc_t struct
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);

/*
 * Using sd_ssc_assessment to set correct type-of-assessment
 * Using sd_ssc_post to post ereport & system log
 *       sd_ssc_post will call sd_ssc_print to print system log
 *       sd_ssc_post will call sd_ssd_ereport_post to post ereport
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);

/*
 * Using sd_ssc_set_info to mark an un-decodable-data error.
 * Using sd_ssc_extract_info to transfer information from internal
 * data structures to sd_ssc_t.
 */
/*PRINTFLIKE4*/
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

/*
 * Configuration handling: sd.conf / built-in table of per-device tunables.
 */
static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

/*
 * Device-id (devid) fabrication and registration.
 */
static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int sd_write_deviceid(sd_ssc_t *ssc);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(sd_ssc_t *ssc);

/*
 * Power management support.
 */
static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_pm_state_change(struct sd_lun *un, int level, int flag);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
static void sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(sd_ssc_t *ssc, int part);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
1351 */ 1352 static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp); 1353 static void sdintr(struct scsi_pkt *pktp); 1354 static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp); 1355 1356 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 1357 enum uio_seg dataspace, int path_flag); 1358 1359 static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen, 1360 daddr_t blkno, int (*func)(struct buf *)); 1361 static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen, 1362 uint_t bflags, daddr_t blkno, int (*func)(struct buf *)); 1363 static void sd_bioclone_free(struct buf *bp); 1364 static void sd_shadow_buf_free(struct buf *bp); 1365 1366 static void sd_print_transport_rejected_message(struct sd_lun *un, 1367 struct sd_xbuf *xp, int code); 1368 static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, 1369 void *arg, int code); 1370 static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, 1371 void *arg, int code); 1372 static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, 1373 void *arg, int code); 1374 1375 static void sd_retry_command(struct sd_lun *un, struct buf *bp, 1376 int retry_check_flag, 1377 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, 1378 int c), 1379 void *user_arg, int failure_code, clock_t retry_delay, 1380 void (*statp)(kstat_io_t *)); 1381 1382 static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp, 1383 clock_t retry_delay, void (*statp)(kstat_io_t *)); 1384 1385 static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 1386 struct scsi_pkt *pktp); 1387 static void sd_start_retry_command(void *arg); 1388 static void sd_start_direct_priority_command(void *arg); 1389 static void sd_return_failed_command(struct sd_lun *un, struct buf *bp, 1390 int errcode); 1391 static void sd_return_failed_command_no_restart(struct sd_lun *un, 1392 struct buf *bp, int errcode); 1393 static void 
sd_return_command(struct sd_lun *un, struct buf *bp); 1394 static void sd_sync_with_callback(struct sd_lun *un); 1395 static int sdrunout(caddr_t arg); 1396 1397 static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp); 1398 static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp); 1399 1400 static void sd_reduce_throttle(struct sd_lun *un, int throttle_type); 1401 static void sd_restore_throttle(void *arg); 1402 1403 static void sd_init_cdb_limits(struct sd_lun *un); 1404 1405 static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 1406 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1407 1408 /* 1409 * Error handling functions 1410 */ 1411 static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 1412 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1413 static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, 1414 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1415 static void sd_pkt_status_reservation_conflict(struct sd_lun *un, 1416 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1417 static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 1418 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1419 1420 static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp, 1421 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1422 static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 1423 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1424 static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp, 1425 struct sd_xbuf *xp, size_t actual_len); 1426 static void sd_decode_sense(struct sd_lun *un, struct buf *bp, 1427 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1428 1429 static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp, 1430 void *arg, int code); 1431 1432 static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 1433 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1434 static void sd_sense_key_recoverable_error(struct sd_lun *un, 1435 uint8_t 
*sense_datap, 1436 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1437 static void sd_sense_key_not_ready(struct sd_lun *un, 1438 uint8_t *sense_datap, 1439 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1440 static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 1441 uint8_t *sense_datap, 1442 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1443 static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 1444 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1445 static void sd_sense_key_unit_attention(struct sd_lun *un, 1446 uint8_t *sense_datap, 1447 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1448 static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 1449 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1450 static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 1451 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1452 static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 1453 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1454 static void sd_sense_key_default(struct sd_lun *un, 1455 uint8_t *sense_datap, 1456 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1457 1458 static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp, 1459 void *arg, int flag); 1460 1461 static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 1462 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1463 static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 1464 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1465 static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 1466 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1467 static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 1468 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1469 static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 1470 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1471 static void 
sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 1472 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1473 static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 1474 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1475 static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 1476 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1477 1478 static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp); 1479 1480 static void sd_start_stop_unit_callback(void *arg); 1481 static void sd_start_stop_unit_task(void *arg); 1482 1483 static void sd_taskq_create(void); 1484 static void sd_taskq_delete(void); 1485 static void sd_target_change_task(void *arg); 1486 static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag); 1487 static void sd_media_change_task(void *arg); 1488 1489 static int sd_handle_mchange(struct sd_lun *un); 1490 static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag); 1491 static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, 1492 uint32_t *lbap, int path_flag); 1493 static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, 1494 uint32_t *lbap, uint32_t *psp, int path_flag); 1495 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, 1496 int flag, int path_flag); 1497 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1498 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1499 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1500 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1501 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1502 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1503 uchar_t usr_cmd, uchar_t *usr_bufp); 1504 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1505 struct dk_callback *dkc); 1506 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1507 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1508 struct 
uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1509 uchar_t *bufaddr, uint_t buflen, int path_flag); 1510 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1511 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1512 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1513 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1514 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1515 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1516 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1517 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1518 size_t buflen, daddr_t start_block, int path_flag); 1519 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1520 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1521 path_flag) 1522 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1523 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1524 path_flag) 1525 1526 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1527 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1528 uint16_t param_ptr, int path_flag); 1529 1530 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1531 static void sd_free_rqs(struct sd_lun *un); 1532 1533 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1534 uchar_t *data, int len, int fmt); 1535 static void sd_panic_for_res_conflict(struct sd_lun *un); 1536 1537 /* 1538 * Disk Ioctl Function Prototypes 1539 */ 1540 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1541 static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag); 1542 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1543 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1544 1545 /* 1546 * Multi-host Ioctl Prototypes 1547 */ 1548 static int sd_check_mhd(dev_t dev, int 
interval); 1549 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1550 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1551 static char *sd_sname(uchar_t status); 1552 static void sd_mhd_resvd_recover(void *arg); 1553 static void sd_resv_reclaim_thread(); 1554 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1555 static int sd_reserve_release(dev_t dev, int cmd); 1556 static void sd_rmv_resv_reclaim_req(dev_t dev); 1557 static void sd_mhd_reset_notify_cb(caddr_t arg); 1558 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1559 mhioc_inkeys_t *usrp, int flag); 1560 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1561 mhioc_inresvs_t *usrp, int flag); 1562 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1563 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1564 static int sd_mhdioc_release(dev_t dev); 1565 static int sd_mhdioc_register_devid(dev_t dev); 1566 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1567 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1568 1569 /* 1570 * SCSI removable prototypes 1571 */ 1572 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1573 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1574 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1575 static int sr_pause_resume(dev_t dev, int mode); 1576 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1577 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1578 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1579 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1580 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1581 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1582 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1583 static int 
sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1584 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1585 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1586 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1587 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1588 static int sr_eject(dev_t dev); 1589 static void sr_ejected(register struct sd_lun *un); 1590 static int sr_check_wp(dev_t dev); 1591 static int sd_check_media(dev_t dev, enum dkio_state state); 1592 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1593 static void sd_delayed_cv_broadcast(void *arg); 1594 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1595 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1596 1597 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page); 1598 1599 /* 1600 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 1601 */ 1602 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag); 1603 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1604 static void sd_wm_cache_destructor(void *wm, void *un); 1605 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1606 daddr_t endb, ushort_t typ); 1607 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1608 daddr_t endb); 1609 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1610 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1611 static void sd_read_modify_write_task(void * arg); 1612 static int 1613 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1614 struct buf **bpp); 1615 1616 1617 /* 1618 * Function prototypes for failfast support. 
 */
static void sd_failfast_flushq(struct sd_lun *un);
static int sd_failfast_flushq_callback(struct buf *bp);

/*
 * Function prototypes to check for lsi devices
 */
static void sd_is_lsi(struct sd_lun *un);

/*
 * Function prototypes for partial DMA support
 */
static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp);


/* Function prototypes for cmlb */
static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie);

static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie);

/*
 * For printing RMW warning message timely
 */
static void sd_rmw_msg_print_handler(void *arg);

/*
 * Constants for failfast support:
 *
 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO
 * failfast processing being performed.
 *
 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing
 * failfast processing on all bufs with B_FAILFAST set.
 */

#define	SD_FAILFAST_INACTIVE		0
#define	SD_FAILFAST_ACTIVE		1

/*
 * Bitmask to control behavior of buf(9S) flushes when a transition to
 * the failfast state occurs. Optional bits include:
 *
 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that
 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will
 * be flushed.
 *
 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the
 * driver, in addition to the regular wait queue. This includes the xbuf
 * queues. When clear, only the driver's wait queue will be flushed.
 */
#define	SD_FAILFAST_FLUSH_ALL_BUFS	0x01
#define	SD_FAILFAST_FLUSH_ALL_QUEUES	0x02

/*
 * The default behavior is to only flush bufs that have B_FAILFAST set, but
 * to flush all queues within the driver.
 */
static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;


/*
 * SD Testing Fault Injection
 */
#ifdef SD_FAULT_INJECTION
static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
static void sd_faultinjection(struct scsi_pkt *pktp);
static void sd_injection_log(char *buf, struct sd_lun *un);
#endif

/*
 * Device driver ops vector
 */
static struct cb_ops sd_cb_ops = {
	sdopen,			/* open */
	sdclose,		/* close */
	sdstrategy,		/* strategy */
	nodev,			/* print */
	sddump,			/* dump */
	sdread,			/* read */
	sdwrite,		/* write */
	sdioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	sd_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_64BIT | D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flags */
	CB_REV,			/* cb_rev */
	sdaread,		/* async I/O read entry point */
	sdawrite		/* async I/O write entry point */
};

struct dev_ops sd_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	sdinfo,			/* info */
	nulldev,		/* identify */
	sdprobe,		/* probe */
	sdattach,		/* attach */
	sddetach,		/* detach */
	nodev,			/* reset */
	&sd_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	sdpower,		/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

#ifndef XPV_HVM_DRIVER
static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	SD_MODULE_NAME,		/* Module name. */
	&sd_ops			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

#else	/* XPV_HVM_DRIVER */
static struct modlmisc modlmisc = {
	&mod_miscops,		/* Type of module. This one is a misc */
	"HVM " SD_MODULE_NAME,	/* Module name. */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modlmisc, NULL
};

#endif	/* XPV_HVM_DRIVER */

/* Target-geometry ops vector (cmlb_tg_ops_t) provided to the cmlb module */
static cmlb_tg_ops_t sd_tgops = {
	TG_DK_OPS_VERSION_1,
	sd_tg_rdwr,
	sd_tg_getinfo
};

/*
 * Driver-private additions to the generic sense-key/ASC(+ASCQ) message
 * strings; terminated by the 0xffff sentinel entry.
 */
static struct scsi_asq_key_strings sd_additional_codes[] = {
	0x81, 0, "Logical Unit is Reserved",
	0x85, 0, "Audio Address Not Valid",
	0xb6, 0, "Media Load Mechanism Failed",
	0xB9, 0, "Audio Play Operation Aborted",
	0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
	0x53, 2, "Medium removal prevented",
	0x6f, 0, "Authentication failed during key exchange",
	0x6f, 1, "Key not present",
	0x6f, 2, "Key not established",
	0x6f, 3, "Read without proper authentication",
	0x6f, 4, "Mismatched region to this logical unit",
	0x6f, 5, "Region reset count error",
	0xffff, 0x0, NULL
};


/*
 * Struct for passing printing information for sense data messages
 */
struct sd_sense_info {
	int	ssi_severity;
	int	ssi_pfa_flag;
};

/*
 * Table of function pointers for iostart-side routines. Separate "chains"
 * of layered function calls are formed by placing the function pointers
 * sequentially in the desired order. Functions are called according to an
 * incrementing table index ordering. The last function in each chain must
 * be sd_core_iostart(). The corresponding iodone-side routines are expected
 * in the sd_iodone_chain[] array.
 *
 * Note: It may seem more natural to organize both the iostart and iodone
 * functions together, into an array of structures (or some similar
 * organization) with a common index, rather than two separate arrays which
 * must be maintained in synchronization. The purpose of this division is
 * to achieve improved performance: individual arrays allows for more
 * effective cache line utilization on certain platforms.
 */

/*
 * Common signature for every iostart/iodone chain entry: each layer
 * receives its own index into the chain table, the per-LUN softstate,
 * and the buf(9S) being processed.
 */
typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);


static sd_chain_t sd_iostart_chain[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 0 */
	sd_pm_iostart,			/* Index: 1 */
	sd_core_iostart,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 3 */
	sd_core_iostart,		/* Index: 4 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets with RMW needed (PM enabled)
	 */
	sd_mapblockaddr_iostart,	/* Index: 5 */
	sd_mapblocksize_iostart,	/* Index: 6 */
	sd_pm_iostart,			/* Index: 7 */
	sd_core_iostart,		/* Index: 8 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets with RMW needed (PM disabled)
	 */
	sd_mapblockaddr_iostart,	/* Index: 9 */
	sd_mapblocksize_iostart,	/* Index: 10 */
	sd_core_iostart,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 12 */
	sd_checksum_iostart,		/* Index: 13 */
	sd_pm_iostart,			/* Index: 14 */
	sd_core_iostart,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 16 */
	sd_checksum_iostart,		/* Index: 17 */
	sd_core_iostart,		/* Index: 18 */

	/* Chain for USCSI commands (all targets) */
	sd_pm_iostart,			/* Index: 19 */
	sd_core_iostart,		/* Index: 20 */

	/* Chain for checksumming USCSI commands (all targets) */
	sd_checksum_uscsi_iostart,	/* Index: 21 */
	sd_pm_iostart,			/* Index: 22 */
	sd_core_iostart,		/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_core_iostart,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_core_iostart,		/* Index: 25 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with RMW needed with checksumming (PM enabled)
	 */
	sd_mapblockaddr_iostart,	/* Index: 26 */
	sd_mapblocksize_iostart,	/* Index: 27 */
	sd_checksum_iostart,		/* Index: 28 */
	sd_pm_iostart,			/* Index: 29 */
	sd_core_iostart,		/* Index: 30 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with RMW needed with checksumming (PM disabled)
	 */
	sd_mapblockaddr_iostart,	/* Index: 31 */
	sd_mapblocksize_iostart,	/* Index: 32 */
	sd_checksum_iostart,		/* Index: 33 */
	sd_core_iostart,		/* Index: 34 */

};

/*
 * Macros to locate the first function of each iostart chain in the
 * sd_iostart_chain[] array. These are located by the index in the array.
 */
#define	SD_CHAIN_DISK_IOSTART			0
#define	SD_CHAIN_DISK_IOSTART_NO_PM		3
#define	SD_CHAIN_MSS_DISK_IOSTART		5
#define	SD_CHAIN_RMMEDIA_IOSTART		5
#define	SD_CHAIN_MSS_DISK_IOSTART_NO_PM		9
#define	SD_CHAIN_RMMEDIA_IOSTART_NO_PM		9
#define	SD_CHAIN_CHKSUM_IOSTART			12
#define	SD_CHAIN_CHKSUM_IOSTART_NO_PM		16
#define	SD_CHAIN_USCSI_CMD_IOSTART		19
#define	SD_CHAIN_USCSI_CHKSUM_IOSTART		21
#define	SD_CHAIN_DIRECT_CMD_IOSTART		24
#define	SD_CHAIN_PRIORITY_CMD_IOSTART		25
#define	SD_CHAIN_MSS_CHKSUM_IOSTART		26
#define	SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM	31


/*
 * Table of function pointers for the iodone-side routines for the driver-
 * internal layering mechanism. The calling sequence for iodone routines
 * uses a decrementing table index, so the last routine called in a chain
 * must be at the lowest array index location for that chain. The last
 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
 * of the functions in an iodone side chain must correspond to the ordering
 * of the iostart routines for that chain. Note that there is no iodone
 * side routine that corresponds to sd_core_iostart(), so there is no
 * entry in the table for this.
 */

static sd_chain_t sd_iodone_chain[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_buf_iodone,			/* Index: 0 */
	sd_mapblockaddr_iodone,		/* Index: 1 */
	sd_pm_iodone,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_buf_iodone,			/* Index: 3 */
	sd_mapblockaddr_iodone,		/* Index: 4 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets with RMW needed (PM enabled)
	 */
	sd_buf_iodone,			/* Index: 5 */
	sd_mapblockaddr_iodone,		/* Index: 6 */
	sd_mapblocksize_iodone,		/* Index: 7 */
	sd_pm_iodone,			/* Index: 8 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets with RMW needed (PM disabled)
	 */
	sd_buf_iodone,			/* Index: 9 */
	sd_mapblockaddr_iodone,		/* Index: 10 */
	sd_mapblocksize_iodone,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_buf_iodone,			/* Index: 12 */
	sd_mapblockaddr_iodone,		/* Index: 13 */
	sd_checksum_iodone,		/* Index: 14 */
	sd_pm_iodone,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_buf_iodone,			/* Index: 16 */
	sd_mapblockaddr_iodone,		/* Index: 17 */
	sd_checksum_iodone,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_uscsi_iodone,		/* Index: 19 */
	sd_pm_iodone,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_uscsi_iodone,		/* Index: 21 */
	sd_checksum_uscsi_iodone,	/* Index: 22 */
	sd_pm_iodone,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_uscsi_iodone,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_uscsi_iodone,		/* Index: 25 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM enabled)
	 */
	sd_buf_iodone,			/* Index: 26 */
	sd_mapblockaddr_iodone,		/* Index: 27 */
	sd_mapblocksize_iodone,		/* Index: 28 */
	sd_checksum_iodone,		/* Index: 29 */
	sd_pm_iodone,			/* Index: 30 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM disabled)
	 */
	sd_buf_iodone,			/* Index: 31 */
	sd_mapblockaddr_iodone,		/* Index: 32 */
	sd_mapblocksize_iodone,		/* Index: 33 */
	sd_checksum_iodone,		/* Index: 34 */
};


/*
 * Macros to locate the "first" function in the sd_iodone_chain[] array for
 * each iodone-side chain. These are located by the array index, but as the
 * iodone side functions are called in a decrementing-index order, the
 * highest index number in each chain must be specified (as these correspond
 * to the first function in the iodone chain that will be called by the core
 * at IO completion time).
 */

#define	SD_CHAIN_DISK_IODONE			2
#define	SD_CHAIN_DISK_IODONE_NO_PM		4
#define	SD_CHAIN_RMMEDIA_IODONE			8
#define	SD_CHAIN_MSS_DISK_IODONE		8
#define	SD_CHAIN_RMMEDIA_IODONE_NO_PM		11
#define	SD_CHAIN_MSS_DISK_IODONE_NO_PM		11
#define	SD_CHAIN_CHKSUM_IODONE			15
#define	SD_CHAIN_CHKSUM_IODONE_NO_PM		18
#define	SD_CHAIN_USCSI_CMD_IODONE		20
/*
 * NOTE(review): the checksum-USCSI iodone chain occupies array indexes
 * 21..23 (sd_pm_iodone is the third entry, at index 23), yet the value
 * below is 22, which starts iodone processing at sd_checksum_uscsi_iodone
 * and never reaches sd_pm_iodone. Confirm whether this should be 23.
 */
#define	SD_CHAIN_USCSI_CHKSUM_IODONE		22
#define	SD_CHAIN_DIRECT_CMD_IODONE		24
#define	SD_CHAIN_PRIORITY_CMD_IODONE		25
#define	SD_CHAIN_MSS_CHKSUM_IODONE		30
#define	SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM	34



/*
 * Array to map a layering chain index to the appropriate initpkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);

static sd_initpkt_t	sd_initpkt_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 0 */
	sd_initpkt_for_buf,		/* Index: 1 */
	sd_initpkt_for_buf,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 3 */
	sd_initpkt_for_buf,		/* Index: 4 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets (PM enabled)
	 */
	sd_initpkt_for_buf,		/* Index: 5 */
	sd_initpkt_for_buf,		/* Index: 6 */
	sd_initpkt_for_buf,		/* Index: 7 */
	sd_initpkt_for_buf,		/* Index: 8 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets (PM disabled)
	 */
	sd_initpkt_for_buf,		/* Index: 9 */
	sd_initpkt_for_buf,		/* Index: 10 */
	sd_initpkt_for_buf,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 12 */
	sd_initpkt_for_buf,		/* Index: 13 */
	sd_initpkt_for_buf,		/* Index: 14 */
	sd_initpkt_for_buf,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 16 */
	sd_initpkt_for_buf,		/* Index: 17 */
	sd_initpkt_for_buf,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_initpkt_for_uscsi,		/* Index: 19 */
	sd_initpkt_for_uscsi,		/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_initpkt_for_uscsi,		/* Index: 21 */
	sd_initpkt_for_uscsi,		/* Index: 22 */
	sd_initpkt_for_uscsi,		/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_initpkt_for_uscsi,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_initpkt_for_uscsi,		/* Index: 25 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM enabled)
	 */
	sd_initpkt_for_buf,		/* Index: 26 */
	sd_initpkt_for_buf,		/* Index: 27 */
	sd_initpkt_for_buf,		/* Index: 28 */
	sd_initpkt_for_buf,		/* Index: 29 */
	sd_initpkt_for_buf,		/* Index: 30 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM disabled)
	 */
	sd_initpkt_for_buf,		/* Index: 31 */
	sd_initpkt_for_buf,		/* Index: 32 */
	sd_initpkt_for_buf,		/* Index: 33 */
	sd_initpkt_for_buf,		/* Index: 34 */
};


/*
 * Array to map a layering chain index to the appropriate destroypkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef void (*sd_destroypkt_t)(struct buf *);

static sd_destroypkt_t	sd_destroypkt_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 0 */
	sd_destroypkt_for_buf,		/* Index: 1 */
	sd_destroypkt_for_buf,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 3 */
	sd_destroypkt_for_buf,		/* Index: 4 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets (PM enabled)
	 */
	sd_destroypkt_for_buf,		/* Index: 5 */
	sd_destroypkt_for_buf,		/* Index: 6 */
	sd_destroypkt_for_buf,		/* Index: 7 */
	sd_destroypkt_for_buf,		/* Index: 8 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets (PM disabled)
	 */
	sd_destroypkt_for_buf,		/* Index: 9 */
	sd_destroypkt_for_buf,		/* Index: 10 */
	sd_destroypkt_for_buf,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 12 */
	sd_destroypkt_for_buf,		/* Index: 13 */
	sd_destroypkt_for_buf,		/* Index: 14 */
	sd_destroypkt_for_buf,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 16 */
	sd_destroypkt_for_buf,		/* Index: 17 */
	sd_destroypkt_for_buf,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_destroypkt_for_uscsi,	/* Index: 19 */
	sd_destroypkt_for_uscsi,	/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_destroypkt_for_uscsi,	/* Index: 21 */
	sd_destroypkt_for_uscsi,	/* Index: 22 */
	sd_destroypkt_for_uscsi,	/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_destroypkt_for_uscsi,	/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_destroypkt_for_uscsi,	/* Index: 25 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM enabled)
	 */
	sd_destroypkt_for_buf,		/* Index: 26 */
	sd_destroypkt_for_buf,		/* Index: 27 */
	sd_destroypkt_for_buf,		/* Index: 28 */
	sd_destroypkt_for_buf,		/* Index: 29 */
	sd_destroypkt_for_buf,		/* Index: 30 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM disabled)
	 */
	sd_destroypkt_for_buf,		/* Index: 31 */
	sd_destroypkt_for_buf,		/* Index: 32 */
	sd_destroypkt_for_buf,		/* Index: 33 */
	sd_destroypkt_for_buf,		/* Index: 34 */
};



/*
 * Array to map a layering chain index to the appropriate chain "type".
 * The chain type indicates a specific property/usage of the chain.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */

#define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
#define	SD_CHAIN_BUFIO			1	/* regular buf IO */
#define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
#define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
#define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
						/* (for error recovery) */

static int sd_chain_type_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 0 */
	SD_CHAIN_BUFIO,			/* Index: 1 */
	SD_CHAIN_BUFIO,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 3 */
	SD_CHAIN_BUFIO,			/* Index: 4 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets (PM enabled)
	 */
	SD_CHAIN_BUFIO,			/* Index: 5 */
	SD_CHAIN_BUFIO,			/* Index: 6 */
	SD_CHAIN_BUFIO,			/* Index: 7 */
	SD_CHAIN_BUFIO,			/* Index: 8 */

	/*
	 * Chain for buf IO for removable-media or large sector size
	 * disk drive targets (PM disabled)
	 */
	SD_CHAIN_BUFIO,			/* Index: 9 */
	SD_CHAIN_BUFIO,			/* Index: 10 */
	SD_CHAIN_BUFIO,			/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 12 */
	SD_CHAIN_BUFIO,			/* Index: 13 */
	SD_CHAIN_BUFIO,			/* Index: 14 */
	SD_CHAIN_BUFIO,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 16 */
	SD_CHAIN_BUFIO,			/* Index: 17 */
	SD_CHAIN_BUFIO,			/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 19 */
	SD_CHAIN_USCSI,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 21 */
	SD_CHAIN_USCSI,			/* Index: 22 */
	SD_CHAIN_USCSI,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	SD_CHAIN_DIRECT,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM enabled)
	 */
	SD_CHAIN_BUFIO,			/* Index: 26 */
	SD_CHAIN_BUFIO,			/* Index: 27 */
	SD_CHAIN_BUFIO,			/* Index: 28 */
	SD_CHAIN_BUFIO,			/* Index: 29 */
	SD_CHAIN_BUFIO,			/* Index: 30 */

	/*
	 * Chain for buf IO for large sector size disk drive targets
	 * with checksumming (PM disabled)
	 */
	SD_CHAIN_BUFIO,			/* Index: 31 */
	SD_CHAIN_BUFIO,			/* Index: 32 */
	SD_CHAIN_BUFIO,			/* Index: 33 */
	SD_CHAIN_BUFIO,			/* Index: 34 */
};


/* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
#define	SD_IS_BUFIO(xp)			\
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)

/* Macro to return TRUE if the IO has come from the "direct priority" chain. */
#define	SD_IS_DIRECT_PRIORITY(xp)	\
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)



/*
 * Struct, array, and macros to map a specific chain to the appropriate
 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
 *
 * The sd_chain_index_map[] array is used at attach time to set the various
 * un_xxx_chain type members of the sd_lun softstate to the specific layering
 * chain to be used with the instance. This allows different instances to use
 * different chain for buf IO, uscsi IO, etc.. Also, since the xb_chain_iostart
 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
 * values at sd_xbuf init time, this allows (1) layering chains may be changed
 * dynamically & without the use of locking; and (2) a layer may update the
 * xb_chain_io[start|done] member in a given xbuf with its current index value,
 * to allow for deferred processing of an IO within the same chain from a
 * different execution context.
 */

struct sd_chain_index {
	int	sci_iostart_index;	/* index into sd_iostart_chain[] */
	int	sci_iodone_index;	/* index into sd_iodone_chain[] */
};

static struct sd_chain_index	sd_chain_index_map[] = {
	{ SD_CHAIN_DISK_IOSTART,		SD_CHAIN_DISK_IODONE },
	{ SD_CHAIN_DISK_IOSTART_NO_PM,		SD_CHAIN_DISK_IODONE_NO_PM },
	{ SD_CHAIN_RMMEDIA_IOSTART,		SD_CHAIN_RMMEDIA_IODONE },
	{ SD_CHAIN_RMMEDIA_IOSTART_NO_PM,	SD_CHAIN_RMMEDIA_IODONE_NO_PM },
	{ SD_CHAIN_CHKSUM_IOSTART,		SD_CHAIN_CHKSUM_IODONE },
	{ SD_CHAIN_CHKSUM_IOSTART_NO_PM,	SD_CHAIN_CHKSUM_IODONE_NO_PM },
	{ SD_CHAIN_USCSI_CMD_IOSTART,		SD_CHAIN_USCSI_CMD_IODONE },
	{ SD_CHAIN_USCSI_CHKSUM_IOSTART,	SD_CHAIN_USCSI_CHKSUM_IODONE },
	{ SD_CHAIN_DIRECT_CMD_IOSTART,		SD_CHAIN_DIRECT_CMD_IODONE },
	{ SD_CHAIN_PRIORITY_CMD_IOSTART,	SD_CHAIN_PRIORITY_CMD_IODONE },
	{ SD_CHAIN_MSS_CHKSUM_IOSTART,		SD_CHAIN_MSS_CHKSUM_IODONE },
	{ SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM,	SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM },

};


/*
 * The following are indexes into the sd_chain_index_map[] array.
 */

/* un->un_buf_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_DISK			0
#define	SD_CHAIN_INFO_DISK_NO_PM		1
#define	SD_CHAIN_INFO_RMMEDIA			2
#define	SD_CHAIN_INFO_MSS_DISK			2
#define	SD_CHAIN_INFO_RMMEDIA_NO_PM		3
#define	SD_CHAIN_INFO_MSS_DSK_NO_PM		3
#define	SD_CHAIN_INFO_CHKSUM			4
#define	SD_CHAIN_INFO_CHKSUM_NO_PM		5
#define	SD_CHAIN_INFO_MSS_DISK_CHKSUM		10
#define	SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM	11

/* un->un_uscsi_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_USCSI_CMD		6
/* USCSI with PM disabled is the same as DIRECT */
#define	SD_CHAIN_INFO_USCSI_CMD_NO_PM	8
#define	SD_CHAIN_INFO_USCSI_CHKSUM	7

/* un->un_direct_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_DIRECT_CMD	8

/* un->un_priority_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_PRIORITY_CMD	9

/* size for devid inquiries */
#define	MAX_INQUIRY_SIZE	0xF0

/*
 * Macros used by functions to pass a given buf(9S) struct along to the
 * next function in the layering chain for further processing.
 *
 * In the following macros, passing more than three arguments to the called
 * routines causes the optimizer for the SPARC compiler to stop doing tail
 * call elimination which results in significant performance degradation.
 */
#define	SD_BEGIN_IOSTART(index, un, bp)	\
	((*(sd_iostart_chain[index]))(index, un, bp))

#define	SD_BEGIN_IODONE(index, un, bp)	\
	((*(sd_iodone_chain[index]))(index, un, bp))

#define	SD_NEXT_IOSTART(index, un, bp)	\
	((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))

#define	SD_NEXT_IODONE(index, un, bp)	\
	((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))

/*
 * Function: _init
 *
 * Description: This is the driver _init(9E) entry point.
 *
 * Return Code: Returns the value from mod_install(9F) or
 *		ddi_soft_state_init(9F) as appropriate.
 *
 * Context: Called when driver module loaded.
 */

int
_init(void)
{
	int err;

	/* establish driver name from module name */
	sd_label = (char *)mod_modname(&modlinkage);

#ifndef XPV_HVM_DRIVER
	err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
	    SD_MAXUNIT);
	if (err != 0) {
		return (err);
	}

#else	/* XPV_HVM_DRIVER */
	/* Remove the leading "hvm_" from the module name */
	ASSERT(strncmp(sd_label, "hvm_", strlen("hvm_")) == 0);
	sd_label += strlen("hvm_");

#endif	/* XPV_HVM_DRIVER */

	/* Global locks used by the logging and attach/detach paths. */
	mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);

	/* Synchronization for the reservation-reclaim thread state (sd_tr). */
	mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);

	/*
	 * it's ok to init here even for fibre device
	 */
	sd_scsi_probe_cache_init();

	sd_scsi_target_lun_init();

	/*
	 * Creating taskq before mod_install ensures that all callers (threads)
	 * that enter the module after a successful mod_install encounter
	 * a valid taskq.
	 */
	sd_taskq_create();

	err = mod_install(&modlinkage);
	if (err != 0) {
		/*
		 * Install failed: tear everything down again, in the
		 * reverse of the order set up above.
		 */
		/* delete taskq if install fails */
		sd_taskq_delete();

		mutex_destroy(&sd_detach_mutex);
		mutex_destroy(&sd_log_mutex);
		mutex_destroy(&sd_label_mutex);

		mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
		cv_destroy(&sd_tr.srq_resv_reclaim_cv);
		cv_destroy(&sd_tr.srq_inprocess_cv);

		sd_scsi_probe_cache_fini();

		sd_scsi_target_lun_fini();

#ifndef XPV_HVM_DRIVER
		ddi_soft_state_fini(&sd_state);
#endif	/* !XPV_HVM_DRIVER */
		return (err);
	}

	return (err);
}


/*
 * Function: _fini
 *
 * Description: This is the driver _fini(9E) entry point.
 *
 * Return Code: Returns the value from mod_remove(9F)
 *
 * Context: Called when driver module is unloaded.
 */

int
_fini(void)
{
	int err;

	/* If mod_remove() fails the module stays loaded; change nothing. */
	if ((err = mod_remove(&modlinkage)) != 0) {
		return (err);
	}

	sd_taskq_delete();

	mutex_destroy(&sd_detach_mutex);
	mutex_destroy(&sd_log_mutex);
	mutex_destroy(&sd_label_mutex);
	mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);

	sd_scsi_probe_cache_fini();

	sd_scsi_target_lun_fini();

	cv_destroy(&sd_tr.srq_resv_reclaim_cv);
	cv_destroy(&sd_tr.srq_inprocess_cv);

#ifndef XPV_HVM_DRIVER
	ddi_soft_state_fini(&sd_state);
#endif	/* !XPV_HVM_DRIVER */

	return (err);
}


/*
 * Function: _info
 *
 * Description: This is the driver _info(9E) entry point.
 *
 * Arguments: modinfop - pointer to the driver modinfo structure
 *
 * Return Code: Returns the value from mod_info(9F).
 *
 * Context: Kernel thread context
 */

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


/*
 * The following routines implement the driver message logging facility.
 * They provide component- and level- based debug output filtering.
 * Output may also be restricted to messages for a single instance by
 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
 * to NULL, then messages for all instances are printed.
 *
 * These routines have been cloned from each other due to the language
 * constraints of macros and variable argument list processing.
 */


/*
 * Function: sd_log_err
 *
 * Description: This routine is called by the SD_ERROR macro for debug
 *		logging of error conditions.
 *
 * Arguments: comp - driver component being logged
 *	      un - softstate pointer for the instance being logged
 *		   (the dev_info_t is derived from it via SD_DEVINFO)
 *	      fmt - error string and format to be logged
 */

static void
sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
{
	va_list ap;
	dev_info_t *dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 *
	 * NOTE(review): vsprintf() into sd_log_buf is unbounded; callers
	 * must keep formatted messages below the sd_log_buf allocation.
	 * Consider vsnprintf() -- TODO confirm sd_log_buf sizing guarantees.
	 */
	if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & comp) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sd_log_info
 *
 * Description: This routine is called by the SD_INFO macro for debug
 *		logging of general purpose informational conditions.
 *
 * Arguments: component - driver component being logged
 *	      un - softstate pointer for the instance being logged
 *	      fmt - info string and format to be logged
 */

static void
sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
	va_list ap;
	dev_info_t *dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & component) &&
	    (sd_level_mask & SD_LOGMASK_INFO) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & component) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sd_log_trace
 *
 * Description: This routine is called by the SD_TRACE macro for debug
 *		logging of trace conditions (i.e. function entry/exit).
 *
 * Arguments: component - driver component being logged
 *	      un - softstate pointer for the instance being logged
 *	      fmt - trace string and format to be logged
 */

static void
sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
	va_list ap;
	dev_info_t *dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & component) &&
	    (sd_level_mask & SD_LOGMASK_TRACE) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & component) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sdprobe
 *
 * Description: This is the driver probe(9e) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *
 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
 *		DDI_PROBE_FAILURE: If the probe failed.
 *		DDI_PROBE_PARTIAL: If the instance is not present now,
 *		but may be present in the future.
 */

static int
sdprobe(dev_info_t *devi)
{
	struct scsi_device *devp;
	int rval;
#ifndef XPV_HVM_DRIVER
	int instance = ddi_get_instance(devi);
#endif	/* !XPV_HVM_DRIVER */

	/*
	 * if it wasn't for pln, sdprobe could actually be nulldev
	 * in the "__fibre" case.
	 */
	if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
		return (DDI_PROBE_DONTCARE);
	}

	devp = ddi_get_driver_private(devi);

	if (devp == NULL) {
		/* Ooops... nexus driver is mis-configured... */
		return (DDI_PROBE_FAILURE);
	}

#ifndef XPV_HVM_DRIVER
	if (ddi_get_soft_state(sd_state, instance) != NULL) {
		return (DDI_PROBE_PARTIAL);
	}
#endif	/* !XPV_HVM_DRIVER */

	/*
	 * Call the SCSA utility probe routine to see if we actually
	 * have a target at this SCSI nexus.
	 */
	switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
	case SCSIPROBE_EXISTS:
		switch (devp->sd_inq->inq_dtype) {
		case DTYPE_DIRECT:
			rval = DDI_PROBE_SUCCESS;
			break;
		case DTYPE_RODIRECT:
			/* CDs etc. Can be removable media */
			rval = DDI_PROBE_SUCCESS;
			break;
		case DTYPE_OPTICAL:
			/*
			 * Rewritable optical driver HP115AA
			 * Can also be removable media
			 */

			/*
			 * Do not attempt to bind to  DTYPE_OPTICAL if
			 * pre solaris 9 sparc sd behavior is required
			 *
			 * If first time through and sd_dtype_optical_bind
			 * has not been set in /etc/system check properties
			 */

			if (sd_dtype_optical_bind < 0) {
				sd_dtype_optical_bind = ddi_prop_get_int
				    (DDI_DEV_T_ANY, devi, 0,
				    "optical-device-bind", 1);
			}

			if (sd_dtype_optical_bind == 0) {
				rval = DDI_PROBE_FAILURE;
			} else {
				rval = DDI_PROBE_SUCCESS;
			}
			break;

		case DTYPE_NOTPRESENT:
		default:
			rval = DDI_PROBE_FAILURE;
			break;
		}
		break;
	default:
		rval = DDI_PROBE_PARTIAL;
		break;
	}

	/*
	 * This routine checks for resource allocation prior to freeing,
	 * so it will take care of the "smart probing" case where a
	 * scsi_probe() may or may not have been issued and will *not*
	 * free previously-freed resources.
	 */
	scsi_unprobe(devp);
	return (rval);
}


/*
 * Function: sdinfo
 *
 * Description: This is the driver getinfo(9e) entry point function.
2785 * Given the device number, return the devinfo pointer from 2786 * the scsi_device structure or the instance number 2787 * associated with the dev_t. 2788 * 2789 * Arguments: dip - pointer to device info structure 2790 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2791 * DDI_INFO_DEVT2INSTANCE) 2792 * arg - driver dev_t 2793 * resultp - user buffer for request response 2794 * 2795 * Return Code: DDI_SUCCESS 2796 * DDI_FAILURE 2797 */ 2798 /* ARGSUSED */ 2799 static int 2800 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2801 { 2802 struct sd_lun *un; 2803 dev_t dev; 2804 int instance; 2805 int error; 2806 2807 switch (infocmd) { 2808 case DDI_INFO_DEVT2DEVINFO: 2809 dev = (dev_t)arg; 2810 instance = SDUNIT(dev); 2811 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2812 return (DDI_FAILURE); 2813 } 2814 *result = (void *) SD_DEVINFO(un); 2815 error = DDI_SUCCESS; 2816 break; 2817 case DDI_INFO_DEVT2INSTANCE: 2818 dev = (dev_t)arg; 2819 instance = SDUNIT(dev); 2820 *result = (void *)(uintptr_t)instance; 2821 error = DDI_SUCCESS; 2822 break; 2823 default: 2824 error = DDI_FAILURE; 2825 } 2826 return (error); 2827 } 2828 2829 /* 2830 * Function: sd_prop_op 2831 * 2832 * Description: This is the driver prop_op(9e) entry point function. 2833 * Return the number of blocks for the partition in question 2834 * or forward the request to the property facilities. 
2835 * 2836 * Arguments: dev - device number 2837 * dip - pointer to device info structure 2838 * prop_op - property operator 2839 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2840 * name - pointer to property name 2841 * valuep - pointer or address of the user buffer 2842 * lengthp - property length 2843 * 2844 * Return Code: DDI_PROP_SUCCESS 2845 * DDI_PROP_NOT_FOUND 2846 * DDI_PROP_UNDEFINED 2847 * DDI_PROP_NO_MEMORY 2848 * DDI_PROP_BUF_TOO_SMALL 2849 */ 2850 2851 static int 2852 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2853 char *name, caddr_t valuep, int *lengthp) 2854 { 2855 struct sd_lun *un; 2856 2857 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2858 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2859 name, valuep, lengthp)); 2860 2861 return (cmlb_prop_op(un->un_cmlbhandle, 2862 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2863 SDPART(dev), (void *)SD_PATH_DIRECT)); 2864 } 2865 2866 /* 2867 * The following functions are for smart probing: 2868 * sd_scsi_probe_cache_init() 2869 * sd_scsi_probe_cache_fini() 2870 * sd_scsi_clear_probe_cache() 2871 * sd_scsi_probe_with_cache() 2872 */ 2873 2874 /* 2875 * Function: sd_scsi_probe_cache_init 2876 * 2877 * Description: Initializes the probe response cache mutex and head pointer. 2878 * 2879 * Context: Kernel thread context 2880 */ 2881 2882 static void 2883 sd_scsi_probe_cache_init(void) 2884 { 2885 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2886 sd_scsi_probe_cache_head = NULL; 2887 } 2888 2889 2890 /* 2891 * Function: sd_scsi_probe_cache_fini 2892 * 2893 * Description: Frees all resources associated with the probe response cache. 
2894 * 2895 * Context: Kernel thread context 2896 */ 2897 2898 static void 2899 sd_scsi_probe_cache_fini(void) 2900 { 2901 struct sd_scsi_probe_cache *cp; 2902 struct sd_scsi_probe_cache *ncp; 2903 2904 /* Clean up our smart probing linked list */ 2905 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2906 ncp = cp->next; 2907 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2908 } 2909 sd_scsi_probe_cache_head = NULL; 2910 mutex_destroy(&sd_scsi_probe_cache_mutex); 2911 } 2912 2913 2914 /* 2915 * Function: sd_scsi_clear_probe_cache 2916 * 2917 * Description: This routine clears the probe response cache. This is 2918 * done when open() returns ENXIO so that when deferred 2919 * attach is attempted (possibly after a device has been 2920 * turned on) we will retry the probe. Since we don't know 2921 * which target we failed to open, we just clear the 2922 * entire cache. 2923 * 2924 * Context: Kernel thread context 2925 */ 2926 2927 static void 2928 sd_scsi_clear_probe_cache(void) 2929 { 2930 struct sd_scsi_probe_cache *cp; 2931 int i; 2932 2933 mutex_enter(&sd_scsi_probe_cache_mutex); 2934 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2935 /* 2936 * Reset all entries to SCSIPROBE_EXISTS. This will 2937 * force probing to be performed the next time 2938 * sd_scsi_probe_with_cache is called. 2939 */ 2940 for (i = 0; i < NTARGETS_WIDE; i++) { 2941 cp->cache[i] = SCSIPROBE_EXISTS; 2942 } 2943 } 2944 mutex_exit(&sd_scsi_probe_cache_mutex); 2945 } 2946 2947 2948 /* 2949 * Function: sd_scsi_probe_with_cache 2950 * 2951 * Description: This routine implements support for a scsi device probe 2952 * with cache. The driver maintains a cache of the target 2953 * responses to scsi probes. If we get no response from a 2954 * target during a probe inquiry, we remember that, and we 2955 * avoid additional calls to scsi_probe on non-zero LUNs 2956 * on the same target until the cache is cleared. 
By doing 2957 * so we avoid the 1/4 sec selection timeout for nonzero 2958 * LUNs. lun0 of a target is always probed. 2959 * 2960 * Arguments: devp - Pointer to a scsi_device(9S) structure 2961 * waitfunc - indicates what the allocator routines should 2962 * do when resources are not available. This value 2963 * is passed on to scsi_probe() when that routine 2964 * is called. 2965 * 2966 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2967 * otherwise the value returned by scsi_probe(9F). 2968 * 2969 * Context: Kernel thread context 2970 */ 2971 2972 static int 2973 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2974 { 2975 struct sd_scsi_probe_cache *cp; 2976 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2977 int lun, tgt; 2978 2979 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2980 SCSI_ADDR_PROP_LUN, 0); 2981 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2982 SCSI_ADDR_PROP_TARGET, -1); 2983 2984 /* Make sure caching enabled and target in range */ 2985 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2986 /* do it the old way (no cache) */ 2987 return (scsi_probe(devp, waitfn)); 2988 } 2989 2990 mutex_enter(&sd_scsi_probe_cache_mutex); 2991 2992 /* Find the cache for this scsi bus instance */ 2993 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2994 if (cp->pdip == pdip) { 2995 break; 2996 } 2997 } 2998 2999 /* If we can't find a cache for this pdip, create one */ 3000 if (cp == NULL) { 3001 int i; 3002 3003 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 3004 KM_SLEEP); 3005 cp->pdip = pdip; 3006 cp->next = sd_scsi_probe_cache_head; 3007 sd_scsi_probe_cache_head = cp; 3008 for (i = 0; i < NTARGETS_WIDE; i++) { 3009 cp->cache[i] = SCSIPROBE_EXISTS; 3010 } 3011 } 3012 3013 mutex_exit(&sd_scsi_probe_cache_mutex); 3014 3015 /* Recompute the cache for this target if LUN zero */ 3016 if (lun == 0) { 3017 cp->cache[tgt] = SCSIPROBE_EXISTS; 3018 } 3019 3020 
/* Don't probe if cache remembers a NORESP from a previous LUN. */ 3021 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 3022 return (SCSIPROBE_NORESP); 3023 } 3024 3025 /* Do the actual probe; save & return the result */ 3026 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 3027 } 3028 3029 3030 /* 3031 * Function: sd_scsi_target_lun_init 3032 * 3033 * Description: Initializes the attached lun chain mutex and head pointer. 3034 * 3035 * Context: Kernel thread context 3036 */ 3037 3038 static void 3039 sd_scsi_target_lun_init(void) 3040 { 3041 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 3042 sd_scsi_target_lun_head = NULL; 3043 } 3044 3045 3046 /* 3047 * Function: sd_scsi_target_lun_fini 3048 * 3049 * Description: Frees all resources associated with the attached lun 3050 * chain 3051 * 3052 * Context: Kernel thread context 3053 */ 3054 3055 static void 3056 sd_scsi_target_lun_fini(void) 3057 { 3058 struct sd_scsi_hba_tgt_lun *cp; 3059 struct sd_scsi_hba_tgt_lun *ncp; 3060 3061 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 3062 ncp = cp->next; 3063 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 3064 } 3065 sd_scsi_target_lun_head = NULL; 3066 mutex_destroy(&sd_scsi_target_lun_mutex); 3067 } 3068 3069 3070 /* 3071 * Function: sd_scsi_get_target_lun_count 3072 * 3073 * Description: This routine will check in the attached lun chain to see 3074 * how many luns are attached on the required SCSI controller 3075 * and target. Currently, some capabilities like tagged queue 3076 * are supported per target based by HBA. So all luns in a 3077 * target have the same capabilities. Based on this assumption, 3078 * sd should only set these capabilities once per target. This 3079 * function is called when sd needs to decide how many luns 3080 * already attached on a target. 3081 * 3082 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 3083 * controller device. 3084 * target - The target ID on the controller's SCSI bus. 
3085 * 3086 * Return Code: The number of luns attached on the required target and 3087 * controller. 3088 * -1 if target ID is not in parallel SCSI scope or the given 3089 * dip is not in the chain. 3090 * 3091 * Context: Kernel thread context 3092 */ 3093 3094 static int 3095 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 3096 { 3097 struct sd_scsi_hba_tgt_lun *cp; 3098 3099 if ((target < 0) || (target >= NTARGETS_WIDE)) { 3100 return (-1); 3101 } 3102 3103 mutex_enter(&sd_scsi_target_lun_mutex); 3104 3105 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 3106 if (cp->pdip == dip) { 3107 break; 3108 } 3109 } 3110 3111 mutex_exit(&sd_scsi_target_lun_mutex); 3112 3113 if (cp == NULL) { 3114 return (-1); 3115 } 3116 3117 return (cp->nlun[target]); 3118 } 3119 3120 3121 /* 3122 * Function: sd_scsi_update_lun_on_target 3123 * 3124 * Description: This routine is used to update the attached lun chain when a 3125 * lun is attached or detached on a target. 3126 * 3127 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 3128 * controller device. 3129 * target - The target ID on the controller's SCSI bus. 3130 * flag - Indicate the lun is attached or detached. 
3131 * 3132 * Context: Kernel thread context 3133 */ 3134 3135 static void 3136 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 3137 { 3138 struct sd_scsi_hba_tgt_lun *cp; 3139 3140 mutex_enter(&sd_scsi_target_lun_mutex); 3141 3142 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 3143 if (cp->pdip == dip) { 3144 break; 3145 } 3146 } 3147 3148 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 3149 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 3150 KM_SLEEP); 3151 cp->pdip = dip; 3152 cp->next = sd_scsi_target_lun_head; 3153 sd_scsi_target_lun_head = cp; 3154 } 3155 3156 mutex_exit(&sd_scsi_target_lun_mutex); 3157 3158 if (cp != NULL) { 3159 if (flag == SD_SCSI_LUN_ATTACH) { 3160 cp->nlun[target] ++; 3161 } else { 3162 cp->nlun[target] --; 3163 } 3164 } 3165 } 3166 3167 3168 /* 3169 * Function: sd_spin_up_unit 3170 * 3171 * Description: Issues the following commands to spin-up the device: 3172 * START STOP UNIT, and INQUIRY. 3173 * 3174 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3175 * structure for this target. 3176 * 3177 * Return Code: 0 - success 3178 * EIO - failure 3179 * EACCES - reservation conflict 3180 * 3181 * Context: Kernel thread context 3182 */ 3183 3184 static int 3185 sd_spin_up_unit(sd_ssc_t *ssc) 3186 { 3187 size_t resid = 0; 3188 int has_conflict = FALSE; 3189 uchar_t *bufaddr; 3190 int status; 3191 struct sd_lun *un; 3192 3193 ASSERT(ssc != NULL); 3194 un = ssc->ssc_un; 3195 ASSERT(un != NULL); 3196 3197 /* 3198 * Send a throwaway START UNIT command. 3199 * 3200 * If we fail on this, we don't care presently what precisely 3201 * is wrong. EMC's arrays will also fail this with a check 3202 * condition (0x2/0x4/0x3) if the device is "inactive," but 3203 * we don't want to fail the attach because it may become 3204 * "active" later. 3205 * We don't know if power condition is supported or not at 3206 * this stage, use START STOP bit. 
3207 */ 3208 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 3209 SD_TARGET_START, SD_PATH_DIRECT); 3210 3211 if (status != 0) { 3212 if (status == EACCES) 3213 has_conflict = TRUE; 3214 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3215 } 3216 3217 /* 3218 * Send another INQUIRY command to the target. This is necessary for 3219 * non-removable media direct access devices because their INQUIRY data 3220 * may not be fully qualified until they are spun up (perhaps via the 3221 * START command above). Note: This seems to be needed for some 3222 * legacy devices only.) The INQUIRY command should succeed even if a 3223 * Reservation Conflict is present. 3224 */ 3225 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 3226 3227 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid) 3228 != 0) { 3229 kmem_free(bufaddr, SUN_INQSIZE); 3230 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 3231 return (EIO); 3232 } 3233 3234 /* 3235 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 3236 * Note that this routine does not return a failure here even if the 3237 * INQUIRY command did not return any data. This is a legacy behavior. 3238 */ 3239 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 3240 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 3241 } 3242 3243 kmem_free(bufaddr, SUN_INQSIZE); 3244 3245 /* If we hit a reservation conflict above, tell the caller. */ 3246 if (has_conflict == TRUE) { 3247 return (EACCES); 3248 } 3249 3250 return (0); 3251 } 3252 3253 #ifdef _LP64 3254 /* 3255 * Function: sd_enable_descr_sense 3256 * 3257 * Description: This routine attempts to select descriptor sense format 3258 * using the Control mode page. Devices that support 64 bit 3259 * LBAs (for >2TB luns) should also implement descriptor 3260 * sense data so we will call this function whenever we see 3261 * a lun larger than 2TB. If for some reason the device 3262 * supports 64 bit LBAs but doesn't support descriptor sense 3263 * presumably the mode select will fail. 
Everything will 3264 * continue to work normally except that we will not get 3265 * complete sense data for commands that fail with an LBA 3266 * larger than 32 bits. 3267 * 3268 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3269 * structure for this target. 3270 * 3271 * Context: Kernel thread context only 3272 */ 3273 3274 static void 3275 sd_enable_descr_sense(sd_ssc_t *ssc) 3276 { 3277 uchar_t *header; 3278 struct mode_control_scsi3 *ctrl_bufp; 3279 size_t buflen; 3280 size_t bd_len; 3281 int status; 3282 struct sd_lun *un; 3283 3284 ASSERT(ssc != NULL); 3285 un = ssc->ssc_un; 3286 ASSERT(un != NULL); 3287 3288 /* 3289 * Read MODE SENSE page 0xA, Control Mode Page 3290 */ 3291 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3292 sizeof (struct mode_control_scsi3); 3293 header = kmem_zalloc(buflen, KM_SLEEP); 3294 3295 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 3296 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT); 3297 3298 if (status != 0) { 3299 SD_ERROR(SD_LOG_COMMON, un, 3300 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3301 goto eds_exit; 3302 } 3303 3304 /* 3305 * Determine size of Block Descriptors in order to locate 3306 * the mode page data. ATAPI devices return 0, SCSI devices 3307 * should return MODE_BLK_DESC_LENGTH. 3308 */ 3309 bd_len = ((struct mode_header *)header)->bdesc_length; 3310 3311 /* Clear the mode data length field for MODE SELECT */ 3312 ((struct mode_header *)header)->length = 0; 3313 3314 ctrl_bufp = (struct mode_control_scsi3 *) 3315 (header + MODE_HEADER_LENGTH + bd_len); 3316 3317 /* 3318 * If the page length is smaller than the expected value, 3319 * the target device doesn't support D_SENSE. Bail out here. 
3320 */ 3321 if (ctrl_bufp->mode_page.length < 3322 sizeof (struct mode_control_scsi3) - 2) { 3323 SD_ERROR(SD_LOG_COMMON, un, 3324 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3325 goto eds_exit; 3326 } 3327 3328 /* 3329 * Clear PS bit for MODE SELECT 3330 */ 3331 ctrl_bufp->mode_page.ps = 0; 3332 3333 /* 3334 * Set D_SENSE to enable descriptor sense format. 3335 */ 3336 ctrl_bufp->d_sense = 1; 3337 3338 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3339 3340 /* 3341 * Use MODE SELECT to commit the change to the D_SENSE bit 3342 */ 3343 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 3344 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT); 3345 3346 if (status != 0) { 3347 SD_INFO(SD_LOG_COMMON, un, 3348 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3349 } else { 3350 kmem_free(header, buflen); 3351 return; 3352 } 3353 3354 eds_exit: 3355 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3356 kmem_free(header, buflen); 3357 } 3358 3359 /* 3360 * Function: sd_reenable_dsense_task 3361 * 3362 * Description: Re-enable descriptor sense after device or bus reset 3363 * 3364 * Context: Executes in a taskq() thread context 3365 */ 3366 static void 3367 sd_reenable_dsense_task(void *arg) 3368 { 3369 struct sd_lun *un = arg; 3370 sd_ssc_t *ssc; 3371 3372 ASSERT(un != NULL); 3373 3374 ssc = sd_ssc_init(un); 3375 sd_enable_descr_sense(ssc); 3376 sd_ssc_fini(ssc); 3377 } 3378 #endif /* _LP64 */ 3379 3380 /* 3381 * Function: sd_set_mmc_caps 3382 * 3383 * Description: This routine determines if the device is MMC compliant and if 3384 * the device supports CDDA via a mode sense of the CDVD 3385 * capabilities mode page. Also checks if the device is a 3386 * dvdram writable device. 3387 * 3388 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3389 * structure for this target. 
3390 * 3391 * Context: Kernel thread context only 3392 */ 3393 3394 static void 3395 sd_set_mmc_caps(sd_ssc_t *ssc) 3396 { 3397 struct mode_header_grp2 *sense_mhp; 3398 uchar_t *sense_page; 3399 caddr_t buf; 3400 int bd_len; 3401 int status; 3402 struct uscsi_cmd com; 3403 int rtn; 3404 uchar_t *out_data_rw, *out_data_hd; 3405 uchar_t *rqbuf_rw, *rqbuf_hd; 3406 struct sd_lun *un; 3407 3408 ASSERT(ssc != NULL); 3409 un = ssc->ssc_un; 3410 ASSERT(un != NULL); 3411 3412 /* 3413 * The flags which will be set in this function are - mmc compliant, 3414 * dvdram writable device, cdda support. Initialize them to FALSE 3415 * and if a capability is detected - it will be set to TRUE. 3416 */ 3417 un->un_f_mmc_cap = FALSE; 3418 un->un_f_dvdram_writable_device = FALSE; 3419 un->un_f_cfg_cdda = FALSE; 3420 3421 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3422 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3423 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3424 3425 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3426 3427 if (status != 0) { 3428 /* command failed; just return */ 3429 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3430 return; 3431 } 3432 /* 3433 * If the mode sense request for the CDROM CAPABILITIES 3434 * page (0x2A) succeeds the device is assumed to be MMC. 3435 */ 3436 un->un_f_mmc_cap = TRUE; 3437 3438 /* Get to the page data */ 3439 sense_mhp = (struct mode_header_grp2 *)buf; 3440 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3441 sense_mhp->bdesc_length_lo; 3442 if (bd_len > MODE_BLK_DESC_LENGTH) { 3443 /* 3444 * We did not get back the expected block descriptor 3445 * length so we cannot determine if the device supports 3446 * CDDA. However, we still indicate the device is MMC 3447 * according to the successful response to the page 3448 * 0x2A mode sense request. 
3449 */ 3450 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3451 "sd_set_mmc_caps: Mode Sense returned " 3452 "invalid block descriptor length\n"); 3453 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3454 return; 3455 } 3456 3457 /* See if read CDDA is supported */ 3458 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3459 bd_len); 3460 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3461 3462 /* See if writing DVD RAM is supported. */ 3463 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3464 if (un->un_f_dvdram_writable_device == TRUE) { 3465 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3466 return; 3467 } 3468 3469 /* 3470 * If the device presents DVD or CD capabilities in the mode 3471 * page, we can return here since a RRD will not have 3472 * these capabilities. 3473 */ 3474 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3475 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3476 return; 3477 } 3478 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3479 3480 /* 3481 * If un->un_f_dvdram_writable_device is still FALSE, 3482 * check for a Removable Rigid Disk (RRD). A RRD 3483 * device is identified by the features RANDOM_WRITABLE and 3484 * HARDWARE_DEFECT_MANAGEMENT. 
3485 */ 3486 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3487 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3488 3489 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3490 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3491 RANDOM_WRITABLE, SD_PATH_STANDARD); 3492 3493 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3494 3495 if (rtn != 0) { 3496 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3497 kmem_free(rqbuf_rw, SENSE_LENGTH); 3498 return; 3499 } 3500 3501 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3502 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3503 3504 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3505 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3506 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3507 3508 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3509 3510 if (rtn == 0) { 3511 /* 3512 * We have good information, check for random writable 3513 * and hardware defect features. 3514 */ 3515 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3516 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3517 un->un_f_dvdram_writable_device = TRUE; 3518 } 3519 } 3520 3521 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3522 kmem_free(rqbuf_rw, SENSE_LENGTH); 3523 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3524 kmem_free(rqbuf_hd, SENSE_LENGTH); 3525 } 3526 3527 /* 3528 * Function: sd_check_for_writable_cd 3529 * 3530 * Description: This routine determines if the media in the device is 3531 * writable or not. It uses the get configuration command (0x46) 3532 * to determine if the media is writable 3533 * 3534 * Arguments: un - driver soft state (unit) structure 3535 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3536 * chain and the normal command waitq, or 3537 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3538 * "direct" chain and bypass the normal command 3539 * waitq. 3540 * 3541 * Context: Never called at interrupt context. 
3542 */ 3543 3544 static void 3545 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag) 3546 { 3547 struct uscsi_cmd com; 3548 uchar_t *out_data; 3549 uchar_t *rqbuf; 3550 int rtn; 3551 uchar_t *out_data_rw, *out_data_hd; 3552 uchar_t *rqbuf_rw, *rqbuf_hd; 3553 struct mode_header_grp2 *sense_mhp; 3554 uchar_t *sense_page; 3555 caddr_t buf; 3556 int bd_len; 3557 int status; 3558 struct sd_lun *un; 3559 3560 ASSERT(ssc != NULL); 3561 un = ssc->ssc_un; 3562 ASSERT(un != NULL); 3563 ASSERT(mutex_owned(SD_MUTEX(un))); 3564 3565 /* 3566 * Initialize the writable media to false, if configuration info. 3567 * tells us otherwise then only we will set it. 3568 */ 3569 un->un_f_mmc_writable_media = FALSE; 3570 mutex_exit(SD_MUTEX(un)); 3571 3572 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3573 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3574 3575 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH, 3576 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3577 3578 if (rtn != 0) 3579 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3580 3581 mutex_enter(SD_MUTEX(un)); 3582 if (rtn == 0) { 3583 /* 3584 * We have good information, check for writable DVD. 3585 */ 3586 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3587 un->un_f_mmc_writable_media = TRUE; 3588 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3589 kmem_free(rqbuf, SENSE_LENGTH); 3590 return; 3591 } 3592 } 3593 3594 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3595 kmem_free(rqbuf, SENSE_LENGTH); 3596 3597 /* 3598 * Determine if this is a RRD type device. 
3599 */ 3600 mutex_exit(SD_MUTEX(un)); 3601 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3602 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3603 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3604 3605 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3606 3607 mutex_enter(SD_MUTEX(un)); 3608 if (status != 0) { 3609 /* command failed; just return */ 3610 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3611 return; 3612 } 3613 3614 /* Get to the page data */ 3615 sense_mhp = (struct mode_header_grp2 *)buf; 3616 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3617 if (bd_len > MODE_BLK_DESC_LENGTH) { 3618 /* 3619 * We did not get back the expected block descriptor length so 3620 * we cannot check the mode page. 3621 */ 3622 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3623 "sd_check_for_writable_cd: Mode Sense returned " 3624 "invalid block descriptor length\n"); 3625 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3626 return; 3627 } 3628 3629 /* 3630 * If the device presents DVD or CD capabilities in the mode 3631 * page, we can return here since a RRD device will not have 3632 * these capabilities. 3633 */ 3634 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3635 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3636 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3637 return; 3638 } 3639 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3640 3641 /* 3642 * If un->un_f_mmc_writable_media is still FALSE, 3643 * check for RRD type media. A RRD device is identified 3644 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 
3645 */ 3646 mutex_exit(SD_MUTEX(un)); 3647 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3648 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3649 3650 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3651 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3652 RANDOM_WRITABLE, path_flag); 3653 3654 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3655 if (rtn != 0) { 3656 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3657 kmem_free(rqbuf_rw, SENSE_LENGTH); 3658 mutex_enter(SD_MUTEX(un)); 3659 return; 3660 } 3661 3662 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3663 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3664 3665 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3666 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3667 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3668 3669 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3670 mutex_enter(SD_MUTEX(un)); 3671 if (rtn == 0) { 3672 /* 3673 * We have good information, check for random writable 3674 * and hardware defect features as current. 3675 */ 3676 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3677 (out_data_rw[10] & 0x1) && 3678 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3679 (out_data_hd[10] & 0x1)) { 3680 un->un_f_mmc_writable_media = TRUE; 3681 } 3682 } 3683 3684 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3685 kmem_free(rqbuf_rw, SENSE_LENGTH); 3686 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3687 kmem_free(rqbuf_hd, SENSE_LENGTH); 3688 } 3689 3690 /* 3691 * Function: sd_read_unit_properties 3692 * 3693 * Description: The following implements a property lookup mechanism. 3694 * Properties for particular disks (keyed on vendor, model 3695 * and rev numbers) are sought in the sd.conf file via 3696 * sd_process_sdconf_file(), and if not found there, are 3697 * looked for in a list hardcoded in this driver via 3698 * sd_process_sdconf_table() Once located the properties 3699 * are used to update the driver unit structure. 
3700 * 3701 * Arguments: un - driver soft state (unit) structure 3702 */ 3703 3704 static void 3705 sd_read_unit_properties(struct sd_lun *un) 3706 { 3707 /* 3708 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3709 * the "sd-config-list" property (from the sd.conf file) or if 3710 * there was not a match for the inquiry vid/pid. If this event 3711 * occurs the static driver configuration table is searched for 3712 * a match. 3713 */ 3714 ASSERT(un != NULL); 3715 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3716 sd_process_sdconf_table(un); 3717 } 3718 3719 /* check for LSI device */ 3720 sd_is_lsi(un); 3721 3722 3723 } 3724 3725 3726 /* 3727 * Function: sd_process_sdconf_file 3728 * 3729 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the 3730 * driver's config file (ie, sd.conf) and update the driver 3731 * soft state structure accordingly. 3732 * 3733 * Arguments: un - driver soft state (unit) structure 3734 * 3735 * Return Code: SD_SUCCESS - The properties were successfully set according 3736 * to the driver configuration file. 3737 * SD_FAILURE - The driver config list was not obtained or 3738 * there was no vid/pid match. This indicates that 3739 * the static config table should be used. 3740 * 3741 * The config file has a property, "sd-config-list". Currently we support 3742 * two kinds of formats. For both formats, the value of this property 3743 * is a list of duplets: 3744 * 3745 * sd-config-list= 3746 * <duplet>, 3747 * [,<duplet>]*; 3748 * 3749 * For the improved format, where 3750 * 3751 * <duplet>:= "<vid+pid>","<tunable-list>" 3752 * 3753 * and 3754 * 3755 * <tunable-list>:= <tunable> [, <tunable> ]*; 3756 * <tunable> = <name> : <value> 3757 * 3758 * The <vid+pid> is the string that is returned by the target device on a 3759 * SCSI inquiry command, the <tunable-list> contains one or more tunables 3760 * to apply to all target devices with the specified <vid+pid>. 
 *
 * Each <tunable> is a "<name> : <value>" pair.
 *
 * For the old format, the structure of each duplet is as follows:
 *
 *  <duplet>:= "<vid+pid>","<data-property-name_list>"
 *
 * The first entry of the duplet is the device ID string (the concatenated
 * vid & pid; not to be confused with a device_id). This is defined in
 * the same way as in the sd_disk_table.
 *
 * The second part of the duplet is a string that identifies a
 * data-property-name-list. The data-property-name-list is defined as
 * follows:
 *
 *  <data-property-name-list>:=<data-property-name> [<data-property-name>]
 *
 * The syntax of <data-property-name> depends on the <version> field.
 *
 * If version = SD_CONF_VERSION_1 we have the following syntax:
 *
 *	<data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
 *
 * where the prop0 value will be used to set prop0 if bit0 set in the
 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1
 *
 */

static int
sd_process_sdconf_file(struct sd_lun *un)
{
	char	**config_list = NULL;	/* string pairs from sd-config-list */
	uint_t	nelements;		/* number of strings in config_list */
	char	*vidptr;		/* vid/pid half of current duplet */
	int	vidlen;
	char	*dnlist_ptr;		/* data half of current duplet */
	char	*dataname_ptr;		/* one data-property-name entry */
	char	*dataname_lasts;	/* sd_strtok_r() resume state */
	int	*data_list = NULL;	/* int array for one data property */
	uint_t	data_list_len;
	int	rval = SD_FAILURE;	/* SD_SUCCESS once any match applies */
	int	i;

	ASSERT(un != NULL);

	/* Obtain the configuration list associated with the .conf file */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list,
	    &config_list, &nelements) != DDI_PROP_SUCCESS) {
		return (SD_FAILURE);
	}

	/*
	 * Compare vids in each duplet to the inquiry vid - if a match is
	 * made, get the data value and update the soft state structure
	 * accordingly.
	 *
	 * Each duplet should show as a pair of strings, return SD_FAILURE
	 * otherwise.
	 */
	if (nelements & 1) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd-config-list should show as pairs of strings.\n");
		if (config_list)
			ddi_prop_free(config_list);
		return (SD_FAILURE);
	}

	/* Walk the duplets: even index = vid/pid, odd index = data list. */
	for (i = 0; i < nelements; i += 2) {
		/*
		 * Note: The assumption here is that each vid entry is on
		 * a unique line from its associated duplet.
		 */
		vidptr = config_list[i];
		vidlen = (int)strlen(vidptr);
		if ((vidlen == 0) ||
		    (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) {
			continue;
		}

		/*
		 * dnlist contains 1 or more blank separated
		 * data-property-name entries
		 */
		dnlist_ptr = config_list[i + 1];

		/* A ':' marks the improved "<name> : <value>" format. */
		if (strchr(dnlist_ptr, ':') != NULL) {
			/*
			 * Decode the improved format sd-config-list.
			 */
			sd_nvpair_str_decode(un, dnlist_ptr);
		} else {
			/*
			 * The old format sd-config-list, loop through all
			 * data-property-name entries in the
			 * data-property-name-list
			 * setting the properties for each.
			 */
			for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t",
			    &dataname_lasts); dataname_ptr != NULL;
			    dataname_ptr = sd_strtok_r(NULL, " \t",
			    &dataname_lasts)) {
				int version;

				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_process_sdconf_file: disk:%s, "
				    "data:%s\n", vidptr, dataname_ptr);

				/* Get the data list */
				if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
				    SD_DEVINFO(un), 0, dataname_ptr, &data_list,
				    &data_list_len) != DDI_PROP_SUCCESS) {
					SD_INFO(SD_LOG_ATTACH_DETACH, un,
					    "sd_process_sdconf_file: data "
					    "property (%s) has no value\n",
					    dataname_ptr);
					continue;
				}

				/* data_list[0] is the format version word */
				version = data_list[0];

				if (version == SD_CONF_VERSION_1) {
					sd_tunables values;

					/* Set the properties */
					if (sd_chk_vers1_data(un, data_list[1],
					    &data_list[2], data_list_len,
					    dataname_ptr) == SD_SUCCESS) {
						sd_get_tunables_from_conf(un,
						    data_list[1], &data_list[2],
						    &values);
						sd_set_vers1_properties(un,
						    data_list[1], &values);
						rval = SD_SUCCESS;
					} else {
						rval = SD_FAILURE;
					}
				} else {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "data property %s version "
					    "0x%x is invalid.",
					    dataname_ptr, version);
					rval = SD_FAILURE;
				}
				/* Free the array from the int_array lookup */
				if (data_list)
					ddi_prop_free(data_list);
			}
		}
	}

	/* free up the memory allocated by ddi_prop_lookup_string_array(). */
	if (config_list) {
		ddi_prop_free(config_list);
	}

	return (rval);
}

/*
 * Function: sd_nvpair_str_decode()
 *
 * Description: Parse the improved format sd-config-list to get
 *    each entry of tunable, which includes a name-value pair.
 *    Then call sd_set_properties() to set the property.
3925 * 3926 * Arguments: un - driver soft state (unit) structure 3927 * nvpair_str - the tunable list 3928 */ 3929 static void 3930 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3931 { 3932 char *nv, *name, *value, *token; 3933 char *nv_lasts, *v_lasts, *x_lasts; 3934 3935 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3936 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3937 token = sd_strtok_r(nv, ":", &v_lasts); 3938 name = sd_strtok_r(token, " \t", &x_lasts); 3939 token = sd_strtok_r(NULL, ":", &v_lasts); 3940 value = sd_strtok_r(token, " \t", &x_lasts); 3941 if (name == NULL || value == NULL) { 3942 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3943 "sd_nvpair_str_decode: " 3944 "name or value is not valid!\n"); 3945 } else { 3946 sd_set_properties(un, name, value); 3947 } 3948 } 3949 } 3950 3951 /* 3952 * Function: sd_strtok_r() 3953 * 3954 * Description: This function uses strpbrk and strspn to break 3955 * string into tokens on sequentially subsequent calls. Return 3956 * NULL when no non-separator characters remain. The first 3957 * argument is NULL for subsequent calls. 3958 */ 3959 static char * 3960 sd_strtok_r(char *string, const char *sepset, char **lasts) 3961 { 3962 char *q, *r; 3963 3964 /* First or subsequent call */ 3965 if (string == NULL) 3966 string = *lasts; 3967 3968 if (string == NULL) 3969 return (NULL); 3970 3971 /* Skip leading separators */ 3972 q = string + strspn(string, sepset); 3973 3974 if (*q == '\0') 3975 return (NULL); 3976 3977 if ((r = strpbrk(q, sepset)) == NULL) 3978 *lasts = NULL; 3979 else { 3980 *r = '\0'; 3981 *lasts = r + 1; 3982 } 3983 return (q); 3984 } 3985 3986 /* 3987 * Function: sd_set_properties() 3988 * 3989 * Description: Set device properties based on the improved 3990 * format sd-config-list. 
3991 * 3992 * Arguments: un - driver soft state (unit) structure 3993 * name - supported tunable name 3994 * value - tunable value 3995 */ 3996 static void 3997 sd_set_properties(struct sd_lun *un, char *name, char *value) 3998 { 3999 char *endptr = NULL; 4000 long val = 0; 4001 4002 if (strcasecmp(name, "cache-nonvolatile") == 0) { 4003 if (strcasecmp(value, "true") == 0) { 4004 un->un_f_suppress_cache_flush = TRUE; 4005 } else if (strcasecmp(value, "false") == 0) { 4006 un->un_f_suppress_cache_flush = FALSE; 4007 } else { 4008 goto value_invalid; 4009 } 4010 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4011 "suppress_cache_flush flag set to %d\n", 4012 un->un_f_suppress_cache_flush); 4013 return; 4014 } 4015 4016 if (strcasecmp(name, "controller-type") == 0) { 4017 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4018 un->un_ctype = val; 4019 } else { 4020 goto value_invalid; 4021 } 4022 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4023 "ctype set to %d\n", un->un_ctype); 4024 return; 4025 } 4026 4027 if (strcasecmp(name, "delay-busy") == 0) { 4028 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4029 un->un_busy_timeout = drv_usectohz(val / 1000); 4030 } else { 4031 goto value_invalid; 4032 } 4033 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4034 "busy_timeout set to %d\n", un->un_busy_timeout); 4035 return; 4036 } 4037 4038 if (strcasecmp(name, "disksort") == 0) { 4039 if (strcasecmp(value, "true") == 0) { 4040 un->un_f_disksort_disabled = FALSE; 4041 } else if (strcasecmp(value, "false") == 0) { 4042 un->un_f_disksort_disabled = TRUE; 4043 } else { 4044 goto value_invalid; 4045 } 4046 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4047 "disksort disabled flag set to %d\n", 4048 un->un_f_disksort_disabled); 4049 return; 4050 } 4051 4052 if (strcasecmp(name, "power-condition") == 0) { 4053 if (strcasecmp(value, "true") == 0) { 4054 un->un_f_power_condition_disabled = FALSE; 4055 } else if (strcasecmp(value, "false") == 0) 
{ 4056 un->un_f_power_condition_disabled = TRUE; 4057 } else { 4058 goto value_invalid; 4059 } 4060 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4061 "power condition disabled flag set to %d\n", 4062 un->un_f_power_condition_disabled); 4063 return; 4064 } 4065 4066 if (strcasecmp(name, "timeout-releasereservation") == 0) { 4067 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4068 un->un_reserve_release_time = val; 4069 } else { 4070 goto value_invalid; 4071 } 4072 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4073 "reservation release timeout set to %d\n", 4074 un->un_reserve_release_time); 4075 return; 4076 } 4077 4078 if (strcasecmp(name, "reset-lun") == 0) { 4079 if (strcasecmp(value, "true") == 0) { 4080 un->un_f_lun_reset_enabled = TRUE; 4081 } else if (strcasecmp(value, "false") == 0) { 4082 un->un_f_lun_reset_enabled = FALSE; 4083 } else { 4084 goto value_invalid; 4085 } 4086 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4087 "lun reset enabled flag set to %d\n", 4088 un->un_f_lun_reset_enabled); 4089 return; 4090 } 4091 4092 if (strcasecmp(name, "retries-busy") == 0) { 4093 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4094 un->un_busy_retry_count = val; 4095 } else { 4096 goto value_invalid; 4097 } 4098 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4099 "busy retry count set to %d\n", un->un_busy_retry_count); 4100 return; 4101 } 4102 4103 if (strcasecmp(name, "retries-timeout") == 0) { 4104 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4105 un->un_retry_count = val; 4106 } else { 4107 goto value_invalid; 4108 } 4109 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4110 "timeout retry count set to %d\n", un->un_retry_count); 4111 return; 4112 } 4113 4114 if (strcasecmp(name, "retries-notready") == 0) { 4115 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4116 un->un_notready_retry_count = val; 4117 } else { 4118 goto value_invalid; 4119 } 4120 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4121 
"notready retry count set to %d\n", 4122 un->un_notready_retry_count); 4123 return; 4124 } 4125 4126 if (strcasecmp(name, "retries-reset") == 0) { 4127 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4128 un->un_reset_retry_count = val; 4129 } else { 4130 goto value_invalid; 4131 } 4132 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4133 "reset retry count set to %d\n", 4134 un->un_reset_retry_count); 4135 return; 4136 } 4137 4138 if (strcasecmp(name, "throttle-max") == 0) { 4139 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4140 un->un_saved_throttle = un->un_throttle = val; 4141 } else { 4142 goto value_invalid; 4143 } 4144 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4145 "throttle set to %d\n", un->un_throttle); 4146 } 4147 4148 if (strcasecmp(name, "throttle-min") == 0) { 4149 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4150 un->un_min_throttle = val; 4151 } else { 4152 goto value_invalid; 4153 } 4154 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4155 "min throttle set to %d\n", un->un_min_throttle); 4156 } 4157 4158 if (strcasecmp(name, "rmw-type") == 0) { 4159 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4160 un->un_f_rmw_type = val; 4161 } else { 4162 goto value_invalid; 4163 } 4164 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4165 "RMW type set to %d\n", un->un_f_rmw_type); 4166 } 4167 4168 /* 4169 * Validate the throttle values. 4170 * If any of the numbers are invalid, set everything to defaults. 
4171 */ 4172 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4173 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4174 (un->un_min_throttle > un->un_throttle)) { 4175 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4176 un->un_min_throttle = sd_min_throttle; 4177 } 4178 return; 4179 4180 value_invalid: 4181 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4182 "value of prop %s is invalid\n", name); 4183 } 4184 4185 /* 4186 * Function: sd_get_tunables_from_conf() 4187 * 4188 * 4189 * This function reads the data list from the sd.conf file and pulls 4190 * the values that can have numeric values as arguments and places 4191 * the values in the appropriate sd_tunables member. 4192 * Since the order of the data list members varies across platforms 4193 * This function reads them from the data list in a platform specific 4194 * order and places them into the correct sd_tunable member that is 4195 * consistent across all platforms. 4196 */ 4197 static void 4198 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 4199 sd_tunables *values) 4200 { 4201 int i; 4202 int mask; 4203 4204 bzero(values, sizeof (sd_tunables)); 4205 4206 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4207 4208 mask = 1 << i; 4209 if (mask > flags) { 4210 break; 4211 } 4212 4213 switch (mask & flags) { 4214 case 0: /* This mask bit not set in flags */ 4215 continue; 4216 case SD_CONF_BSET_THROTTLE: 4217 values->sdt_throttle = data_list[i]; 4218 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4219 "sd_get_tunables_from_conf: throttle = %d\n", 4220 values->sdt_throttle); 4221 break; 4222 case SD_CONF_BSET_CTYPE: 4223 values->sdt_ctype = data_list[i]; 4224 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4225 "sd_get_tunables_from_conf: ctype = %d\n", 4226 values->sdt_ctype); 4227 break; 4228 case SD_CONF_BSET_NRR_COUNT: 4229 values->sdt_not_rdy_retries = data_list[i]; 4230 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4231 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 4232 
values->sdt_not_rdy_retries); 4233 break; 4234 case SD_CONF_BSET_BSY_RETRY_COUNT: 4235 values->sdt_busy_retries = data_list[i]; 4236 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4237 "sd_get_tunables_from_conf: busy_retries = %d\n", 4238 values->sdt_busy_retries); 4239 break; 4240 case SD_CONF_BSET_RST_RETRIES: 4241 values->sdt_reset_retries = data_list[i]; 4242 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4243 "sd_get_tunables_from_conf: reset_retries = %d\n", 4244 values->sdt_reset_retries); 4245 break; 4246 case SD_CONF_BSET_RSV_REL_TIME: 4247 values->sdt_reserv_rel_time = data_list[i]; 4248 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4249 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 4250 values->sdt_reserv_rel_time); 4251 break; 4252 case SD_CONF_BSET_MIN_THROTTLE: 4253 values->sdt_min_throttle = data_list[i]; 4254 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4255 "sd_get_tunables_from_conf: min_throttle = %d\n", 4256 values->sdt_min_throttle); 4257 break; 4258 case SD_CONF_BSET_DISKSORT_DISABLED: 4259 values->sdt_disk_sort_dis = data_list[i]; 4260 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4261 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 4262 values->sdt_disk_sort_dis); 4263 break; 4264 case SD_CONF_BSET_LUN_RESET_ENABLED: 4265 values->sdt_lun_reset_enable = data_list[i]; 4266 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4267 "sd_get_tunables_from_conf: lun_reset_enable = %d" 4268 "\n", values->sdt_lun_reset_enable); 4269 break; 4270 case SD_CONF_BSET_CACHE_IS_NV: 4271 values->sdt_suppress_cache_flush = data_list[i]; 4272 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4273 "sd_get_tunables_from_conf: \ 4274 suppress_cache_flush = %d" 4275 "\n", values->sdt_suppress_cache_flush); 4276 break; 4277 case SD_CONF_BSET_PC_DISABLED: 4278 values->sdt_disk_sort_dis = data_list[i]; 4279 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4280 "sd_get_tunables_from_conf: power_condition_dis = " 4281 "%d\n", values->sdt_power_condition_dis); 4282 break; 4283 } 4284 } 4285 } 4286 4287 /* 4288 * Function: sd_process_sdconf_table 4289 * 4290 * 
Description: Search the static configuration table for a match on the 4291 * inquiry vid/pid and update the driver soft state structure 4292 * according to the table property values for the device. 4293 * 4294 * The form of a configuration table entry is: 4295 * <vid+pid>,<flags>,<property-data> 4296 * "SEAGATE ST42400N",1,0x40000, 4297 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 4298 * 4299 * Arguments: un - driver soft state (unit) structure 4300 */ 4301 4302 static void 4303 sd_process_sdconf_table(struct sd_lun *un) 4304 { 4305 char *id = NULL; 4306 int table_index; 4307 int idlen; 4308 4309 ASSERT(un != NULL); 4310 for (table_index = 0; table_index < sd_disk_table_size; 4311 table_index++) { 4312 id = sd_disk_table[table_index].device_id; 4313 idlen = strlen(id); 4314 if (idlen == 0) { 4315 continue; 4316 } 4317 4318 /* 4319 * The static configuration table currently does not 4320 * implement version 10 properties. Additionally, 4321 * multiple data-property-name entries are not 4322 * implemented in the static configuration table. 4323 */ 4324 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4325 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4326 "sd_process_sdconf_table: disk %s\n", id); 4327 sd_set_vers1_properties(un, 4328 sd_disk_table[table_index].flags, 4329 sd_disk_table[table_index].properties); 4330 break; 4331 } 4332 } 4333 } 4334 4335 4336 /* 4337 * Function: sd_sdconf_id_match 4338 * 4339 * Description: This local function implements a case sensitive vid/pid 4340 * comparison as well as the boundary cases of wild card and 4341 * multiple blanks. 4342 * 4343 * Note: An implicit assumption made here is that the scsi 4344 * inquiry structure will always keep the vid, pid and 4345 * revision strings in consecutive sequence, so they can be 4346 * read as a single string. If this assumption is not the 4347 * case, a separate string, to be used for the check, needs 4348 * to be built with these strings concatenated. 
4349 * 4350 * Arguments: un - driver soft state (unit) structure 4351 * id - table or config file vid/pid 4352 * idlen - length of the vid/pid (bytes) 4353 * 4354 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4355 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4356 */ 4357 4358 static int 4359 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 4360 { 4361 struct scsi_inquiry *sd_inq; 4362 int rval = SD_SUCCESS; 4363 4364 ASSERT(un != NULL); 4365 sd_inq = un->un_sd->sd_inq; 4366 ASSERT(id != NULL); 4367 4368 /* 4369 * We use the inq_vid as a pointer to a buffer containing the 4370 * vid and pid and use the entire vid/pid length of the table 4371 * entry for the comparison. This works because the inq_pid 4372 * data member follows inq_vid in the scsi_inquiry structure. 4373 */ 4374 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4375 /* 4376 * The user id string is compared to the inquiry vid/pid 4377 * using a case insensitive comparison and ignoring 4378 * multiple spaces. 4379 */ 4380 rval = sd_blank_cmp(un, id, idlen); 4381 if (rval != SD_SUCCESS) { 4382 /* 4383 * User id strings that start and end with a "*" 4384 * are a special case. These do not have a 4385 * specific vendor, and the product string can 4386 * appear anywhere in the 16 byte PID portion of 4387 * the inquiry data. This is a simple strstr() 4388 * type search for the user id in the inquiry data. 
4389 */ 4390 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4391 char *pidptr = &id[1]; 4392 int i; 4393 int j; 4394 int pidstrlen = idlen - 2; 4395 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4396 pidstrlen; 4397 4398 if (j < 0) { 4399 return (SD_FAILURE); 4400 } 4401 for (i = 0; i < j; i++) { 4402 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4403 pidptr, pidstrlen) == 0) { 4404 rval = SD_SUCCESS; 4405 break; 4406 } 4407 } 4408 } 4409 } 4410 } 4411 return (rval); 4412 } 4413 4414 4415 /* 4416 * Function: sd_blank_cmp 4417 * 4418 * Description: If the id string starts and ends with a space, treat 4419 * multiple consecutive spaces as equivalent to a single 4420 * space. For example, this causes a sd_disk_table entry 4421 * of " NEC CDROM " to match a device's id string of 4422 * "NEC CDROM". 4423 * 4424 * Note: The success exit condition for this routine is if 4425 * the pointer to the table entry is '\0' and the cnt of 4426 * the inquiry length is zero. This will happen if the inquiry 4427 * string returned by the device is padded with spaces to be 4428 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4429 * SCSI spec states that the inquiry string is to be padded with 4430 * spaces. 4431 * 4432 * Arguments: un - driver soft state (unit) structure 4433 * id - table or config file vid/pid 4434 * idlen - length of the vid/pid (bytes) 4435 * 4436 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4437 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4438 */ 4439 4440 static int 4441 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4442 { 4443 char *p1; 4444 char *p2; 4445 int cnt; 4446 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4447 sizeof (SD_INQUIRY(un)->inq_pid); 4448 4449 ASSERT(un != NULL); 4450 p2 = un->un_sd->sd_inq->inq_vid; 4451 ASSERT(id != NULL); 4452 p1 = id; 4453 4454 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4455 /* 4456 * Note: string p1 is terminated by a NUL but string p2 4457 * isn't. 
The end of p2 is determined by cnt. 4458 */ 4459 for (;;) { 4460 /* skip over any extra blanks in both strings */ 4461 while ((*p1 != '\0') && (*p1 == ' ')) { 4462 p1++; 4463 } 4464 while ((cnt != 0) && (*p2 == ' ')) { 4465 p2++; 4466 cnt--; 4467 } 4468 4469 /* compare the two strings */ 4470 if ((cnt == 0) || 4471 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4472 break; 4473 } 4474 while ((cnt > 0) && 4475 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4476 p1++; 4477 p2++; 4478 cnt--; 4479 } 4480 } 4481 } 4482 4483 /* return SD_SUCCESS if both strings match */ 4484 return (((*p1 == '\0') && (cnt == 0)) ? SD_SUCCESS : SD_FAILURE); 4485 } 4486 4487 4488 /* 4489 * Function: sd_chk_vers1_data 4490 * 4491 * Description: Verify the version 1 device properties provided by the 4492 * user via the configuration file 4493 * 4494 * Arguments: un - driver soft state (unit) structure 4495 * flags - integer mask indicating properties to be set 4496 * prop_list - integer list of property values 4497 * list_len - number of the elements 4498 * 4499 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 4500 * SD_FAILURE - Indicates the user provided data is invalid 4501 */ 4502 4503 static int 4504 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 4505 int list_len, char *dataname_ptr) 4506 { 4507 int i; 4508 int mask = 1; 4509 int index = 0; 4510 4511 ASSERT(un != NULL); 4512 4513 /* Check for a NULL property name and list */ 4514 if (dataname_ptr == NULL) { 4515 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4516 "sd_chk_vers1_data: NULL data property name."); 4517 return (SD_FAILURE); 4518 } 4519 if (prop_list == NULL) { 4520 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4521 "sd_chk_vers1_data: %s NULL data property list.", 4522 dataname_ptr); 4523 return (SD_FAILURE); 4524 } 4525 4526 /* Display a warning if undefined bits are set in the flags */ 4527 if (flags & ~SD_CONF_BIT_MASK) { 4528 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4529 "sd_chk_vers1_data: 
invalid bits 0x%x in data list %s. " 4530 "Properties not set.", 4531 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 4532 return (SD_FAILURE); 4533 } 4534 4535 /* 4536 * Verify the length of the list by identifying the highest bit set 4537 * in the flags and validating that the property list has a length 4538 * up to the index of this bit. 4539 */ 4540 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4541 if (flags & mask) { 4542 index++; 4543 } 4544 mask = 1 << i; 4545 } 4546 if (list_len < (index + 2)) { 4547 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4548 "sd_chk_vers1_data: " 4549 "Data property list %s size is incorrect. " 4550 "Properties not set.", dataname_ptr); 4551 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 4552 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 4553 return (SD_FAILURE); 4554 } 4555 return (SD_SUCCESS); 4556 } 4557 4558 4559 /* 4560 * Function: sd_set_vers1_properties 4561 * 4562 * Description: Set version 1 device properties based on a property list 4563 * retrieved from the driver configuration file or static 4564 * configuration table. Version 1 properties have the format: 4565 * 4566 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 4567 * 4568 * where the prop0 value will be used to set prop0 if bit0 4569 * is set in the flags 4570 * 4571 * Arguments: un - driver soft state (unit) structure 4572 * flags - integer mask indicating properties to be set 4573 * prop_list - integer list of property values 4574 */ 4575 4576 static void 4577 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4578 { 4579 ASSERT(un != NULL); 4580 4581 /* 4582 * Set the flag to indicate cache is to be disabled. An attempt 4583 * to disable the cache via sd_cache_control() will be made 4584 * later during attach once the basic initialization is complete. 
4585 */ 4586 if (flags & SD_CONF_BSET_NOCACHE) { 4587 un->un_f_opt_disable_cache = TRUE; 4588 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4589 "sd_set_vers1_properties: caching disabled flag set\n"); 4590 } 4591 4592 /* CD-specific configuration parameters */ 4593 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4594 un->un_f_cfg_playmsf_bcd = TRUE; 4595 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4596 "sd_set_vers1_properties: playmsf_bcd set\n"); 4597 } 4598 if (flags & SD_CONF_BSET_READSUB_BCD) { 4599 un->un_f_cfg_readsub_bcd = TRUE; 4600 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4601 "sd_set_vers1_properties: readsub_bcd set\n"); 4602 } 4603 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4604 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4605 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4606 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4607 } 4608 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4609 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4610 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4611 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4612 } 4613 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4614 un->un_f_cfg_no_read_header = TRUE; 4615 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4616 "sd_set_vers1_properties: no_read_header set\n"); 4617 } 4618 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4619 un->un_f_cfg_read_cd_xd4 = TRUE; 4620 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4621 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4622 } 4623 4624 /* Support for devices which do not have valid/unique serial numbers */ 4625 if (flags & SD_CONF_BSET_FAB_DEVID) { 4626 un->un_f_opt_fab_devid = TRUE; 4627 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4628 "sd_set_vers1_properties: fab_devid bit set\n"); 4629 } 4630 4631 /* Support for user throttle configuration */ 4632 if (flags & SD_CONF_BSET_THROTTLE) { 4633 ASSERT(prop_list != NULL); 4634 un->un_saved_throttle = un->un_throttle = 4635 prop_list->sdt_throttle; 4636 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4637 "sd_set_vers1_properties: throttle set to %d\n", 4638 prop_list->sdt_throttle); 4639 } 4640 4641 /* Set the 
per disk retry count according to the conf file or table. */ 4642 if (flags & SD_CONF_BSET_NRR_COUNT) { 4643 ASSERT(prop_list != NULL); 4644 if (prop_list->sdt_not_rdy_retries) { 4645 un->un_notready_retry_count = 4646 prop_list->sdt_not_rdy_retries; 4647 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4648 "sd_set_vers1_properties: not ready retry count" 4649 " set to %d\n", un->un_notready_retry_count); 4650 } 4651 } 4652 4653 /* The controller type is reported for generic disk driver ioctls */ 4654 if (flags & SD_CONF_BSET_CTYPE) { 4655 ASSERT(prop_list != NULL); 4656 switch (prop_list->sdt_ctype) { 4657 case CTYPE_CDROM: 4658 un->un_ctype = prop_list->sdt_ctype; 4659 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4660 "sd_set_vers1_properties: ctype set to " 4661 "CTYPE_CDROM\n"); 4662 break; 4663 case CTYPE_CCS: 4664 un->un_ctype = prop_list->sdt_ctype; 4665 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4666 "sd_set_vers1_properties: ctype set to " 4667 "CTYPE_CCS\n"); 4668 break; 4669 case CTYPE_ROD: /* RW optical */ 4670 un->un_ctype = prop_list->sdt_ctype; 4671 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4672 "sd_set_vers1_properties: ctype set to " 4673 "CTYPE_ROD\n"); 4674 break; 4675 default: 4676 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4677 "sd_set_vers1_properties: Could not set " 4678 "invalid ctype value (%d)", 4679 prop_list->sdt_ctype); 4680 } 4681 } 4682 4683 /* Purple failover timeout */ 4684 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4685 ASSERT(prop_list != NULL); 4686 un->un_busy_retry_count = 4687 prop_list->sdt_busy_retries; 4688 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4689 "sd_set_vers1_properties: " 4690 "busy retry count set to %d\n", 4691 un->un_busy_retry_count); 4692 } 4693 4694 /* Purple reset retry count */ 4695 if (flags & SD_CONF_BSET_RST_RETRIES) { 4696 ASSERT(prop_list != NULL); 4697 un->un_reset_retry_count = 4698 prop_list->sdt_reset_retries; 4699 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4700 "sd_set_vers1_properties: " 4701 "reset retry count set to %d\n", 4702 
un->un_reset_retry_count); 4703 } 4704 4705 /* Purple reservation release timeout */ 4706 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4707 ASSERT(prop_list != NULL); 4708 un->un_reserve_release_time = 4709 prop_list->sdt_reserv_rel_time; 4710 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4711 "sd_set_vers1_properties: " 4712 "reservation release timeout set to %d\n", 4713 un->un_reserve_release_time); 4714 } 4715 4716 /* 4717 * Driver flag telling the driver to verify that no commands are pending 4718 * for a device before issuing a Test Unit Ready. This is a workaround 4719 * for a firmware bug in some Seagate eliteI drives. 4720 */ 4721 if (flags & SD_CONF_BSET_TUR_CHECK) { 4722 un->un_f_cfg_tur_check = TRUE; 4723 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4724 "sd_set_vers1_properties: tur queue check set\n"); 4725 } 4726 4727 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4728 un->un_min_throttle = prop_list->sdt_min_throttle; 4729 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4730 "sd_set_vers1_properties: min throttle set to %d\n", 4731 un->un_min_throttle); 4732 } 4733 4734 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4735 un->un_f_disksort_disabled = 4736 (prop_list->sdt_disk_sort_dis != 0) ? 4737 TRUE : FALSE; 4738 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4739 "sd_set_vers1_properties: disksort disabled " 4740 "flag set to %d\n", 4741 prop_list->sdt_disk_sort_dis); 4742 } 4743 4744 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4745 un->un_f_lun_reset_enabled = 4746 (prop_list->sdt_lun_reset_enable != 0) ? 4747 TRUE : FALSE; 4748 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4749 "sd_set_vers1_properties: lun reset enabled " 4750 "flag set to %d\n", 4751 prop_list->sdt_lun_reset_enable); 4752 } 4753 4754 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4755 un->un_f_suppress_cache_flush = 4756 (prop_list->sdt_suppress_cache_flush != 0) ? 
4757 TRUE : FALSE; 4758 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4759 "sd_set_vers1_properties: suppress_cache_flush " 4760 "flag set to %d\n", 4761 prop_list->sdt_suppress_cache_flush); 4762 } 4763 4764 if (flags & SD_CONF_BSET_PC_DISABLED) { 4765 un->un_f_power_condition_disabled = 4766 (prop_list->sdt_power_condition_dis != 0) ? 4767 TRUE : FALSE; 4768 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4769 "sd_set_vers1_properties: power_condition_disabled " 4770 "flag set to %d\n", 4771 prop_list->sdt_power_condition_dis); 4772 } 4773 4774 /* 4775 * Validate the throttle values. 4776 * If any of the numbers are invalid, set everything to defaults. 4777 */ 4778 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4779 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4780 (un->un_min_throttle > un->un_throttle)) { 4781 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4782 un->un_min_throttle = sd_min_throttle; 4783 } 4784 } 4785 4786 /* 4787 * Function: sd_is_lsi() 4788 * 4789 * Description: Check for lsi devices, step through the static device 4790 * table to match vid/pid. 4791 * 4792 * Args: un - ptr to sd_lun 4793 * 4794 * Notes: When creating new LSI property, need to add the new LSI property 4795 * to this function. 
4796 */ 4797 static void 4798 sd_is_lsi(struct sd_lun *un) 4799 { 4800 char *id = NULL; 4801 int table_index; 4802 int idlen; 4803 void *prop; 4804 4805 ASSERT(un != NULL); 4806 for (table_index = 0; table_index < sd_disk_table_size; 4807 table_index++) { 4808 id = sd_disk_table[table_index].device_id; 4809 idlen = strlen(id); 4810 if (idlen == 0) { 4811 continue; 4812 } 4813 4814 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4815 prop = sd_disk_table[table_index].properties; 4816 if (prop == &lsi_properties || 4817 prop == &lsi_oem_properties || 4818 prop == &lsi_properties_scsi || 4819 prop == &symbios_properties) { 4820 un->un_f_cfg_is_lsi = TRUE; 4821 } 4822 break; 4823 } 4824 } 4825 } 4826 4827 /* 4828 * Function: sd_get_physical_geometry 4829 * 4830 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4831 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4832 * target, and use this information to initialize the physical 4833 * geometry cache specified by pgeom_p. 4834 * 4835 * MODE SENSE is an optional command, so failure in this case 4836 * does not necessarily denote an error. We want to use the 4837 * MODE SENSE commands to derive the physical geometry of the 4838 * device, but if either command fails, the logical geometry is 4839 * used as the fallback for disk label geometry in cmlb. 4840 * 4841 * This requires that un->un_blockcount and un->un_tgt_blocksize 4842 * have already been initialized for the current target and 4843 * that the current values be passed as args so that we don't 4844 * end up ever trying to use -1 as a valid value. This could 4845 * happen if either value is reset while we're not holding 4846 * the mutex. 4847 * 4848 * Arguments: un - driver soft state (unit) structure 4849 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4850 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4851 * to use the USCSI "direct" chain and bypass the normal 4852 * command waitq. 
4853 * 4854 * Context: Kernel thread only (can sleep). 4855 */ 4856 4857 static int 4858 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4859 diskaddr_t capacity, int lbasize, int path_flag) 4860 { 4861 struct mode_format *page3p; 4862 struct mode_geometry *page4p; 4863 struct mode_header *headerp; 4864 int sector_size; 4865 int nsect; 4866 int nhead; 4867 int ncyl; 4868 int intrlv; 4869 int spc; 4870 diskaddr_t modesense_capacity; 4871 int rpm; 4872 int bd_len; 4873 int mode_header_length; 4874 uchar_t *p3bufp; 4875 uchar_t *p4bufp; 4876 int cdbsize; 4877 int ret = EIO; 4878 sd_ssc_t *ssc; 4879 int status; 4880 4881 ASSERT(un != NULL); 4882 4883 if (lbasize == 0) { 4884 if (ISCD(un)) { 4885 lbasize = 2048; 4886 } else { 4887 lbasize = un->un_sys_blocksize; 4888 } 4889 } 4890 pgeom_p->g_secsize = (unsigned short)lbasize; 4891 4892 /* 4893 * If the unit is a cd/dvd drive MODE SENSE page three 4894 * and MODE SENSE page four are reserved (see SBC spec 4895 * and MMC spec). To prevent soft errors just return 4896 * using the default LBA size. 4897 */ 4898 if (ISCD(un)) 4899 return (ret); 4900 4901 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4902 4903 /* 4904 * Retrieve MODE SENSE page 3 - Format Device Page 4905 */ 4906 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4907 ssc = sd_ssc_init(un); 4908 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 4909 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 4910 if (status != 0) { 4911 SD_ERROR(SD_LOG_COMMON, un, 4912 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4913 goto page3_exit; 4914 } 4915 4916 /* 4917 * Determine size of Block Descriptors in order to locate the mode 4918 * page data. ATAPI devices return 0, SCSI devices should return 4919 * MODE_BLK_DESC_LENGTH. 
4920 */ 4921 headerp = (struct mode_header *)p3bufp; 4922 if (un->un_f_cfg_is_atapi == TRUE) { 4923 struct mode_header_grp2 *mhp = 4924 (struct mode_header_grp2 *)headerp; 4925 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4926 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4927 } else { 4928 mode_header_length = MODE_HEADER_LENGTH; 4929 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4930 } 4931 4932 if (bd_len > MODE_BLK_DESC_LENGTH) { 4933 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4934 "sd_get_physical_geometry: received unexpected bd_len " 4935 "of %d, page3\n", bd_len); 4936 status = EIO; 4937 goto page3_exit; 4938 } 4939 4940 page3p = (struct mode_format *) 4941 ((caddr_t)headerp + mode_header_length + bd_len); 4942 4943 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4944 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4945 "sd_get_physical_geometry: mode sense pg3 code mismatch " 4946 "%d\n", page3p->mode_page.code); 4947 status = EIO; 4948 goto page3_exit; 4949 } 4950 4951 /* 4952 * Use this physical geometry data only if BOTH MODE SENSE commands 4953 * complete successfully; otherwise, revert to the logical geometry. 4954 * So, we need to save everything in temporary variables. 
4955 */ 4956 sector_size = BE_16(page3p->data_bytes_sect); 4957 4958 /* 4959 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4960 */ 4961 if (sector_size == 0) { 4962 sector_size = un->un_sys_blocksize; 4963 } else { 4964 sector_size &= ~(un->un_sys_blocksize - 1); 4965 } 4966 4967 nsect = BE_16(page3p->sect_track); 4968 intrlv = BE_16(page3p->interleave); 4969 4970 SD_INFO(SD_LOG_COMMON, un, 4971 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4972 SD_INFO(SD_LOG_COMMON, un, 4973 " mode page: %d; nsect: %d; sector size: %d;\n", 4974 page3p->mode_page.code, nsect, sector_size); 4975 SD_INFO(SD_LOG_COMMON, un, 4976 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4977 BE_16(page3p->track_skew), 4978 BE_16(page3p->cylinder_skew)); 4979 4980 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4981 4982 /* 4983 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4984 */ 4985 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4986 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 4987 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 4988 if (status != 0) { 4989 SD_ERROR(SD_LOG_COMMON, un, 4990 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4991 goto page4_exit; 4992 } 4993 4994 /* 4995 * Determine size of Block Descriptors in order to locate the mode 4996 * page data. ATAPI devices return 0, SCSI devices should return 4997 * MODE_BLK_DESC_LENGTH. 
4998 */ 4999 headerp = (struct mode_header *)p4bufp; 5000 if (un->un_f_cfg_is_atapi == TRUE) { 5001 struct mode_header_grp2 *mhp = 5002 (struct mode_header_grp2 *)headerp; 5003 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 5004 } else { 5005 bd_len = ((struct mode_header *)headerp)->bdesc_length; 5006 } 5007 5008 if (bd_len > MODE_BLK_DESC_LENGTH) { 5009 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5010 "sd_get_physical_geometry: received unexpected bd_len of " 5011 "%d, page4\n", bd_len); 5012 status = EIO; 5013 goto page4_exit; 5014 } 5015 5016 page4p = (struct mode_geometry *) 5017 ((caddr_t)headerp + mode_header_length + bd_len); 5018 5019 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 5020 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5021 "sd_get_physical_geometry: mode sense pg4 code mismatch " 5022 "%d\n", page4p->mode_page.code); 5023 status = EIO; 5024 goto page4_exit; 5025 } 5026 5027 /* 5028 * Stash the data now, after we know that both commands completed. 5029 */ 5030 5031 5032 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 5033 spc = nhead * nsect; 5034 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 5035 rpm = BE_16(page4p->rpm); 5036 5037 modesense_capacity = spc * ncyl; 5038 5039 SD_INFO(SD_LOG_COMMON, un, 5040 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 5041 SD_INFO(SD_LOG_COMMON, un, 5042 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 5043 SD_INFO(SD_LOG_COMMON, un, 5044 " computed capacity(h*s*c): %d;\n", modesense_capacity); 5045 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 5046 (void *)pgeom_p, capacity); 5047 5048 /* 5049 * Compensate if the drive's geometry is not rectangular, i.e., 5050 * the product of C * H * S returned by MODE SENSE >= that returned 5051 * by read capacity. This is an idiosyncrasy of the original x86 5052 * disk subsystem. 
5053 */ 5054 if (modesense_capacity >= capacity) { 5055 SD_INFO(SD_LOG_COMMON, un, 5056 "sd_get_physical_geometry: adjusting acyl; " 5057 "old: %d; new: %d\n", pgeom_p->g_acyl, 5058 (modesense_capacity - capacity + spc - 1) / spc); 5059 if (sector_size != 0) { 5060 /* 1243403: NEC D38x7 drives don't support sec size */ 5061 pgeom_p->g_secsize = (unsigned short)sector_size; 5062 } 5063 pgeom_p->g_nsect = (unsigned short)nsect; 5064 pgeom_p->g_nhead = (unsigned short)nhead; 5065 pgeom_p->g_capacity = capacity; 5066 pgeom_p->g_acyl = 5067 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 5068 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 5069 } 5070 5071 pgeom_p->g_rpm = (unsigned short)rpm; 5072 pgeom_p->g_intrlv = (unsigned short)intrlv; 5073 ret = 0; 5074 5075 SD_INFO(SD_LOG_COMMON, un, 5076 "sd_get_physical_geometry: mode sense geometry:\n"); 5077 SD_INFO(SD_LOG_COMMON, un, 5078 " nsect: %d; sector size: %d; interlv: %d\n", 5079 nsect, sector_size, intrlv); 5080 SD_INFO(SD_LOG_COMMON, un, 5081 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 5082 nhead, ncyl, rpm, modesense_capacity); 5083 SD_INFO(SD_LOG_COMMON, un, 5084 "sd_get_physical_geometry: (cached)\n"); 5085 SD_INFO(SD_LOG_COMMON, un, 5086 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5087 pgeom_p->g_ncyl, pgeom_p->g_acyl, 5088 pgeom_p->g_nhead, pgeom_p->g_nsect); 5089 SD_INFO(SD_LOG_COMMON, un, 5090 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 5091 pgeom_p->g_secsize, pgeom_p->g_capacity, 5092 pgeom_p->g_intrlv, pgeom_p->g_rpm); 5093 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 5094 5095 page4_exit: 5096 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 5097 5098 page3_exit: 5099 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 5100 5101 if (status != 0) { 5102 if (status == EIO) { 5103 /* 5104 * Some disks do not support mode sense(6), we 5105 * should ignore this kind of error(sense key is 5106 * 0x5 - illegal request). 
5107 */ 5108 uint8_t *sensep; 5109 int senlen; 5110 5111 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 5112 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 5113 ssc->ssc_uscsi_cmd->uscsi_rqresid); 5114 5115 if (senlen > 0 && 5116 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 5117 sd_ssc_assessment(ssc, 5118 SD_FMT_IGNORE_COMPROMISE); 5119 } else { 5120 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 5121 } 5122 } else { 5123 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5124 } 5125 } 5126 sd_ssc_fini(ssc); 5127 return (ret); 5128 } 5129 5130 /* 5131 * Function: sd_get_virtual_geometry 5132 * 5133 * Description: Ask the controller to tell us about the target device. 5134 * 5135 * Arguments: un - pointer to softstate 5136 * capacity - disk capacity in #blocks 5137 * lbasize - disk block size in bytes 5138 * 5139 * Context: Kernel thread only 5140 */ 5141 5142 static int 5143 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 5144 diskaddr_t capacity, int lbasize) 5145 { 5146 uint_t geombuf; 5147 int spc; 5148 5149 ASSERT(un != NULL); 5150 5151 /* Set sector size, and total number of sectors */ 5152 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 5153 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 5154 5155 /* Let the HBA tell us its geometry */ 5156 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 5157 5158 /* A value of -1 indicates an undefined "geometry" property */ 5159 if (geombuf == (-1)) { 5160 return (EINVAL); 5161 } 5162 5163 /* Initialize the logical geometry cache. */ 5164 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 5165 lgeom_p->g_nsect = geombuf & 0xffff; 5166 lgeom_p->g_secsize = un->un_sys_blocksize; 5167 5168 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 5169 5170 /* 5171 * Note: The driver originally converted the capacity value from 5172 * target blocks to system blocks. 
However, the capacity value passed 5173 * to this routine is already in terms of system blocks (this scaling 5174 * is done when the READ CAPACITY command is issued and processed). 5175 * This 'error' may have gone undetected because the usage of g_ncyl 5176 * (which is based upon g_capacity) is very limited within the driver 5177 */ 5178 lgeom_p->g_capacity = capacity; 5179 5180 /* 5181 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The 5182 * hba may return zero values if the device has been removed. 5183 */ 5184 if (spc == 0) { 5185 lgeom_p->g_ncyl = 0; 5186 } else { 5187 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 5188 } 5189 lgeom_p->g_acyl = 0; 5190 5191 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 5192 return (0); 5193 5194 } 5195 /* 5196 * Function: sd_update_block_info 5197 * 5198 * Description: Calculate a byte count to sector count bitshift value 5199 * from sector size. 5200 * 5201 * Arguments: un: unit struct. 5202 * lbasize: new target sector size 5203 * capacity: new target capacity, ie. block count 5204 * 5205 * Context: Kernel thread context 5206 */ 5207 5208 static void 5209 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 5210 { 5211 if (lbasize != 0) { 5212 un->un_tgt_blocksize = lbasize; 5213 un->un_f_tgt_blocksize_is_valid = TRUE; 5214 if (!un->un_f_has_removable_media) { 5215 un->un_sys_blocksize = lbasize; 5216 } 5217 } 5218 5219 if (capacity != 0) { 5220 un->un_blockcount = capacity; 5221 un->un_f_blockcount_is_valid = TRUE; 5222 } 5223 } 5224 5225 5226 /* 5227 * Function: sd_register_devid 5228 * 5229 * Description: This routine will obtain the device id information from the 5230 * target, obtain the serial number, and register the device 5231 * id with the ddi framework. 5232 * 5233 * Arguments: devi - the system's dev_info_t for the device. 
 *		un - driver soft state (unit) structure
 *		reservation_flag - indicates if a reservation conflict
 *		occurred during attach
 *
 * Context: Kernel Thread
 */
static void
sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag)
{
	int	rval = 0;
	uchar_t	*inq80 = NULL;
	size_t	inq80_len = MAX_INQUIRY_SIZE;
	size_t	inq80_resid = 0;
	uchar_t	*inq83 = NULL;
	size_t	inq83_len = MAX_INQUIRY_SIZE;
	size_t	inq83_resid = 0;
	int	dlen, len;
	char	*sn;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	/* Caller must hold the unit mutex; we drop it around blocking I/O. */
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT((SD_DEVINFO(un)) == devi);


	/*
	 * We check the availability of the World Wide Name (0x83) and Unit
	 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
	 * un_vpd_page_mask from them, we decide which way to get the WWN.  If
	 * 0x83 is available, that is the best choice.  Our next choice is
	 * 0x80.  If neither are available, we munge the devid from the device
	 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
	 * to fabricate a devid for non-Sun qualified disks.
	 */
	if (sd_check_vpd_page_support(ssc) == 0) {
		/* collect page 80 data if available */
		if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {

			/* INQUIRY can block; release the unit mutex. */
			mutex_exit(SD_MUTEX(un));
			inq80 = kmem_zalloc(inq80_len, KM_SLEEP);

			rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len,
			    0x01, 0x80, &inq80_resid);

			if (rval != 0) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				kmem_free(inq80, inq80_len);
				inq80 = NULL;
				inq80_len = 0;
			} else if (ddi_prop_exists(
			    DDI_DEV_T_NONE, SD_DEVINFO(un),
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    INQUIRY_SERIAL_NO) == 0) {
				/*
				 * If we don't already have a serial number
				 * property, do quick verify of data returned
				 * and define property.
				 */
				dlen = inq80_len - inq80_resid;
				len = (size_t)inq80[3];
				if ((dlen >= 4) && ((len + 4) <= dlen)) {
					/*
					 * Ensure sn termination, skip leading
					 * blanks, and create property
					 * 'inquiry-serial-no'.
					 */
					sn = (char *)&inq80[4];
					/* safe: (len + 4) <= dlen checked */
					sn[len] = 0;
					while (*sn && (*sn == ' '))
						sn++;
					if (*sn) {
						(void) ddi_prop_update_string(
						    DDI_DEV_T_NONE,
						    SD_DEVINFO(un),
						    INQUIRY_SERIAL_NO, sn);
					}
				}
			}
			mutex_enter(SD_MUTEX(un));
		}

		/* collect page 83 data if available */
		if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
			mutex_exit(SD_MUTEX(un));
			inq83 = kmem_zalloc(inq83_len, KM_SLEEP);

			rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len,
			    0x01, 0x83, &inq83_resid);

			if (rval != 0) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				kmem_free(inq83, inq83_len);
				inq83 = NULL;
				inq83_len = 0;
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * If transport has already registered a devid for this target
	 * then that takes precedence over the driver's determination
	 * of the devid.
	 *
	 * NOTE: The reason this check is done here instead of at the beginning
	 * of the function is to allow the code above to create the
	 * 'inquiry-serial-no' property.
	 */
	if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
		ASSERT(un->un_devid);
		un->un_f_devid_transport_defined = TRUE;
		goto cleanup; /* use devid registered by the transport */
	}

	/*
	 * This is the case of antiquated Sun disk drives that have the
	 * FAB_DEVID property set in the disk_table.  These drives
	 * manage the devid's by storing them in last 2 available sectors
	 * on the drive and have them fabricated by the ddi layer by calling
	 * ddi_devid_init and passing the DEVID_FAB flag.
	 */
	if (un->un_f_opt_fab_devid == TRUE) {
		/*
		 * Depending on EINVAL isn't reliable, since a reserved disk
		 * may result in invalid geometry, so check to make sure a
		 * reservation conflict did not occur during attach.
		 */
		if ((sd_get_devid(ssc) == EINVAL) &&
		    (reservation_flag != SD_TARGET_IS_RESERVED)) {
			/*
			 * The devid is invalid AND there is no reservation
			 * conflict.  Fabricate a new devid.
			 */
			(void) sd_create_devid(ssc);
		}

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: Devid Fabricated\n");
		}
		goto cleanup;
	}

	/* encode best devid possible based on data available */
	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
	    (char *)ddi_driver_name(SD_DEVINFO(un)),
	    (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
	    inq80, inq80_len - inq80_resid, inq83, inq83_len -
	    inq83_resid, &un->un_devid) == DDI_SUCCESS) {

		/* devid successfully encoded, register devid */
		(void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);

	} else {
		/*
		 * Unable to encode a devid based on data available.
		 * This is not a Sun qualified disk.  Older Sun disk
		 * drives that have the SD_FAB_DEVID property
		 * set in the disk_table and non Sun qualified
		 * disks are treated in the same manner.  These
		 * drives manage the devid's by storing them in
		 * last 2 available sectors on the drive and
		 * have them fabricated by the ddi layer by
		 * calling ddi_devid_init and passing the
		 * DEVID_FAB flag.
		 * Create a fabricate devid only if there's no
		 * fabricate devid existed.
		 */
		if (sd_get_devid(ssc) == EINVAL) {
			(void) sd_create_devid(ssc);
		}
		un->un_f_opt_fab_devid = TRUE;

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: devid fabricated using "
			    "ddi framework\n");
		}
	}

cleanup:
	/* clean up resources */
	if (inq80 != NULL) {
		kmem_free(inq80, inq80_len);
	}
	if (inq83 != NULL) {
		kmem_free(inq83, inq83_len);
	}
}



/*
 * Function: sd_get_devid
 *
 * Description: This routine will return 0 if a valid device id has been
 *		obtained from the target and stored in the soft state. If a
 *		valid device id has not been previously read and stored, a
 *		read attempt will be made.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0 if we successfully get the device id
 *
 * Context: Kernel Thread
 */

static int
sd_get_devid(sd_ssc_t *ssc)
{
	struct dk_devid		*dkdevid;
	ddi_devid_t		tmpid;
	uint_t			*ip;
	size_t			sz;
	diskaddr_t		blk;
	int			status;
	int			chksum;
	int			i;
	size_t			buffer_size;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	/* Entered with the unit mutex held; returns with it held. */
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
	    un);

	/* Already cached in the soft state; nothing to read. */
	if (un->un_devid != NULL) {
		return (0);
	}

	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (EINVAL);
	}

	/*
	 * Read and verify device id, stored in the reserved cylinders at the
	 * end of the disk. Backup label is on the odd sectors of the last
	 * track of the last cylinder. Device id will be on track of the next
	 * to last cylinder.
	 */
	/* Re-take the mutex only to read the target block size safely. */
	mutex_enter(SD_MUTEX(un));
	buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid));
	mutex_exit(SD_MUTEX(un));
	dkdevid = kmem_alloc(buffer_size, KM_SLEEP);
	status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk,
	    SD_PATH_DIRECT);

	if (status != 0) {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto error;
	}

	/* Validate the revision */
	if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
		status = EINVAL;
		goto error;
	}

	/* Calculate the checksum */
	/*
	 * XOR of the first (DEV_BSIZE - sizeof (int)) bytes; presumably the
	 * excluded trailing word is where DKD_GETCHKSUM reads the stored
	 * checksum -- see the dk_devid definition to confirm.
	 */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Compare the checksums */
	if (DKD_GETCHKSUM(dkdevid) != chksum) {
		status = EINVAL;
		goto error;
	}

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
		status = EINVAL;
		goto error;
	}

	/*
	 * Store the device id in the driver soft state
	 */
	sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
	tmpid = kmem_alloc(sz, KM_SLEEP);

	mutex_enter(SD_MUTEX(un));

	un->un_devid = tmpid;
	bcopy(&dkdevid->dkd_devid, un->un_devid, sz);

	kmem_free(dkdevid, buffer_size);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un);

	return (status);
error:
	/* Reacquire the mutex dropped above before returning to the caller. */
	mutex_enter(SD_MUTEX(un));
	kmem_free(dkdevid, buffer_size);
	return (status);
}


/*
 * Function: sd_create_devid
 *
 * Description: This routine will fabricate the device id and write it
 *		to the disk.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: value of the fabricated device id, or NULL if
 *		ddi_devid_init() fails
 *
 * Context: Kernel Thread
 */

static ddi_devid_t
sd_create_devid(sd_ssc_t *ssc)
{
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/* Fabricate the devid */
	if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
	    == DDI_FAILURE) {
		return (NULL);
	}

	/* Write the devid to disk */
	/* If the write fails, discard the devid so we don't cache a stale id */
	if (sd_write_deviceid(ssc) != 0) {
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	return (un->un_devid);
}


/*
 * Function: sd_write_deviceid
 *
 * Description: This routine will write the device id to the disk
 *		reserved sector.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: EINVAL
 *		value returned by sd_send_scsi_cmd
 *		(-1 if the devid block cannot be located; callers only
 *		test for non-zero)
 *
 * Context: Kernel Thread
 */

static int
sd_write_deviceid(sd_ssc_t *ssc)
{
	struct dk_devid		*dkdevid;
	uchar_t			*buf;
	diskaddr_t		blk;
	uint_t			*ip, chksum;
	int			status;
	int			i;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	/* Entered with the unit mutex held; returns with it held. */
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (-1);
	}


	/* Allocate the buffer */
	buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
	dkdevid = (struct dk_devid *)buf;

	/* Fill in the revision */
	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	/* Re-take the mutex while reading un_devid from the soft state. */
	mutex_enter(SD_MUTEX(un));
	bcopy(un->un_devid, &dkdevid->dkd_devid,
	    ddi_devid_sizeof(un->un_devid));
	mutex_exit(SD_MUTEX(un));

	/* Calculate the checksum */
	/*
	 * NOTE(review): the XOR covers DEV_BSIZE bytes while the buffer is
	 * un_sys_blocksize bytes -- assumes un_sys_blocksize >= DEV_BSIZE;
	 * matches the verification loop in sd_get_devid().
	 */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Fill-in checksum */
	DKD_FORMCHKSUM(chksum, dkdevid);

	/* Write the reserved sector */
	status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk,
	    SD_PATH_DIRECT);
	if (status != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	kmem_free(buf, un->un_sys_blocksize);

	mutex_enter(SD_MUTEX(un));
	return (status);
}


/*
 * Function: sd_check_vpd_page_support
 *
 * Description: This routine sends an inquiry command with the EVPD bit set and
 *		a page code of 0x00 to the device. It is used to determine which
 *		vital product pages are available to find the devid. We are
 *		looking for pages 0x83 or 0x80.  If we return a negative 1, the
 *		device does not support that command.
 *
 * Arguments: un  - driver soft state (unit) structure
 *
 * Return Code: 0 - success (supported pages recorded in un_vpd_page_mask)
 *		-1 - the device does not implement VPD pages (or the
 *		INQUIRY failed)
 *
 * Context: This routine can sleep.
 */

static int
sd_check_vpd_page_support(sd_ssc_t *ssc)
{
	uchar_t	*page_list = NULL;
	uchar_t	page_length = 0xff;	/* Use max possible length */
	uchar_t	evpd = 0x01;		/* Set the EVPD bit */
	uchar_t	page_code = 0x00;	/* Supported VPD Pages */
	int	rval = 0;
	int	counter;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	/* Entered with the unit mutex held; dropped around the INQUIRY. */
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));

	/*
	 * We'll set the page length to the maximum to save figuring it out
	 * with an additional call.
	 */
	page_list = kmem_zalloc(page_length, KM_SLEEP);

	rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd,
	    page_code, NULL);

	if (rval != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Now we must validate that the device accepted the command, as some
	 * drives do not support it.  If the drive does support it, we will
	 * return 0, and the supported pages will be in un_vpd_page_mask.  If
	 * not, we return -1.
	 */
	if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) {
		/* Loop to find one of the 2 pages we need */
		counter = 4;  /* Supported pages start at byte 4, with 0x00 */

		/*
		 * Pages are returned in ascending order, and 0x83 is what we
		 * are hoping for.
		 */
		while ((page_list[counter] <= 0x86) &&
		    (counter <= (page_list[VPD_PAGE_LENGTH] +
		    VPD_HEAD_OFFSET))) {
			/*
			 * Add 3 because page_list[3] is the number of
			 * pages minus 3
			 */

			switch (page_list[counter]) {
			case 0x00:
				un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG;
				break;
			case 0x80:
				un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG;
				break;
			case 0x81:
				un->un_vpd_page_mask |= SD_VPD_OPERATING_PG;
				break;
			case 0x82:
				un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG;
				break;
			case 0x83:
				un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG;
				break;
			case 0x86:
				un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG;
				break;
			}
			counter++;
		}

	} else {
		rval = -1;

		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_check_vpd_page_support: This drive does not implement "
		    "VPD pages.\n");
	}

	kmem_free(page_list, page_length);

	return (rval);
}


/*
 * Function: sd_setup_pm
 *
 * Description: Initialize Power Management on the device
 *
 * Context: Kernel Thread
 */

static void
sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi)
{
	uint_t		log_page_size;
	uchar_t		*log_page_data;
	int		rval = 0;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * Since we are called from attach, holding a mutex for
	 * un is unnecessary. Because some of the routines called
	 * from here require SD_MUTEX to not be held, assert this
	 * right up front.
	 */
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/*
	 * Since the sd device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "pm-hardware-state", "needs-suspend-resume");

	/*
	 * This complies with the new power management framework
	 * for certain desktop machines. Create the pm_components
	 * property as a string array property.
	 * If un_f_pm_supported is TRUE, that means the disk
	 * attached HBA has set the "pm-capable" property and
	 * the value of this property is bigger than 0.
	 */
	if (un->un_f_pm_supported) {
		/*
		 * not all devices have a motor, try it first.
		 * some devices may return ILLEGAL REQUEST, some
		 * will hang
		 * The following START_STOP_UNIT is used to check if target
		 * device has a motor.
		 */
		un->un_f_start_stop_supported = TRUE;

		/*
		 * Prefer the power-condition form of START STOP UNIT;
		 * fall back to plain start/stop if the device rejects it.
		 */
		if (un->un_f_power_condition_supported) {
			rval = sd_send_scsi_START_STOP_UNIT(ssc,
			    SD_POWER_CONDITION, SD_TARGET_ACTIVE,
			    SD_PATH_DIRECT);
			if (rval != 0) {
				un->un_f_power_condition_supported = FALSE;
			}
		}
		if (!un->un_f_power_condition_supported) {
			rval = sd_send_scsi_START_STOP_UNIT(ssc,
			    SD_START_STOP, SD_TARGET_START, SD_PATH_DIRECT);
		}
		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			un->un_f_start_stop_supported = FALSE;
		}

		/*
		 * create pm properties anyways otherwise the parent can't
		 * go to sleep
		 */
		un->un_f_pm_is_enabled = TRUE;
		(void) sd_create_pm_components(devi, un);

		/*
		 * If it claims that log sense is supported, check it out.
		 */
		if (un->un_f_log_sense_supported) {
			rval = sd_log_page_supported(ssc,
			    START_STOP_CYCLE_PAGE);
			if (rval == 1) {
				/* Page found, use it. */
				un->un_start_stop_cycle_page =
				    START_STOP_CYCLE_PAGE;
			} else {
				/*
				 * Page not found or log sense is not
				 * supported.
				 * Notice we do not check the old style
				 * START_STOP_CYCLE_VU_PAGE because this
				 * code path does not apply to old disks.
				 */
				un->un_f_log_sense_supported = FALSE;
				un->un_f_pm_log_sense_smart = FALSE;
			}
		}

		return;
	}

	/*
	 * For the disk whose attached HBA has not set the "pm-capable"
	 * property, check if it supports the power management.
	 */
	if (!un->un_f_log_sense_supported) {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
		return;
	}

	/*
	 * rval: 1 = page supported, 0 = page not found, -1 = log sense
	 * failed (presumably sd_log_page_supported's contract -- confirm).
	 */
	rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);

#ifdef	SDDEBUG
	if (sd_force_pm_supported) {
		/* Force a successful result */
		rval = 1;
	}
#endif

	/*
	 * If the start-stop cycle counter log page is not supported
	 * or if the pm-capable property is set to be false (0),
	 * then we should not create the pm_components property.
	 */
	if (rval == -1) {
		/*
		 * Error.
		 * Reading log sense failed, most likely this is
		 * an older drive that does not support log sense.
		 * If this fails auto-pm is not supported.
		 */
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;

	} else if (rval == 0) {
		/*
		 * Page not found.
		 * The start stop cycle counter is implemented as page
		 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For
		 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
		 */
		if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) {
			/*
			 * Page found, use this one.
			 */
			un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
			un->un_f_pm_is_enabled = TRUE;
		} else {
			/*
			 * Error or page not found.
			 * auto-pm is not supported for this device.
			 */
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}
	} else {
		/*
		 * Page found, use it.
		 */
		un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
		un->un_f_pm_is_enabled = TRUE;
	}


	if (un->un_f_pm_is_enabled == TRUE) {
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
		    log_page_size, un->un_start_stop_cycle_page,
		    0x01, 0, SD_PATH_DIRECT);

		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif

		/*
		 * If the Log sense for Page( Start/stop cycle counter page)
		 * succeeds, then power management is supported and we can
		 * enable auto-pm.
		 */
		if (rval == 0) {
			(void) sd_create_pm_components(devi, un);
		} else {
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}

		kmem_free(log_page_data, log_page_size);
	}
}


/*
 * Function: sd_create_pm_components
 *
 * Description: Initialize PM property.
 *
 * Context: Kernel thread context
 */

static void
sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
{
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (un->un_f_power_condition_supported) {
		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
		    "pm-components", sd_pwr_pc.pm_comp, 5)
		    != DDI_PROP_SUCCESS) {
			un->un_power_level = SD_SPINDLE_ACTIVE;
			un->un_f_pm_is_enabled = FALSE;
			return;
		}
	} else {
		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
		    "pm-components", sd_pwr_ss.pm_comp, 3)
		    != DDI_PROP_SUCCESS) {
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
			return;
		}
	}
	/*
	 * When components are initially created they are idle,
	 * power up any non-removables.
6009 * Note: the return value of pm_raise_power can't be used 6010 * for determining if PM should be enabled for this device. 6011 * Even if you check the return values and remove this 6012 * property created above, the PM framework will not honor the 6013 * change after the first call to pm_raise_power. Hence, 6014 * removal of that property does not help if pm_raise_power 6015 * fails. In the case of removable media, the start/stop 6016 * will fail if the media is not present. 6017 */ 6018 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 6019 SD_PM_STATE_ACTIVE(un)) == DDI_SUCCESS)) { 6020 mutex_enter(SD_MUTEX(un)); 6021 un->un_power_level = SD_PM_STATE_ACTIVE(un); 6022 mutex_enter(&un->un_pm_mutex); 6023 /* Set to on and not busy. */ 6024 un->un_pm_count = 0; 6025 } else { 6026 mutex_enter(SD_MUTEX(un)); 6027 un->un_power_level = SD_PM_STATE_STOPPED(un); 6028 mutex_enter(&un->un_pm_mutex); 6029 /* Set to off. */ 6030 un->un_pm_count = -1; 6031 } 6032 mutex_exit(&un->un_pm_mutex); 6033 mutex_exit(SD_MUTEX(un)); 6034 } 6035 6036 6037 /* 6038 * Function: sd_ddi_suspend 6039 * 6040 * Description: Performs system power-down operations. This includes 6041 * setting the drive state to indicate its suspended so 6042 * that no new commands will be accepted. Also, wait for 6043 * all commands that are in transport or queued to a timer 6044 * for retry to complete. All timeout threads are cancelled. 6045 * 6046 * Return Code: DDI_FAILURE or DDI_SUCCESS 6047 * 6048 * Context: Kernel thread context 6049 */ 6050 6051 static int 6052 sd_ddi_suspend(dev_info_t *devi) 6053 { 6054 struct sd_lun *un; 6055 clock_t wait_cmds_complete; 6056 6057 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6058 if (un == NULL) { 6059 return (DDI_FAILURE); 6060 } 6061 6062 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 6063 6064 mutex_enter(SD_MUTEX(un)); 6065 6066 /* Return success if the device is already suspended. 
*/ 6067 if (un->un_state == SD_STATE_SUSPENDED) { 6068 mutex_exit(SD_MUTEX(un)); 6069 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6070 "device already suspended, exiting\n"); 6071 return (DDI_SUCCESS); 6072 } 6073 6074 /* Return failure if the device is being used by HA */ 6075 if (un->un_resvd_status & 6076 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 6077 mutex_exit(SD_MUTEX(un)); 6078 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6079 "device in use by HA, exiting\n"); 6080 return (DDI_FAILURE); 6081 } 6082 6083 /* 6084 * Return failure if the device is in a resource wait 6085 * or power changing state. 6086 */ 6087 if ((un->un_state == SD_STATE_RWAIT) || 6088 (un->un_state == SD_STATE_PM_CHANGING)) { 6089 mutex_exit(SD_MUTEX(un)); 6090 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6091 "device in resource wait state, exiting\n"); 6092 return (DDI_FAILURE); 6093 } 6094 6095 6096 un->un_save_state = un->un_last_state; 6097 New_state(un, SD_STATE_SUSPENDED); 6098 6099 /* 6100 * Wait for all commands that are in transport or queued to a timer 6101 * for retry to complete. 6102 * 6103 * While waiting, no new commands will be accepted or sent because of 6104 * the new state we set above. 6105 * 6106 * Wait till current operation has completed. If we are in the resource 6107 * wait state (with an intr outstanding) then we need to wait till the 6108 * intr completes and starts the next cmd. We want to wait for 6109 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 6110 */ 6111 wait_cmds_complete = ddi_get_lbolt() + 6112 (sd_wait_cmds_complete * drv_usectohz(1000000)); 6113 6114 while (un->un_ncmds_in_transport != 0) { 6115 /* 6116 * Fail if commands do not finish in the specified time. 6117 */ 6118 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 6119 wait_cmds_complete) == -1) { 6120 /* 6121 * Undo the state changes made above. Everything 6122 * must go back to it's original value. 
6123 */ 6124 Restore_state(un); 6125 un->un_last_state = un->un_save_state; 6126 /* Wake up any threads that might be waiting. */ 6127 cv_broadcast(&un->un_suspend_cv); 6128 mutex_exit(SD_MUTEX(un)); 6129 SD_ERROR(SD_LOG_IO_PM, un, 6130 "sd_ddi_suspend: failed due to outstanding cmds\n"); 6131 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 6132 return (DDI_FAILURE); 6133 } 6134 } 6135 6136 /* 6137 * Cancel SCSI watch thread and timeouts, if any are active 6138 */ 6139 6140 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 6141 opaque_t temp_token = un->un_swr_token; 6142 mutex_exit(SD_MUTEX(un)); 6143 scsi_watch_suspend(temp_token); 6144 mutex_enter(SD_MUTEX(un)); 6145 } 6146 6147 if (un->un_reset_throttle_timeid != NULL) { 6148 timeout_id_t temp_id = un->un_reset_throttle_timeid; 6149 un->un_reset_throttle_timeid = NULL; 6150 mutex_exit(SD_MUTEX(un)); 6151 (void) untimeout(temp_id); 6152 mutex_enter(SD_MUTEX(un)); 6153 } 6154 6155 if (un->un_dcvb_timeid != NULL) { 6156 timeout_id_t temp_id = un->un_dcvb_timeid; 6157 un->un_dcvb_timeid = NULL; 6158 mutex_exit(SD_MUTEX(un)); 6159 (void) untimeout(temp_id); 6160 mutex_enter(SD_MUTEX(un)); 6161 } 6162 6163 mutex_enter(&un->un_pm_mutex); 6164 if (un->un_pm_timeid != NULL) { 6165 timeout_id_t temp_id = un->un_pm_timeid; 6166 un->un_pm_timeid = NULL; 6167 mutex_exit(&un->un_pm_mutex); 6168 mutex_exit(SD_MUTEX(un)); 6169 (void) untimeout(temp_id); 6170 mutex_enter(SD_MUTEX(un)); 6171 } else { 6172 mutex_exit(&un->un_pm_mutex); 6173 } 6174 6175 if (un->un_rmw_msg_timeid != NULL) { 6176 timeout_id_t temp_id = un->un_rmw_msg_timeid; 6177 un->un_rmw_msg_timeid = NULL; 6178 mutex_exit(SD_MUTEX(un)); 6179 (void) untimeout(temp_id); 6180 mutex_enter(SD_MUTEX(un)); 6181 } 6182 6183 if (un->un_retry_timeid != NULL) { 6184 timeout_id_t temp_id = un->un_retry_timeid; 6185 un->un_retry_timeid = NULL; 6186 mutex_exit(SD_MUTEX(un)); 6187 (void) untimeout(temp_id); 6188 mutex_enter(SD_MUTEX(un)); 6189 6190 if (un->un_retry_bp != NULL) 
{ 6191 un->un_retry_bp->av_forw = un->un_waitq_headp; 6192 un->un_waitq_headp = un->un_retry_bp; 6193 if (un->un_waitq_tailp == NULL) { 6194 un->un_waitq_tailp = un->un_retry_bp; 6195 } 6196 un->un_retry_bp = NULL; 6197 un->un_retry_statp = NULL; 6198 } 6199 } 6200 6201 if (un->un_direct_priority_timeid != NULL) { 6202 timeout_id_t temp_id = un->un_direct_priority_timeid; 6203 un->un_direct_priority_timeid = NULL; 6204 mutex_exit(SD_MUTEX(un)); 6205 (void) untimeout(temp_id); 6206 mutex_enter(SD_MUTEX(un)); 6207 } 6208 6209 if (un->un_f_is_fibre == TRUE) { 6210 /* 6211 * Remove callbacks for insert and remove events 6212 */ 6213 if (un->un_insert_event != NULL) { 6214 mutex_exit(SD_MUTEX(un)); 6215 (void) ddi_remove_event_handler(un->un_insert_cb_id); 6216 mutex_enter(SD_MUTEX(un)); 6217 un->un_insert_event = NULL; 6218 } 6219 6220 if (un->un_remove_event != NULL) { 6221 mutex_exit(SD_MUTEX(un)); 6222 (void) ddi_remove_event_handler(un->un_remove_cb_id); 6223 mutex_enter(SD_MUTEX(un)); 6224 un->un_remove_event = NULL; 6225 } 6226 } 6227 6228 mutex_exit(SD_MUTEX(un)); 6229 6230 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 6231 6232 return (DDI_SUCCESS); 6233 } 6234 6235 6236 /* 6237 * Function: sd_ddi_resume 6238 * 6239 * Description: Performs system power-up operations.. 6240 * 6241 * Return Code: DDI_SUCCESS 6242 * DDI_FAILURE 6243 * 6244 * Context: Kernel thread context 6245 */ 6246 6247 static int 6248 sd_ddi_resume(dev_info_t *devi) 6249 { 6250 struct sd_lun *un; 6251 6252 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6253 if (un == NULL) { 6254 return (DDI_FAILURE); 6255 } 6256 6257 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 6258 6259 mutex_enter(SD_MUTEX(un)); 6260 Restore_state(un); 6261 6262 /* 6263 * Restore the state which was saved to give the 6264 * the right state in un_last_state 6265 */ 6266 un->un_last_state = un->un_save_state; 6267 /* 6268 * Note: throttle comes back at full. 
6269 * Also note: this MUST be done before calling pm_raise_power 6270 * otherwise the system can get hung in biowait. The scenario where 6271 * this'll happen is under cpr suspend. Writing of the system 6272 * state goes through sddump, which writes 0 to un_throttle. If 6273 * writing the system state then fails, example if the partition is 6274 * too small, then cpr attempts a resume. If throttle isn't restored 6275 * from the saved value until after calling pm_raise_power then 6276 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 6277 * in biowait. 6278 */ 6279 un->un_throttle = un->un_saved_throttle; 6280 6281 /* 6282 * The chance of failure is very rare as the only command done in power 6283 * entry point is START command when you transition from 0->1 or 6284 * unknown->1. Put it to SPINDLE ON state irrespective of the state at 6285 * which suspend was done. Ignore the return value as the resume should 6286 * not be failed. In the case of removable media the media need not be 6287 * inserted and hence there is a chance that raise power will fail with 6288 * media not present. 6289 */ 6290 if (un->un_f_attach_spinup) { 6291 mutex_exit(SD_MUTEX(un)); 6292 (void) pm_raise_power(SD_DEVINFO(un), 0, 6293 SD_PM_STATE_ACTIVE(un)); 6294 mutex_enter(SD_MUTEX(un)); 6295 } 6296 6297 /* 6298 * Don't broadcast to the suspend cv and therefore possibly 6299 * start I/O until after power has been restored. 6300 */ 6301 cv_broadcast(&un->un_suspend_cv); 6302 cv_broadcast(&un->un_state_cv); 6303 6304 /* restart thread */ 6305 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 6306 scsi_watch_resume(un->un_swr_token); 6307 } 6308 6309 #if (defined(__fibre)) 6310 if (un->un_f_is_fibre == TRUE) { 6311 /* 6312 * Add callbacks for insert and remove events 6313 */ 6314 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 6315 sd_init_event_callbacks(un); 6316 } 6317 } 6318 #endif 6319 6320 /* 6321 * Transport any pending commands to the target. 
6322 * 6323 * If this is a low-activity device commands in queue will have to wait 6324 * until new commands come in, which may take awhile. Also, we 6325 * specifically don't check un_ncmds_in_transport because we know that 6326 * there really are no commands in progress after the unit was 6327 * suspended and we could have reached the throttle level, been 6328 * suspended, and have no new commands coming in for awhile. Highly 6329 * unlikely, but so is the low-activity disk scenario. 6330 */ 6331 ddi_xbuf_dispatch(un->un_xbuf_attr); 6332 6333 sd_start_cmds(un, NULL); 6334 mutex_exit(SD_MUTEX(un)); 6335 6336 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 6337 6338 return (DDI_SUCCESS); 6339 } 6340 6341 6342 /* 6343 * Function: sd_pm_state_change 6344 * 6345 * Description: Change the driver power state. 6346 * Someone else is required to actually change the driver 6347 * power level. 6348 * 6349 * Arguments: un - driver soft state (unit) structure 6350 * level - the power level that is changed to 6351 * flag - to decide how to change the power state 6352 * 6353 * Return Code: DDI_SUCCESS 6354 * 6355 * Context: Kernel thread context 6356 */ 6357 static int 6358 sd_pm_state_change(struct sd_lun *un, int level, int flag) 6359 { 6360 ASSERT(un != NULL); 6361 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: entry\n"); 6362 6363 ASSERT(!mutex_owned(SD_MUTEX(un))); 6364 mutex_enter(SD_MUTEX(un)); 6365 6366 if (flag == SD_PM_STATE_ROLLBACK || SD_PM_IS_IO_CAPABLE(un, level)) { 6367 un->un_power_level = level; 6368 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6369 mutex_enter(&un->un_pm_mutex); 6370 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6371 un->un_pm_count++; 6372 ASSERT(un->un_pm_count == 0); 6373 } 6374 mutex_exit(&un->un_pm_mutex); 6375 } else { 6376 /* 6377 * Exit if power management is not enabled for this device, 6378 * or if the device is being used by HA. 
6379 */ 6380 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 6381 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 6382 mutex_exit(SD_MUTEX(un)); 6383 SD_TRACE(SD_LOG_POWER, un, 6384 "sd_pm_state_change: exiting\n"); 6385 return (DDI_FAILURE); 6386 } 6387 6388 SD_INFO(SD_LOG_POWER, un, "sd_pm_state_change: " 6389 "un_ncmds_in_driver=%ld\n", un->un_ncmds_in_driver); 6390 6391 /* 6392 * See if the device is not busy, ie.: 6393 * - we have no commands in the driver for this device 6394 * - not waiting for resources 6395 */ 6396 if ((un->un_ncmds_in_driver == 0) && 6397 (un->un_state != SD_STATE_RWAIT)) { 6398 /* 6399 * The device is not busy, so it is OK to go to low 6400 * power state. Indicate low power, but rely on someone 6401 * else to actually change it. 6402 */ 6403 mutex_enter(&un->un_pm_mutex); 6404 un->un_pm_count = -1; 6405 mutex_exit(&un->un_pm_mutex); 6406 un->un_power_level = level; 6407 } 6408 } 6409 6410 mutex_exit(SD_MUTEX(un)); 6411 6412 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: exit\n"); 6413 6414 return (DDI_SUCCESS); 6415 } 6416 6417 6418 /* 6419 * Function: sd_pm_idletimeout_handler 6420 * 6421 * Description: A timer routine that's active only while a device is busy. 6422 * The purpose is to extend slightly the pm framework's busy 6423 * view of the device to prevent busy/idle thrashing for 6424 * back-to-back commands. Do this by comparing the current time 6425 * to the time at which the last command completed and when the 6426 * difference is greater than sd_pm_idletime, call 6427 * pm_idle_component. In addition to indicating idle to the pm 6428 * framework, update the chain type to again use the internal pm 6429 * layers of the driver. 
6430 * 6431 * Arguments: arg - driver soft state (unit) structure 6432 * 6433 * Context: Executes in a timeout(9F) thread context 6434 */ 6435 6436 static void 6437 sd_pm_idletimeout_handler(void *arg) 6438 { 6439 struct sd_lun *un = arg; 6440 6441 time_t now; 6442 6443 mutex_enter(&sd_detach_mutex); 6444 if (un->un_detach_count != 0) { 6445 /* Abort if the instance is detaching */ 6446 mutex_exit(&sd_detach_mutex); 6447 return; 6448 } 6449 mutex_exit(&sd_detach_mutex); 6450 6451 now = ddi_get_time(); 6452 /* 6453 * Grab both mutexes, in the proper order, since we're accessing 6454 * both PM and softstate variables. 6455 */ 6456 mutex_enter(SD_MUTEX(un)); 6457 mutex_enter(&un->un_pm_mutex); 6458 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 6459 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 6460 /* 6461 * Update the chain types. 6462 * This takes affect on the next new command received. 6463 */ 6464 if (un->un_f_non_devbsize_supported) { 6465 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 6466 } else { 6467 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 6468 } 6469 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6470 6471 SD_TRACE(SD_LOG_IO_PM, un, 6472 "sd_pm_idletimeout_handler: idling device\n"); 6473 (void) pm_idle_component(SD_DEVINFO(un), 0); 6474 un->un_pm_idle_timeid = NULL; 6475 } else { 6476 un->un_pm_idle_timeid = 6477 timeout(sd_pm_idletimeout_handler, un, 6478 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 6479 } 6480 mutex_exit(&un->un_pm_mutex); 6481 mutex_exit(SD_MUTEX(un)); 6482 } 6483 6484 6485 /* 6486 * Function: sd_pm_timeout_handler 6487 * 6488 * Description: Callback to tell framework we are idle. 6489 * 6490 * Context: timeout(9f) thread context. 
6491 */ 6492 6493 static void 6494 sd_pm_timeout_handler(void *arg) 6495 { 6496 struct sd_lun *un = arg; 6497 6498 (void) pm_idle_component(SD_DEVINFO(un), 0); 6499 mutex_enter(&un->un_pm_mutex); 6500 un->un_pm_timeid = NULL; 6501 mutex_exit(&un->un_pm_mutex); 6502 } 6503 6504 6505 /* 6506 * Function: sdpower 6507 * 6508 * Description: PM entry point. 6509 * 6510 * Return Code: DDI_SUCCESS 6511 * DDI_FAILURE 6512 * 6513 * Context: Kernel thread context 6514 */ 6515 6516 static int 6517 sdpower(dev_info_t *devi, int component, int level) 6518 { 6519 struct sd_lun *un; 6520 int instance; 6521 int rval = DDI_SUCCESS; 6522 uint_t i, log_page_size, maxcycles, ncycles; 6523 uchar_t *log_page_data; 6524 int log_sense_page; 6525 int medium_present; 6526 time_t intvlp; 6527 dev_t dev; 6528 struct pm_trans_data sd_pm_tran_data; 6529 uchar_t save_state; 6530 int sval; 6531 uchar_t state_before_pm; 6532 int got_semaphore_here; 6533 sd_ssc_t *ssc; 6534 int last_power_level; 6535 6536 instance = ddi_get_instance(devi); 6537 6538 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 6539 !SD_PM_IS_LEVEL_VALID(un, level) || component != 0) { 6540 return (DDI_FAILURE); 6541 } 6542 6543 dev = sd_make_device(SD_DEVINFO(un)); 6544 ssc = sd_ssc_init(un); 6545 6546 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 6547 6548 /* 6549 * Must synchronize power down with close. 6550 * Attempt to decrement/acquire the open/close semaphore, 6551 * but do NOT wait on it. If it's not greater than zero, 6552 * ie. it can't be decremented without waiting, then 6553 * someone else, either open or close, already has it 6554 * and the try returns 0. Use that knowledge here to determine 6555 * if it's OK to change the device power level. 6556 * Also, only increment it on exit if it was decremented, ie. gotten, 6557 * here. 
6558 */ 6559 got_semaphore_here = sema_tryp(&un->un_semoclose); 6560 6561 mutex_enter(SD_MUTEX(un)); 6562 6563 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 6564 un->un_ncmds_in_driver); 6565 6566 /* 6567 * If un_ncmds_in_driver is non-zero it indicates commands are 6568 * already being processed in the driver, or if the semaphore was 6569 * not gotten here it indicates an open or close is being processed. 6570 * At the same time somebody is requesting to go to a lower power 6571 * that can't perform I/O, which can't happen, therefore we need to 6572 * return failure. 6573 */ 6574 if ((!SD_PM_IS_IO_CAPABLE(un, level)) && 6575 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 6576 mutex_exit(SD_MUTEX(un)); 6577 6578 if (got_semaphore_here != 0) { 6579 sema_v(&un->un_semoclose); 6580 } 6581 SD_TRACE(SD_LOG_IO_PM, un, 6582 "sdpower: exit, device has queued cmds.\n"); 6583 6584 goto sdpower_failed; 6585 } 6586 6587 /* 6588 * if it is OFFLINE that means the disk is completely dead 6589 * in our case we have to put the disk in on or off by sending commands 6590 * Of course that will fail anyway so return back here. 6591 * 6592 * Power changes to a device that's OFFLINE or SUSPENDED 6593 * are not allowed. 6594 */ 6595 if ((un->un_state == SD_STATE_OFFLINE) || 6596 (un->un_state == SD_STATE_SUSPENDED)) { 6597 mutex_exit(SD_MUTEX(un)); 6598 6599 if (got_semaphore_here != 0) { 6600 sema_v(&un->un_semoclose); 6601 } 6602 SD_TRACE(SD_LOG_IO_PM, un, 6603 "sdpower: exit, device is off-line.\n"); 6604 6605 goto sdpower_failed; 6606 } 6607 6608 /* 6609 * Change the device's state to indicate it's power level 6610 * is being changed. Do this to prevent a power off in the 6611 * middle of commands, which is especially bad on devices 6612 * that are really powered off instead of just spun down. 
6613 */ 6614 state_before_pm = un->un_state; 6615 un->un_state = SD_STATE_PM_CHANGING; 6616 6617 mutex_exit(SD_MUTEX(un)); 6618 6619 /* 6620 * If log sense command is not supported, bypass the 6621 * following checking, otherwise, check the log sense 6622 * information for this device. 6623 */ 6624 if (SD_PM_STOP_MOTOR_NEEDED(un, level) && 6625 un->un_f_log_sense_supported) { 6626 /* 6627 * Get the log sense information to understand whether the 6628 * the powercycle counts have gone beyond the threshhold. 6629 */ 6630 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6631 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6632 6633 mutex_enter(SD_MUTEX(un)); 6634 log_sense_page = un->un_start_stop_cycle_page; 6635 mutex_exit(SD_MUTEX(un)); 6636 6637 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 6638 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 6639 6640 if (rval != 0) { 6641 if (rval == EIO) 6642 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6643 else 6644 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6645 } 6646 6647 #ifdef SDDEBUG 6648 if (sd_force_pm_supported) { 6649 /* Force a successful result */ 6650 rval = 0; 6651 } 6652 #endif 6653 if (rval != 0) { 6654 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 6655 "Log Sense Failed\n"); 6656 6657 kmem_free(log_page_data, log_page_size); 6658 /* Cannot support power management on those drives */ 6659 6660 if (got_semaphore_here != 0) { 6661 sema_v(&un->un_semoclose); 6662 } 6663 /* 6664 * On exit put the state back to it's original value 6665 * and broadcast to anyone waiting for the power 6666 * change completion. 
6667 */ 6668 mutex_enter(SD_MUTEX(un)); 6669 un->un_state = state_before_pm; 6670 cv_broadcast(&un->un_suspend_cv); 6671 mutex_exit(SD_MUTEX(un)); 6672 SD_TRACE(SD_LOG_IO_PM, un, 6673 "sdpower: exit, Log Sense Failed.\n"); 6674 6675 goto sdpower_failed; 6676 } 6677 6678 /* 6679 * From the page data - Convert the essential information to 6680 * pm_trans_data 6681 */ 6682 maxcycles = 6683 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 6684 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 6685 6686 ncycles = 6687 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 6688 (log_page_data[0x26] << 8) | log_page_data[0x27]; 6689 6690 if (un->un_f_pm_log_sense_smart) { 6691 sd_pm_tran_data.un.smart_count.allowed = maxcycles; 6692 sd_pm_tran_data.un.smart_count.consumed = ncycles; 6693 sd_pm_tran_data.un.smart_count.flag = 0; 6694 sd_pm_tran_data.format = DC_SMART_FORMAT; 6695 } else { 6696 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 6697 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 6698 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 6699 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 6700 log_page_data[8+i]; 6701 } 6702 sd_pm_tran_data.un.scsi_cycles.flag = 0; 6703 sd_pm_tran_data.format = DC_SCSI_FORMAT; 6704 } 6705 6706 kmem_free(log_page_data, log_page_size); 6707 6708 /* 6709 * Call pm_trans_check routine to get the Ok from 6710 * the global policy 6711 */ 6712 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 6713 #ifdef SDDEBUG 6714 if (sd_force_pm_supported) { 6715 /* Force a successful result */ 6716 rval = 1; 6717 } 6718 #endif 6719 switch (rval) { 6720 case 0: 6721 /* 6722 * Not Ok to Power cycle or error in parameters passed 6723 * Would have given the advised time to consider power 6724 * cycle. Based on the new intvlp parameter we are 6725 * supposed to pretend we are busy so that pm framework 6726 * will never call our power entry point. 
Because of 6727 * that install a timeout handler and wait for the 6728 * recommended time to elapse so that power management 6729 * can be effective again. 6730 * 6731 * To effect this behavior, call pm_busy_component to 6732 * indicate to the framework this device is busy. 6733 * By not adjusting un_pm_count the rest of PM in 6734 * the driver will function normally, and independent 6735 * of this but because the framework is told the device 6736 * is busy it won't attempt powering down until it gets 6737 * a matching idle. The timeout handler sends this. 6738 * Note: sd_pm_entry can't be called here to do this 6739 * because sdpower may have been called as a result 6740 * of a call to pm_raise_power from within sd_pm_entry. 6741 * 6742 * If a timeout handler is already active then 6743 * don't install another. 6744 */ 6745 mutex_enter(&un->un_pm_mutex); 6746 if (un->un_pm_timeid == NULL) { 6747 un->un_pm_timeid = 6748 timeout(sd_pm_timeout_handler, 6749 un, intvlp * drv_usectohz(1000000)); 6750 mutex_exit(&un->un_pm_mutex); 6751 (void) pm_busy_component(SD_DEVINFO(un), 0); 6752 } else { 6753 mutex_exit(&un->un_pm_mutex); 6754 } 6755 if (got_semaphore_here != 0) { 6756 sema_v(&un->un_semoclose); 6757 } 6758 /* 6759 * On exit put the state back to it's original value 6760 * and broadcast to anyone waiting for the power 6761 * change completion. 6762 */ 6763 mutex_enter(SD_MUTEX(un)); 6764 un->un_state = state_before_pm; 6765 cv_broadcast(&un->un_suspend_cv); 6766 mutex_exit(SD_MUTEX(un)); 6767 6768 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 6769 "trans check Failed, not ok to power cycle.\n"); 6770 6771 goto sdpower_failed; 6772 case -1: 6773 if (got_semaphore_here != 0) { 6774 sema_v(&un->un_semoclose); 6775 } 6776 /* 6777 * On exit put the state back to it's original value 6778 * and broadcast to anyone waiting for the power 6779 * change completion. 
6780 */ 6781 mutex_enter(SD_MUTEX(un)); 6782 un->un_state = state_before_pm; 6783 cv_broadcast(&un->un_suspend_cv); 6784 mutex_exit(SD_MUTEX(un)); 6785 SD_TRACE(SD_LOG_IO_PM, un, 6786 "sdpower: exit, trans check command Failed.\n"); 6787 6788 goto sdpower_failed; 6789 } 6790 } 6791 6792 if (!SD_PM_IS_IO_CAPABLE(un, level)) { 6793 /* 6794 * Save the last state... if the STOP FAILS we need it 6795 * for restoring 6796 */ 6797 mutex_enter(SD_MUTEX(un)); 6798 save_state = un->un_last_state; 6799 last_power_level = un->un_power_level; 6800 /* 6801 * There must not be any cmds. getting processed 6802 * in the driver when we get here. Power to the 6803 * device is potentially going off. 6804 */ 6805 ASSERT(un->un_ncmds_in_driver == 0); 6806 mutex_exit(SD_MUTEX(un)); 6807 6808 /* 6809 * For now PM suspend the device completely before spindle is 6810 * turned off 6811 */ 6812 if ((rval = sd_pm_state_change(un, level, SD_PM_STATE_CHANGE)) 6813 == DDI_FAILURE) { 6814 if (got_semaphore_here != 0) { 6815 sema_v(&un->un_semoclose); 6816 } 6817 /* 6818 * On exit put the state back to it's original value 6819 * and broadcast to anyone waiting for the power 6820 * change completion. 6821 */ 6822 mutex_enter(SD_MUTEX(un)); 6823 un->un_state = state_before_pm; 6824 un->un_power_level = last_power_level; 6825 cv_broadcast(&un->un_suspend_cv); 6826 mutex_exit(SD_MUTEX(un)); 6827 SD_TRACE(SD_LOG_IO_PM, un, 6828 "sdpower: exit, PM suspend Failed.\n"); 6829 6830 goto sdpower_failed; 6831 } 6832 } 6833 6834 /* 6835 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 6836 * close, or strategy. Dump no long uses this routine, it uses it's 6837 * own code so it can be done in polled mode. 6838 */ 6839 6840 medium_present = TRUE; 6841 6842 /* 6843 * When powering up, issue a TUR in case the device is at unit 6844 * attention. Don't do retries. Bypass the PM layer, otherwise 6845 * a deadlock on un_pm_busy_cv will occur. 
6846 */ 6847 if (SD_PM_IS_IO_CAPABLE(un, level)) { 6848 sval = sd_send_scsi_TEST_UNIT_READY(ssc, 6849 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 6850 if (sval != 0) 6851 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6852 } 6853 6854 if (un->un_f_power_condition_supported) { 6855 char *pm_condition_name[] = {"STOPPED", "STANDBY", 6856 "IDLE", "ACTIVE"}; 6857 SD_TRACE(SD_LOG_IO_PM, un, 6858 "sdpower: sending \'%s\' power condition", 6859 pm_condition_name[level]); 6860 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION, 6861 sd_pl2pc[level], SD_PATH_DIRECT); 6862 } else { 6863 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 6864 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 6865 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 6866 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : 6867 SD_TARGET_STOP), SD_PATH_DIRECT); 6868 } 6869 if (sval != 0) { 6870 if (sval == EIO) 6871 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6872 else 6873 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6874 } 6875 6876 /* Command failed, check for media present. */ 6877 if ((sval == ENXIO) && un->un_f_has_removable_media) { 6878 medium_present = FALSE; 6879 } 6880 6881 /* 6882 * The conditions of interest here are: 6883 * if a spindle off with media present fails, 6884 * then restore the state and return an error. 6885 * else if a spindle on fails, 6886 * then return an error (there's no state to restore). 6887 * In all other cases we setup for the new state 6888 * and return success. 6889 */ 6890 if (!SD_PM_IS_IO_CAPABLE(un, level)) { 6891 if ((medium_present == TRUE) && (sval != 0)) { 6892 /* The stop command from above failed */ 6893 rval = DDI_FAILURE; 6894 /* 6895 * The stop command failed, and we have media 6896 * present. Put the level back by calling the 6897 * sd_pm_resume() and set the state back to 6898 * it's previous value. 
6899 */ 6900 (void) sd_pm_state_change(un, last_power_level, 6901 SD_PM_STATE_ROLLBACK); 6902 mutex_enter(SD_MUTEX(un)); 6903 un->un_last_state = save_state; 6904 mutex_exit(SD_MUTEX(un)); 6905 } else if (un->un_f_monitor_media_state) { 6906 /* 6907 * The stop command from above succeeded. 6908 * Terminate watch thread in case of removable media 6909 * devices going into low power state. This is as per 6910 * the requirements of pm framework, otherwise commands 6911 * will be generated for the device (through watch 6912 * thread), even when the device is in low power state. 6913 */ 6914 mutex_enter(SD_MUTEX(un)); 6915 un->un_f_watcht_stopped = FALSE; 6916 if (un->un_swr_token != NULL) { 6917 opaque_t temp_token = un->un_swr_token; 6918 un->un_f_watcht_stopped = TRUE; 6919 un->un_swr_token = NULL; 6920 mutex_exit(SD_MUTEX(un)); 6921 (void) scsi_watch_request_terminate(temp_token, 6922 SCSI_WATCH_TERMINATE_ALL_WAIT); 6923 } else { 6924 mutex_exit(SD_MUTEX(un)); 6925 } 6926 } 6927 } else { 6928 /* 6929 * The level requested is I/O capable. 6930 * Legacy behavior: return success on a failed spinup 6931 * if there is no media in the drive. 6932 * Do this by looking at medium_present here. 6933 */ 6934 if ((sval != 0) && medium_present) { 6935 /* The start command from above failed */ 6936 rval = DDI_FAILURE; 6937 } else { 6938 /* 6939 * The start command from above succeeded 6940 * PM resume the devices now that we have 6941 * started the disks 6942 */ 6943 (void) sd_pm_state_change(un, level, 6944 SD_PM_STATE_CHANGE); 6945 6946 /* 6947 * Resume the watch thread since it was suspended 6948 * when the device went into low power mode. 
6949 */ 6950 if (un->un_f_monitor_media_state) { 6951 mutex_enter(SD_MUTEX(un)); 6952 if (un->un_f_watcht_stopped == TRUE) { 6953 opaque_t temp_token; 6954 6955 un->un_f_watcht_stopped = FALSE; 6956 mutex_exit(SD_MUTEX(un)); 6957 temp_token = scsi_watch_request_submit( 6958 SD_SCSI_DEVP(un), 6959 sd_check_media_time, 6960 SENSE_LENGTH, sd_media_watch_cb, 6961 (caddr_t)dev); 6962 mutex_enter(SD_MUTEX(un)); 6963 un->un_swr_token = temp_token; 6964 } 6965 mutex_exit(SD_MUTEX(un)); 6966 } 6967 } 6968 } 6969 6970 if (got_semaphore_here != 0) { 6971 sema_v(&un->un_semoclose); 6972 } 6973 /* 6974 * On exit put the state back to it's original value 6975 * and broadcast to anyone waiting for the power 6976 * change completion. 6977 */ 6978 mutex_enter(SD_MUTEX(un)); 6979 un->un_state = state_before_pm; 6980 cv_broadcast(&un->un_suspend_cv); 6981 mutex_exit(SD_MUTEX(un)); 6982 6983 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6984 6985 sd_ssc_fini(ssc); 6986 return (rval); 6987 6988 sdpower_failed: 6989 6990 sd_ssc_fini(ssc); 6991 return (DDI_FAILURE); 6992 } 6993 6994 6995 6996 /* 6997 * Function: sdattach 6998 * 6999 * Description: Driver's attach(9e) entry point function. 7000 * 7001 * Arguments: devi - opaque device info handle 7002 * cmd - attach type 7003 * 7004 * Return Code: DDI_SUCCESS 7005 * DDI_FAILURE 7006 * 7007 * Context: Kernel thread context 7008 */ 7009 7010 static int 7011 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 7012 { 7013 switch (cmd) { 7014 case DDI_ATTACH: 7015 return (sd_unit_attach(devi)); 7016 case DDI_RESUME: 7017 return (sd_ddi_resume(devi)); 7018 default: 7019 break; 7020 } 7021 return (DDI_FAILURE); 7022 } 7023 7024 7025 /* 7026 * Function: sddetach 7027 * 7028 * Description: Driver's detach(9E) entry point function. 
7029 * 7030 * Arguments: devi - opaque device info handle 7031 * cmd - detach type 7032 * 7033 * Return Code: DDI_SUCCESS 7034 * DDI_FAILURE 7035 * 7036 * Context: Kernel thread context 7037 */ 7038 7039 static int 7040 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 7041 { 7042 switch (cmd) { 7043 case DDI_DETACH: 7044 return (sd_unit_detach(devi)); 7045 case DDI_SUSPEND: 7046 return (sd_ddi_suspend(devi)); 7047 default: 7048 break; 7049 } 7050 return (DDI_FAILURE); 7051 } 7052 7053 7054 /* 7055 * Function: sd_sync_with_callback 7056 * 7057 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 7058 * state while the callback routine is active. 7059 * 7060 * Arguments: un: softstate structure for the instance 7061 * 7062 * Context: Kernel thread context 7063 */ 7064 7065 static void 7066 sd_sync_with_callback(struct sd_lun *un) 7067 { 7068 ASSERT(un != NULL); 7069 7070 mutex_enter(SD_MUTEX(un)); 7071 7072 ASSERT(un->un_in_callback >= 0); 7073 7074 while (un->un_in_callback > 0) { 7075 mutex_exit(SD_MUTEX(un)); 7076 delay(2); 7077 mutex_enter(SD_MUTEX(un)); 7078 } 7079 7080 mutex_exit(SD_MUTEX(un)); 7081 } 7082 7083 /* 7084 * Function: sd_unit_attach 7085 * 7086 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 7087 * the soft state structure for the device and performs 7088 * all necessary structure and device initializations. 7089 * 7090 * Arguments: devi: the system's dev_info_t for the device. 7091 * 7092 * Return Code: DDI_SUCCESS if attach is successful. 7093 * DDI_FAILURE if any part of the attach fails. 7094 * 7095 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 7096 * Kernel thread context only. Can sleep. 
7097 */ 7098 7099 static int 7100 sd_unit_attach(dev_info_t *devi) 7101 { 7102 struct scsi_device *devp; 7103 struct sd_lun *un; 7104 char *variantp; 7105 char name_str[48]; 7106 int reservation_flag = SD_TARGET_IS_UNRESERVED; 7107 int instance; 7108 int rval; 7109 int wc_enabled; 7110 int tgt; 7111 uint64_t capacity; 7112 uint_t lbasize = 0; 7113 dev_info_t *pdip = ddi_get_parent(devi); 7114 int offbyone = 0; 7115 int geom_label_valid = 0; 7116 sd_ssc_t *ssc; 7117 int status; 7118 struct sd_fm_internal *sfip = NULL; 7119 int max_xfer_size; 7120 7121 /* 7122 * Retrieve the target driver's private data area. This was set 7123 * up by the HBA. 7124 */ 7125 devp = ddi_get_driver_private(devi); 7126 7127 /* 7128 * Retrieve the target ID of the device. 7129 */ 7130 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7131 SCSI_ADDR_PROP_TARGET, -1); 7132 7133 /* 7134 * Since we have no idea what state things were left in by the last 7135 * user of the device, set up some 'default' settings, ie. turn 'em 7136 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 7137 * Do this before the scsi_probe, which sends an inquiry. 7138 * This is a fix for bug (4430280). 7139 * Of special importance is wide-xfer. The drive could have been left 7140 * in wide transfer mode by the last driver to communicate with it, 7141 * this includes us. If that's the case, and if the following is not 7142 * setup properly or we don't re-negotiate with the drive prior to 7143 * transferring data to/from the drive, it causes bus parity errors, 7144 * data overruns, and unexpected interrupts. This first occurred when 7145 * the fix for bug (4378686) was made. 7146 */ 7147 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 7148 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 7149 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 7150 7151 /* 7152 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 7153 * on a target. 
Setting it per lun instance actually sets the 7154 * capability of this target, which affects those luns already 7155 * attached on the same target. So during attach, we can only disable 7156 * this capability only when no other lun has been attached on this 7157 * target. By doing this, we assume a target has the same tagged-qing 7158 * capability for every lun. The condition can be removed when HBA 7159 * is changed to support per lun based tagged-qing capability. 7160 */ 7161 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7162 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 7163 } 7164 7165 /* 7166 * Use scsi_probe() to issue an INQUIRY command to the device. 7167 * This call will allocate and fill in the scsi_inquiry structure 7168 * and point the sd_inq member of the scsi_device structure to it. 7169 * If the attach succeeds, then this memory will not be de-allocated 7170 * (via scsi_unprobe()) until the instance is detached. 7171 */ 7172 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 7173 goto probe_failed; 7174 } 7175 7176 /* 7177 * Check the device type as specified in the inquiry data and 7178 * claim it if it is of a type that we support. 7179 */ 7180 switch (devp->sd_inq->inq_dtype) { 7181 case DTYPE_DIRECT: 7182 break; 7183 case DTYPE_RODIRECT: 7184 break; 7185 case DTYPE_OPTICAL: 7186 break; 7187 case DTYPE_NOTPRESENT: 7188 default: 7189 /* Unsupported device type; fail the attach. */ 7190 goto probe_failed; 7191 } 7192 7193 /* 7194 * Allocate the soft state structure for this unit. 7195 * 7196 * We rely upon this memory being set to all zeroes by 7197 * ddi_soft_state_zalloc(). We assume that any member of the 7198 * soft state structure that is not explicitly initialized by 7199 * this routine will have a value of zero. 
7200 */ 7201 instance = ddi_get_instance(devp->sd_dev); 7202 #ifndef XPV_HVM_DRIVER 7203 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 7204 goto probe_failed; 7205 } 7206 #endif /* !XPV_HVM_DRIVER */ 7207 7208 /* 7209 * Retrieve a pointer to the newly-allocated soft state. 7210 * 7211 * This should NEVER fail if the ddi_soft_state_zalloc() call above 7212 * was successful, unless something has gone horribly wrong and the 7213 * ddi's soft state internals are corrupt (in which case it is 7214 * probably better to halt here than just fail the attach....) 7215 */ 7216 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 7217 panic("sd_unit_attach: NULL soft state on instance:0x%x", 7218 instance); 7219 /*NOTREACHED*/ 7220 } 7221 7222 /* 7223 * Link the back ptr of the driver soft state to the scsi_device 7224 * struct for this lun. 7225 * Save a pointer to the softstate in the driver-private area of 7226 * the scsi_device struct. 7227 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 7228 * we first set un->un_sd below. 7229 */ 7230 un->un_sd = devp; 7231 devp->sd_private = (opaque_t)un; 7232 7233 /* 7234 * The following must be after devp is stored in the soft state struct. 7235 */ 7236 #ifdef SDDEBUG 7237 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7238 "%s_unit_attach: un:0x%p instance:%d\n", 7239 ddi_driver_name(devi), un, instance); 7240 #endif 7241 7242 /* 7243 * Set up the device type and node type (for the minor nodes). 7244 * By default we assume that the device can at least support the 7245 * Common Command Set. Call it a CD-ROM if it reports itself 7246 * as a RODIRECT device. 
7247 */ 7248 switch (devp->sd_inq->inq_dtype) { 7249 case DTYPE_RODIRECT: 7250 un->un_node_type = DDI_NT_CD_CHAN; 7251 un->un_ctype = CTYPE_CDROM; 7252 break; 7253 case DTYPE_OPTICAL: 7254 un->un_node_type = DDI_NT_BLOCK_CHAN; 7255 un->un_ctype = CTYPE_ROD; 7256 break; 7257 default: 7258 un->un_node_type = DDI_NT_BLOCK_CHAN; 7259 un->un_ctype = CTYPE_CCS; 7260 break; 7261 } 7262 7263 /* 7264 * Try to read the interconnect type from the HBA. 7265 * 7266 * Note: This driver is currently compiled as two binaries, a parallel 7267 * scsi version (sd) and a fibre channel version (ssd). All functional 7268 * differences are determined at compile time. In the future a single 7269 * binary will be provided and the interconnect type will be used to 7270 * differentiate between fibre and parallel scsi behaviors. At that time 7271 * it will be necessary for all fibre channel HBAs to support this 7272 * property. 7273 * 7274 * set un_f_is_fiber to TRUE ( default fiber ) 7275 */ 7276 un->un_f_is_fibre = TRUE; 7277 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 7278 case INTERCONNECT_SSA: 7279 un->un_interconnect_type = SD_INTERCONNECT_SSA; 7280 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7281 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 7282 break; 7283 case INTERCONNECT_PARALLEL: 7284 un->un_f_is_fibre = FALSE; 7285 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7286 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7287 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 7288 break; 7289 case INTERCONNECT_SAS: 7290 un->un_f_is_fibre = FALSE; 7291 un->un_interconnect_type = SD_INTERCONNECT_SAS; 7292 un->un_node_type = DDI_NT_BLOCK_SAS; 7293 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7294 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un); 7295 break; 7296 case INTERCONNECT_SATA: 7297 un->un_f_is_fibre = FALSE; 7298 un->un_interconnect_type = SD_INTERCONNECT_SATA; 7299 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7300 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 7301 
break; 7302 case INTERCONNECT_FIBRE: 7303 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 7304 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7305 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7306 break; 7307 case INTERCONNECT_FABRIC: 7308 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7309 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7310 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7311 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7312 break; 7313 default: 7314 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7315 /* 7316 * The HBA does not support the "interconnect-type" property 7317 * (or did not provide a recognized type). 7318 * 7319 * Note: This will be obsoleted when a single fibre channel 7320 * and parallel scsi driver is delivered. In the meantime the 7321 * interconnect type will be set to the platform default.If that 7322 * type is not parallel SCSI, it means that we should be 7323 * assuming "ssd" semantics. However, here this also means that 7324 * the FC HBA is not supporting the "interconnect-type" property 7325 * like we expect it to, so log this occurrence. 7326 */ 7327 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7328 if (!SD_IS_PARALLEL_SCSI(un)) { 7329 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7330 "sd_unit_attach: un:0x%p Assuming " 7331 "INTERCONNECT_FIBRE\n", un); 7332 } else { 7333 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7334 "sd_unit_attach: un:0x%p Assuming " 7335 "INTERCONNECT_PARALLEL\n", un); 7336 un->un_f_is_fibre = FALSE; 7337 } 7338 #else 7339 /* 7340 * Note: This source will be implemented when a single fibre 7341 * channel and parallel scsi driver is delivered. The default 7342 * will be to assume that if a device does not support the 7343 * "interconnect-type" property it is a parallel SCSI HBA and 7344 * we will set the interconnect type for parallel scsi. 
7345 */ 7346 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7347 un->un_f_is_fibre = FALSE; 7348 #endif 7349 break; 7350 } 7351 7352 if (un->un_f_is_fibre == TRUE) { 7353 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7354 SCSI_VERSION_3) { 7355 switch (un->un_interconnect_type) { 7356 case SD_INTERCONNECT_FIBRE: 7357 case SD_INTERCONNECT_SSA: 7358 un->un_node_type = DDI_NT_BLOCK_WWN; 7359 break; 7360 default: 7361 break; 7362 } 7363 } 7364 } 7365 7366 /* 7367 * Initialize the Request Sense command for the target 7368 */ 7369 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7370 goto alloc_rqs_failed; 7371 } 7372 7373 /* 7374 * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc 7375 * with separate binary for sd and ssd. 7376 * 7377 * x86 has 1 binary, un_retry_count is set base on connection type. 7378 * The hardcoded values will go away when Sparc uses 1 binary 7379 * for sd and ssd. This hardcoded values need to match 7380 * SD_RETRY_COUNT in sddef.h 7381 * The value used is base on interconnect type. 7382 * fibre = 3, parallel = 5 7383 */ 7384 #if defined(__i386) || defined(__amd64) 7385 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 7386 #else 7387 un->un_retry_count = SD_RETRY_COUNT; 7388 #endif 7389 7390 /* 7391 * Set the per disk retry count to the default number of retries 7392 * for disks and CDROMs. This value can be overridden by the 7393 * disk property list or an entry in sd.conf. 7394 */ 7395 un->un_notready_retry_count = 7396 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7397 : DISK_NOT_READY_RETRY_COUNT(un); 7398 7399 /* 7400 * Set the busy retry count to the default value of un_retry_count. 7401 * This can be overridden by entries in sd.conf or the device 7402 * config table. 7403 */ 7404 un->un_busy_retry_count = un->un_retry_count; 7405 7406 /* 7407 * Init the reset threshold for retries. This number determines 7408 * how many retries must be performed before a reset can be issued 7409 * (for certain error conditions). 
This can be overridden by entries 7410 * in sd.conf or the device config table. 7411 */ 7412 un->un_reset_retry_count = (un->un_retry_count / 2); 7413 7414 /* 7415 * Set the victim_retry_count to the default un_retry_count 7416 */ 7417 un->un_victim_retry_count = (2 * un->un_retry_count); 7418 7419 /* 7420 * Set the reservation release timeout to the default value of 7421 * 5 seconds. This can be overridden by entries in ssd.conf or the 7422 * device config table. 7423 */ 7424 un->un_reserve_release_time = 5; 7425 7426 /* 7427 * Set up the default maximum transfer size. Note that this may 7428 * get updated later in the attach, when setting up default wide 7429 * operations for disks. 7430 */ 7431 #if defined(__i386) || defined(__amd64) 7432 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7433 un->un_partial_dma_supported = 1; 7434 #else 7435 un->un_max_xfer_size = (uint_t)maxphys; 7436 #endif 7437 7438 /* 7439 * Get "allow bus device reset" property (defaults to "enabled" if 7440 * the property was not defined). This is to disable bus resets for 7441 * certain kinds of error recovery. Note: In the future when a run-time 7442 * fibre check is available the soft state flag should default to 7443 * enabled. 7444 */ 7445 if (un->un_f_is_fibre == TRUE) { 7446 un->un_f_allow_bus_device_reset = TRUE; 7447 } else { 7448 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7449 "allow-bus-device-reset", 1) != 0) { 7450 un->un_f_allow_bus_device_reset = TRUE; 7451 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7452 "sd_unit_attach: un:0x%p Bus device reset " 7453 "enabled\n", un); 7454 } else { 7455 un->un_f_allow_bus_device_reset = FALSE; 7456 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7457 "sd_unit_attach: un:0x%p Bus device reset " 7458 "disabled\n", un); 7459 } 7460 } 7461 7462 /* 7463 * Check if this is an ATAPI device. ATAPI devices use Group 1 7464 * Read/Write commands and Group 2 Mode Sense/Select commands. 
7465 * 7466 * Note: The "obsolete" way of doing this is to check for the "atapi" 7467 * property. The new "variant" property with a value of "atapi" has been 7468 * introduced so that future 'variants' of standard SCSI behavior (like 7469 * atapi) could be specified by the underlying HBA drivers by supplying 7470 * a new value for the "variant" property, instead of having to define a 7471 * new property. 7472 */ 7473 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7474 un->un_f_cfg_is_atapi = TRUE; 7475 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7476 "sd_unit_attach: un:0x%p Atapi device\n", un); 7477 } 7478 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7479 &variantp) == DDI_PROP_SUCCESS) { 7480 if (strcmp(variantp, "atapi") == 0) { 7481 un->un_f_cfg_is_atapi = TRUE; 7482 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7483 "sd_unit_attach: un:0x%p Atapi device\n", un); 7484 } 7485 ddi_prop_free(variantp); 7486 } 7487 7488 un->un_cmd_timeout = SD_IO_TIME; 7489 7490 un->un_busy_timeout = SD_BSY_TIMEOUT; 7491 7492 /* Info on current states, statuses, etc. (Updated frequently) */ 7493 un->un_state = SD_STATE_NORMAL; 7494 un->un_last_state = SD_STATE_NORMAL; 7495 7496 /* Control & status info for command throttling */ 7497 un->un_throttle = sd_max_throttle; 7498 un->un_saved_throttle = sd_max_throttle; 7499 un->un_min_throttle = sd_min_throttle; 7500 7501 if (un->un_f_is_fibre == TRUE) { 7502 un->un_f_use_adaptive_throttle = TRUE; 7503 } else { 7504 un->un_f_use_adaptive_throttle = FALSE; 7505 } 7506 7507 /* Removable media support. */ 7508 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7509 un->un_mediastate = DKIO_NONE; 7510 un->un_specified_mediastate = DKIO_NONE; 7511 7512 /* CVs for suspend/resume (PM or DR) */ 7513 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7514 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7515 7516 /* Power management support. 
*/ 7517 un->un_power_level = SD_SPINDLE_UNINIT; 7518 7519 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 7520 un->un_f_wcc_inprog = 0; 7521 7522 /* 7523 * The open/close semaphore is used to serialize threads executing 7524 * in the driver's open & close entry point routines for a given 7525 * instance. 7526 */ 7527 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7528 7529 /* 7530 * The conf file entry and softstate variable is a forceful override, 7531 * meaning a non-zero value must be entered to change the default. 7532 */ 7533 un->un_f_disksort_disabled = FALSE; 7534 un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT; 7535 7536 /* 7537 * Retrieve the properties from the static driver table or the driver 7538 * configuration file (.conf) for this unit and update the soft state 7539 * for the device as needed for the indicated properties. 7540 * Note: the property configuration needs to occur here as some of the 7541 * following routines may have dependencies on soft state flags set 7542 * as part of the driver property configuration. 7543 */ 7544 sd_read_unit_properties(un); 7545 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7546 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7547 7548 /* 7549 * Only if a device has "hotpluggable" property, it is 7550 * treated as hotpluggable device. Otherwise, it is 7551 * regarded as non-hotpluggable one. 7552 */ 7553 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable", 7554 -1) != -1) { 7555 un->un_f_is_hotpluggable = TRUE; 7556 } 7557 7558 /* 7559 * set unit's attributes(flags) according to "hotpluggable" and 7560 * RMB bit in INQUIRY data. 7561 */ 7562 sd_set_unit_attributes(un, devi); 7563 7564 /* 7565 * By default, we mark the capacity, lbasize, and geometry 7566 * as invalid. Only if we successfully read a valid capacity 7567 * will we update the un_blockcount and un_tgt_blocksize with the 7568 * valid values (the geometry will be validated later). 
7569 */ 7570 un->un_f_blockcount_is_valid = FALSE; 7571 un->un_f_tgt_blocksize_is_valid = FALSE; 7572 7573 /* 7574 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7575 * otherwise. 7576 */ 7577 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7578 un->un_blockcount = 0; 7579 7580 /* 7581 * Set up the per-instance info needed to determine the correct 7582 * CDBs and other info for issuing commands to the target. 7583 */ 7584 sd_init_cdb_limits(un); 7585 7586 /* 7587 * Set up the IO chains to use, based upon the target type. 7588 */ 7589 if (un->un_f_non_devbsize_supported) { 7590 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7591 } else { 7592 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7593 } 7594 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7595 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7596 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7597 7598 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7599 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7600 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7601 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7602 7603 7604 if (ISCD(un)) { 7605 un->un_additional_codes = sd_additional_codes; 7606 } else { 7607 un->un_additional_codes = NULL; 7608 } 7609 7610 /* 7611 * Create the kstats here so they can be available for attach-time 7612 * routines that send commands to the unit (either polled or via 7613 * sd_send_scsi_cmd). 7614 * 7615 * Note: This is a critical sequence that needs to be maintained: 7616 * 1) Instantiate the kstats here, before any routines using the 7617 * iopath (i.e. sd_send_scsi_cmd). 7618 * 2) Instantiate and initialize the partition stats 7619 * (sd_set_pstats). 7620 * 3) Initialize the error stats (sd_set_errstats), following 7621 * sd_validate_geometry(),sd_register_devid(), 7622 * and sd_cache_control(). 
7623 */ 7624 7625 un->un_stats = kstat_create(sd_label, instance, 7626 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7627 if (un->un_stats != NULL) { 7628 un->un_stats->ks_lock = SD_MUTEX(un); 7629 kstat_install(un->un_stats); 7630 } 7631 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7632 "sd_unit_attach: un:0x%p un_stats created\n", un); 7633 7634 sd_create_errstats(un, instance); 7635 if (un->un_errstats == NULL) { 7636 goto create_errstats_failed; 7637 } 7638 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7639 "sd_unit_attach: un:0x%p errstats created\n", un); 7640 7641 /* 7642 * The following if/else code was relocated here from below as part 7643 * of the fix for bug (4430280). However with the default setup added 7644 * on entry to this routine, it's no longer absolutely necessary for 7645 * this to be before the call to sd_spin_up_unit. 7646 */ 7647 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7648 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7649 (devp->sd_inq->inq_ansi == 5)) && 7650 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7651 7652 /* 7653 * If tagged queueing is supported by the target 7654 * and by the host adapter then we will enable it 7655 */ 7656 un->un_tagflags = 0; 7657 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7658 (un->un_f_arq_enabled == TRUE)) { 7659 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7660 1, 1) == 1) { 7661 un->un_tagflags = FLAG_STAG; 7662 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7663 "sd_unit_attach: un:0x%p tag queueing " 7664 "enabled\n", un); 7665 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7666 "untagged-qing", 0) == 1) { 7667 un->un_f_opt_queueing = TRUE; 7668 un->un_saved_throttle = un->un_throttle = 7669 min(un->un_throttle, 3); 7670 } else { 7671 un->un_f_opt_queueing = FALSE; 7672 un->un_saved_throttle = un->un_throttle = 1; 7673 } 7674 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7675 == 1) && (un->un_f_arq_enabled == TRUE)) { 7676 /* The Host Adapter supports internal 
queueing. */ 7677 un->un_f_opt_queueing = TRUE; 7678 un->un_saved_throttle = un->un_throttle = 7679 min(un->un_throttle, 3); 7680 } else { 7681 un->un_f_opt_queueing = FALSE; 7682 un->un_saved_throttle = un->un_throttle = 1; 7683 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7684 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7685 } 7686 7687 /* 7688 * Enable large transfers for SATA/SAS drives 7689 */ 7690 if (SD_IS_SERIAL(un)) { 7691 un->un_max_xfer_size = 7692 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7693 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7694 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7695 "sd_unit_attach: un:0x%p max transfer " 7696 "size=0x%x\n", un, un->un_max_xfer_size); 7697 7698 } 7699 7700 /* Setup or tear down default wide operations for disks */ 7701 7702 /* 7703 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7704 * and "ssd_max_xfer_size" to exist simultaneously on the same 7705 * system and be set to different values. In the future this 7706 * code may need to be updated when the ssd module is 7707 * obsoleted and removed from the system. 
(4299588) 7708 */ 7709 if (SD_IS_PARALLEL_SCSI(un) && 7710 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7711 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7712 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7713 1, 1) == 1) { 7714 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7715 "sd_unit_attach: un:0x%p Wide Transfer " 7716 "enabled\n", un); 7717 } 7718 7719 /* 7720 * If tagged queuing has also been enabled, then 7721 * enable large xfers 7722 */ 7723 if (un->un_saved_throttle == sd_max_throttle) { 7724 un->un_max_xfer_size = 7725 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7726 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7727 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7728 "sd_unit_attach: un:0x%p max transfer " 7729 "size=0x%x\n", un, un->un_max_xfer_size); 7730 } 7731 } else { 7732 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7733 0, 1) == 1) { 7734 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7735 "sd_unit_attach: un:0x%p " 7736 "Wide Transfer disabled\n", un); 7737 } 7738 } 7739 } else { 7740 un->un_tagflags = FLAG_STAG; 7741 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7742 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7743 } 7744 7745 /* 7746 * If this target supports LUN reset, try to enable it. 7747 */ 7748 if (un->un_f_lun_reset_enabled) { 7749 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7750 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7751 "un:0x%p lun_reset capability set\n", un); 7752 } else { 7753 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7754 "un:0x%p lun-reset capability not set\n", un); 7755 } 7756 } 7757 7758 /* 7759 * Adjust the maximum transfer size. This is to fix 7760 * the problem of partial DMA support on SPARC. Some 7761 * HBA driver, like aac, has very small dma_attr_maxxfer 7762 * size, which requires partial DMA support on SPARC. 7763 * In the future the SPARC pci nexus driver may solve 7764 * the problem instead of this fix. 
7765 */ 7766 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1); 7767 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) { 7768 /* We need DMA partial even on sparc to ensure sddump() works */ 7769 un->un_max_xfer_size = max_xfer_size; 7770 if (un->un_partial_dma_supported == 0) 7771 un->un_partial_dma_supported = 1; 7772 } 7773 if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7774 DDI_PROP_DONTPASS, "buf_break", 0) == 1) { 7775 if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr, 7776 un->un_max_xfer_size) == 1) { 7777 un->un_buf_breakup_supported = 1; 7778 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7779 "un:0x%p Buf breakup enabled\n", un); 7780 } 7781 } 7782 7783 /* 7784 * Set PKT_DMA_PARTIAL flag. 7785 */ 7786 if (un->un_partial_dma_supported == 1) { 7787 un->un_pkt_flags = PKT_DMA_PARTIAL; 7788 } else { 7789 un->un_pkt_flags = 0; 7790 } 7791 7792 /* Initialize sd_ssc_t for internal uscsi commands */ 7793 ssc = sd_ssc_init(un); 7794 scsi_fm_init(devp); 7795 7796 /* 7797 * Allocate memory for SCSI FMA stuffs. 7798 */ 7799 un->un_fm_private = 7800 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP); 7801 sfip = (struct sd_fm_internal *)un->un_fm_private; 7802 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd; 7803 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo; 7804 sfip->fm_ssc.ssc_un = un; 7805 7806 if (ISCD(un) || 7807 un->un_f_has_removable_media || 7808 devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) { 7809 /* 7810 * We don't touch CDROM or the DDI_FM_NOT_CAPABLE device. 7811 * Their log are unchanged. 7812 */ 7813 sfip->fm_log_level = SD_FM_LOG_NSUP; 7814 } else { 7815 /* 7816 * If enter here, it should be non-CDROM and FM-capable 7817 * device, and it will not keep the old scsi_log as before 7818 * in /var/adm/messages. However, the property 7819 * "fm-scsi-log" will control whether the FM telemetry will 7820 * be logged in /var/adm/messages. 
7821 */ 7822 int fm_scsi_log; 7823 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 7824 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0); 7825 7826 if (fm_scsi_log) 7827 sfip->fm_log_level = SD_FM_LOG_EREPORT; 7828 else 7829 sfip->fm_log_level = SD_FM_LOG_SILENT; 7830 } 7831 7832 /* 7833 * At this point in the attach, we have enough info in the 7834 * soft state to be able to issue commands to the target. 7835 * 7836 * All command paths used below MUST issue their commands as 7837 * SD_PATH_DIRECT. This is important as intermediate layers 7838 * are not all initialized yet (such as PM). 7839 */ 7840 7841 /* 7842 * Send a TEST UNIT READY command to the device. This should clear 7843 * any outstanding UNIT ATTENTION that may be present. 7844 * 7845 * Note: Don't check for success, just track if there is a reservation, 7846 * this is a throw away command to clear any unit attentions. 7847 * 7848 * Note: This MUST be the first command issued to the target during 7849 * attach to ensure power on UNIT ATTENTIONS are cleared. 7850 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 7851 * with attempts at spinning up a device with no media. 7852 */ 7853 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 7854 if (status != 0) { 7855 if (status == EACCES) 7856 reservation_flag = SD_TARGET_IS_RESERVED; 7857 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7858 } 7859 7860 /* 7861 * If the device is NOT a removable media device, attempt to spin 7862 * it up (using the START_STOP_UNIT command) and read its capacity 7863 * (using the READ CAPACITY command). Note, however, that either 7864 * of these could fail and in some cases we would continue with 7865 * the attach despite the failure (see below). 7866 */ 7867 if (un->un_f_descr_format_supported) { 7868 7869 switch (sd_spin_up_unit(ssc)) { 7870 case 0: 7871 /* 7872 * Spin-up was successful; now try to read the 7873 * capacity. 
If successful then save the results 7874 * and mark the capacity & lbasize as valid. 7875 */ 7876 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7877 "sd_unit_attach: un:0x%p spin-up successful\n", un); 7878 7879 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 7880 &lbasize, SD_PATH_DIRECT); 7881 7882 switch (status) { 7883 case 0: { 7884 if (capacity > DK_MAX_BLOCKS) { 7885 #ifdef _LP64 7886 if ((capacity + 1) > 7887 SD_GROUP1_MAX_ADDRESS) { 7888 /* 7889 * Enable descriptor format 7890 * sense data so that we can 7891 * get 64 bit sense data 7892 * fields. 7893 */ 7894 sd_enable_descr_sense(ssc); 7895 } 7896 #else 7897 /* 32-bit kernels can't handle this */ 7898 scsi_log(SD_DEVINFO(un), 7899 sd_label, CE_WARN, 7900 "disk has %llu blocks, which " 7901 "is too large for a 32-bit " 7902 "kernel", capacity); 7903 7904 #if defined(__i386) || defined(__amd64) 7905 /* 7906 * 1TB disk was treated as (1T - 512)B 7907 * in the past, so that it might have 7908 * valid VTOC and solaris partitions, 7909 * we have to allow it to continue to 7910 * work. 7911 */ 7912 if (capacity -1 > DK_MAX_BLOCKS) 7913 #endif 7914 goto spinup_failed; 7915 #endif 7916 } 7917 7918 /* 7919 * Here it's not necessary to check the case: 7920 * the capacity of the device is bigger than 7921 * what the max hba cdb can support. Because 7922 * sd_send_scsi_READ_CAPACITY will retrieve 7923 * the capacity by sending USCSI command, which 7924 * is constrained by the max hba cdb. Actually, 7925 * sd_send_scsi_READ_CAPACITY will return 7926 * EINVAL when using bigger cdb than required 7927 * cdb length. Will handle this case in 7928 * "case EINVAL". 7929 */ 7930 7931 /* 7932 * The following relies on 7933 * sd_send_scsi_READ_CAPACITY never 7934 * returning 0 for capacity and/or lbasize. 
7935 */ 7936 sd_update_block_info(un, lbasize, capacity); 7937 7938 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7939 "sd_unit_attach: un:0x%p capacity = %ld " 7940 "blocks; lbasize= %ld.\n", un, 7941 un->un_blockcount, un->un_tgt_blocksize); 7942 7943 break; 7944 } 7945 case EINVAL: 7946 /* 7947 * In the case where the max-cdb-length property 7948 * is smaller than the required CDB length for 7949 * a SCSI device, a target driver can fail to 7950 * attach to that device. 7951 */ 7952 scsi_log(SD_DEVINFO(un), 7953 sd_label, CE_WARN, 7954 "disk capacity is too large " 7955 "for current cdb length"); 7956 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7957 7958 goto spinup_failed; 7959 case EACCES: 7960 /* 7961 * Should never get here if the spin-up 7962 * succeeded, but code it in anyway. 7963 * From here, just continue with the attach... 7964 */ 7965 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7966 "sd_unit_attach: un:0x%p " 7967 "sd_send_scsi_READ_CAPACITY " 7968 "returned reservation conflict\n", un); 7969 reservation_flag = SD_TARGET_IS_RESERVED; 7970 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7971 break; 7972 default: 7973 /* 7974 * Likewise, should never get here if the 7975 * spin-up succeeded. Just continue with 7976 * the attach... 7977 */ 7978 if (status == EIO) 7979 sd_ssc_assessment(ssc, 7980 SD_FMT_STATUS_CHECK); 7981 else 7982 sd_ssc_assessment(ssc, 7983 SD_FMT_IGNORE); 7984 break; 7985 } 7986 break; 7987 case EACCES: 7988 /* 7989 * Device is reserved by another host. In this case 7990 * we could not spin it up or read the capacity, but 7991 * we continue with the attach anyway. 7992 */ 7993 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7994 "sd_unit_attach: un:0x%p spin-up reservation " 7995 "conflict.\n", un); 7996 reservation_flag = SD_TARGET_IS_RESERVED; 7997 break; 7998 default: 7999 /* Fail the attach if the spin-up failed. 
*/ 8000 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8001 "sd_unit_attach: un:0x%p spin-up failed.", un); 8002 goto spinup_failed; 8003 } 8004 8005 } 8006 8007 /* 8008 * Check to see if this is a MMC drive 8009 */ 8010 if (ISCD(un)) { 8011 sd_set_mmc_caps(ssc); 8012 } 8013 8014 8015 /* 8016 * Add a zero-length attribute to tell the world we support 8017 * kernel ioctls (for layered drivers) 8018 */ 8019 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8020 DDI_KERNEL_IOCTL, NULL, 0); 8021 8022 /* 8023 * Add a boolean property to tell the world we support 8024 * the B_FAILFAST flag (for layered drivers) 8025 */ 8026 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8027 "ddi-failfast-supported", NULL, 0); 8028 8029 /* 8030 * Initialize power management 8031 */ 8032 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 8033 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 8034 sd_setup_pm(ssc, devi); 8035 if (un->un_f_pm_is_enabled == FALSE) { 8036 /* 8037 * For performance, point to a jump table that does 8038 * not include pm. 8039 * The direct and priority chains don't change with PM. 8040 * 8041 * Note: this is currently done based on individual device 8042 * capabilities. When an interface for determining system 8043 * power enabled state becomes available, or when additional 8044 * layers are added to the command chain, these values will 8045 * have to be re-evaluated for correctness. 8046 */ 8047 if (un->un_f_non_devbsize_supported) { 8048 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 8049 } else { 8050 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 8051 } 8052 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8053 } 8054 8055 /* 8056 * This property is set to 0 by HA software to avoid retries 8057 * on a reserved disk. (The preferred property name is 8058 * "retry-on-reservation-conflict") (1189689) 8059 * 8060 * Note: The use of a global here can have unintended consequences. 
A 8061 * per instance variable is preferable to match the capabilities of 8062 * different underlying hba's (4402600) 8063 */ 8064 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 8065 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 8066 sd_retry_on_reservation_conflict); 8067 if (sd_retry_on_reservation_conflict != 0) { 8068 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 8069 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 8070 sd_retry_on_reservation_conflict); 8071 } 8072 8073 /* Set up options for QFULL handling. */ 8074 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8075 "qfull-retries", -1)) != -1) { 8076 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 8077 rval, 1); 8078 } 8079 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8080 "qfull-retry-interval", -1)) != -1) { 8081 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 8082 rval, 1); 8083 } 8084 8085 /* 8086 * This just prints a message that announces the existence of the 8087 * device. The message is always printed in the system logfile, but 8088 * only appears on the console if the system is booted with the 8089 * -v (verbose) argument. 
8090 */ 8091 ddi_report_dev(devi); 8092 8093 un->un_mediastate = DKIO_NONE; 8094 8095 cmlb_alloc_handle(&un->un_cmlbhandle); 8096 8097 #if defined(__i386) || defined(__amd64) 8098 /* 8099 * On x86, compensate for off-by-1 legacy error 8100 */ 8101 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 8102 (lbasize == un->un_sys_blocksize)) 8103 offbyone = CMLB_OFF_BY_ONE; 8104 #endif 8105 8106 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 8107 VOID2BOOLEAN(un->un_f_has_removable_media != 0), 8108 VOID2BOOLEAN(un->un_f_is_hotpluggable != 0), 8109 un->un_node_type, offbyone, un->un_cmlbhandle, 8110 (void *)SD_PATH_DIRECT) != 0) { 8111 goto cmlb_attach_failed; 8112 } 8113 8114 8115 /* 8116 * Read and validate the device's geometry (ie, disk label) 8117 * A new unformatted drive will not have a valid geometry, but 8118 * the driver needs to successfully attach to this device so 8119 * the drive can be formatted via ioctls. 8120 */ 8121 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 8122 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 8123 8124 mutex_enter(SD_MUTEX(un)); 8125 8126 /* 8127 * Read and initialize the devid for the unit. 8128 */ 8129 if (un->un_f_devid_supported) { 8130 sd_register_devid(ssc, devi, reservation_flag); 8131 } 8132 mutex_exit(SD_MUTEX(un)); 8133 8134 #if (defined(__fibre)) 8135 /* 8136 * Register callbacks for fibre only. You can't do this solely 8137 * on the basis of the devid_type because this is hba specific. 8138 * We need to query our hba capabilities to find out whether to 8139 * register or not. 8140 */ 8141 if (un->un_f_is_fibre) { 8142 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 8143 sd_init_event_callbacks(un); 8144 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8145 "sd_unit_attach: un:0x%p event callbacks inserted", 8146 un); 8147 } 8148 } 8149 #endif 8150 8151 if (un->un_f_opt_disable_cache == TRUE) { 8152 /* 8153 * Disable both read cache and write cache. 
This is 8154 * the historic behavior of the keywords in the config file. 8155 */ 8156 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 8157 0) { 8158 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8159 "sd_unit_attach: un:0x%p Could not disable " 8160 "caching", un); 8161 goto devid_failed; 8162 } 8163 } 8164 8165 /* 8166 * Check the value of the WCE bit now and 8167 * set un_f_write_cache_enabled accordingly. 8168 */ 8169 (void) sd_get_write_cache_enabled(ssc, &wc_enabled); 8170 mutex_enter(SD_MUTEX(un)); 8171 un->un_f_write_cache_enabled = (wc_enabled != 0); 8172 mutex_exit(SD_MUTEX(un)); 8173 8174 if (un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR && 8175 un->un_tgt_blocksize != DEV_BSIZE) { 8176 if (!(un->un_wm_cache)) { 8177 (void) snprintf(name_str, sizeof (name_str), 8178 "%s%d_cache", 8179 ddi_driver_name(SD_DEVINFO(un)), 8180 ddi_get_instance(SD_DEVINFO(un))); 8181 un->un_wm_cache = kmem_cache_create( 8182 name_str, sizeof (struct sd_w_map), 8183 8, sd_wm_cache_constructor, 8184 sd_wm_cache_destructor, NULL, 8185 (void *)un, NULL, 0); 8186 if (!(un->un_wm_cache)) { 8187 goto wm_cache_failed; 8188 } 8189 } 8190 } 8191 8192 /* 8193 * Check the value of the NV_SUP bit and set 8194 * un_f_suppress_cache_flush accordingly. 8195 */ 8196 sd_get_nv_sup(ssc); 8197 8198 /* 8199 * Find out what type of reservation this disk supports. 8200 */ 8201 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL); 8202 8203 switch (status) { 8204 case 0: 8205 /* 8206 * SCSI-3 reservations are supported. 8207 */ 8208 un->un_reservation_type = SD_SCSI3_RESERVATION; 8209 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8210 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 8211 break; 8212 case ENOTSUP: 8213 /* 8214 * The PERSISTENT RESERVE IN command would not be recognized by 8215 * a SCSI-2 device, so assume the reservation type is SCSI-2. 
8216 */ 8217 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8218 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 8219 un->un_reservation_type = SD_SCSI2_RESERVATION; 8220 8221 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8222 break; 8223 default: 8224 /* 8225 * default to SCSI-3 reservations 8226 */ 8227 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8228 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 8229 un->un_reservation_type = SD_SCSI3_RESERVATION; 8230 8231 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8232 break; 8233 } 8234 8235 /* 8236 * Set the pstat and error stat values here, so data obtained during the 8237 * previous attach-time routines is available. 8238 * 8239 * Note: This is a critical sequence that needs to be maintained: 8240 * 1) Instantiate the kstats before any routines using the iopath 8241 * (i.e. sd_send_scsi_cmd). 8242 * 2) Initialize the error stats (sd_set_errstats) and partition 8243 * stats (sd_set_pstats)here, following 8244 * cmlb_validate_geometry(), sd_register_devid(), and 8245 * sd_cache_control(). 8246 */ 8247 8248 if (un->un_f_pkstats_enabled && geom_label_valid) { 8249 sd_set_pstats(un); 8250 SD_TRACE(SD_LOG_IO_PARTITION, un, 8251 "sd_unit_attach: un:0x%p pstats created and set\n", un); 8252 } 8253 8254 sd_set_errstats(un); 8255 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8256 "sd_unit_attach: un:0x%p errstats set\n", un); 8257 8258 8259 /* 8260 * After successfully attaching an instance, we record the information 8261 * of how many luns have been attached on the relative target and 8262 * controller for parallel SCSI. This information is used when sd tries 8263 * to set the tagged queuing capability in HBA. 
8264 */ 8265 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8266 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 8267 } 8268 8269 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8270 "sd_unit_attach: un:0x%p exit success\n", un); 8271 8272 /* Uninitialize sd_ssc_t pointer */ 8273 sd_ssc_fini(ssc); 8274 8275 return (DDI_SUCCESS); 8276 8277 /* 8278 * An error occurred during the attach; clean up & return failure. 8279 */ 8280 wm_cache_failed: 8281 devid_failed: 8282 8283 setup_pm_failed: 8284 ddi_remove_minor_node(devi, NULL); 8285 8286 cmlb_attach_failed: 8287 /* 8288 * Cleanup from the scsi_ifsetcap() calls (437868) 8289 */ 8290 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8291 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8292 8293 /* 8294 * Refer to the comments of setting tagged-qing in the beginning of 8295 * sd_unit_attach. We can only disable tagged queuing when there is 8296 * no lun attached on the target. 8297 */ 8298 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 8299 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8300 } 8301 8302 if (un->un_f_is_fibre == FALSE) { 8303 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8304 } 8305 8306 spinup_failed: 8307 8308 /* Uninitialize sd_ssc_t pointer */ 8309 sd_ssc_fini(ssc); 8310 8311 mutex_enter(SD_MUTEX(un)); 8312 8313 /* Deallocate SCSI FMA memory spaces */ 8314 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8315 8316 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. 
restart */ 8317 if (un->un_direct_priority_timeid != NULL) { 8318 timeout_id_t temp_id = un->un_direct_priority_timeid; 8319 un->un_direct_priority_timeid = NULL; 8320 mutex_exit(SD_MUTEX(un)); 8321 (void) untimeout(temp_id); 8322 mutex_enter(SD_MUTEX(un)); 8323 } 8324 8325 /* Cancel any pending start/stop timeouts */ 8326 if (un->un_startstop_timeid != NULL) { 8327 timeout_id_t temp_id = un->un_startstop_timeid; 8328 un->un_startstop_timeid = NULL; 8329 mutex_exit(SD_MUTEX(un)); 8330 (void) untimeout(temp_id); 8331 mutex_enter(SD_MUTEX(un)); 8332 } 8333 8334 /* Cancel any pending reset-throttle timeouts */ 8335 if (un->un_reset_throttle_timeid != NULL) { 8336 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8337 un->un_reset_throttle_timeid = NULL; 8338 mutex_exit(SD_MUTEX(un)); 8339 (void) untimeout(temp_id); 8340 mutex_enter(SD_MUTEX(un)); 8341 } 8342 8343 /* Cancel rmw warning message timeouts */ 8344 if (un->un_rmw_msg_timeid != NULL) { 8345 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8346 un->un_rmw_msg_timeid = NULL; 8347 mutex_exit(SD_MUTEX(un)); 8348 (void) untimeout(temp_id); 8349 mutex_enter(SD_MUTEX(un)); 8350 } 8351 8352 /* Cancel any pending retry timeouts */ 8353 if (un->un_retry_timeid != NULL) { 8354 timeout_id_t temp_id = un->un_retry_timeid; 8355 un->un_retry_timeid = NULL; 8356 mutex_exit(SD_MUTEX(un)); 8357 (void) untimeout(temp_id); 8358 mutex_enter(SD_MUTEX(un)); 8359 } 8360 8361 /* Cancel any pending delayed cv broadcast timeouts */ 8362 if (un->un_dcvb_timeid != NULL) { 8363 timeout_id_t temp_id = un->un_dcvb_timeid; 8364 un->un_dcvb_timeid = NULL; 8365 mutex_exit(SD_MUTEX(un)); 8366 (void) untimeout(temp_id); 8367 mutex_enter(SD_MUTEX(un)); 8368 } 8369 8370 mutex_exit(SD_MUTEX(un)); 8371 8372 /* There should not be any in-progress I/O so ASSERT this check */ 8373 ASSERT(un->un_ncmds_in_transport == 0); 8374 ASSERT(un->un_ncmds_in_driver == 0); 8375 8376 /* Do not free the softstate if the callback routine is active */ 8377 
sd_sync_with_callback(un); 8378 8379 /* 8380 * Partition stats apparently are not used with removables. These would 8381 * not have been created during attach, so no need to clean them up... 8382 */ 8383 if (un->un_errstats != NULL) { 8384 kstat_delete(un->un_errstats); 8385 un->un_errstats = NULL; 8386 } 8387 8388 create_errstats_failed: 8389 8390 if (un->un_stats != NULL) { 8391 kstat_delete(un->un_stats); 8392 un->un_stats = NULL; 8393 } 8394 8395 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8396 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8397 8398 ddi_prop_remove_all(devi); 8399 sema_destroy(&un->un_semoclose); 8400 cv_destroy(&un->un_state_cv); 8401 8402 getrbuf_failed: 8403 8404 sd_free_rqs(un); 8405 8406 alloc_rqs_failed: 8407 8408 devp->sd_private = NULL; 8409 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8410 8411 get_softstate_failed: 8412 /* 8413 * Note: the man pages are unclear as to whether or not doing a 8414 * ddi_soft_state_free(sd_state, instance) is the right way to 8415 * clean up after the ddi_soft_state_zalloc() if the subsequent 8416 * ddi_get_soft_state() fails. The implication seems to be 8417 * that the get_soft_state cannot fail if the zalloc succeeds. 8418 */ 8419 #ifndef XPV_HVM_DRIVER 8420 ddi_soft_state_free(sd_state, instance); 8421 #endif /* !XPV_HVM_DRIVER */ 8422 8423 probe_failed: 8424 scsi_unprobe(devp); 8425 8426 return (DDI_FAILURE); 8427 } 8428 8429 8430 /* 8431 * Function: sd_unit_detach 8432 * 8433 * Description: Performs DDI_DETACH processing for sddetach(). 
8434 * 8435 * Return Code: DDI_SUCCESS 8436 * DDI_FAILURE 8437 * 8438 * Context: Kernel thread context 8439 */ 8440 8441 static int 8442 sd_unit_detach(dev_info_t *devi) 8443 { 8444 struct scsi_device *devp; 8445 struct sd_lun *un; 8446 int i; 8447 int tgt; 8448 dev_t dev; 8449 dev_info_t *pdip = ddi_get_parent(devi); 8450 #ifndef XPV_HVM_DRIVER 8451 int instance = ddi_get_instance(devi); 8452 #endif /* !XPV_HVM_DRIVER */ 8453 8454 mutex_enter(&sd_detach_mutex); 8455 8456 /* 8457 * Fail the detach for any of the following: 8458 * - Unable to get the sd_lun struct for the instance 8459 * - A layered driver has an outstanding open on the instance 8460 * - Another thread is already detaching this instance 8461 * - Another thread is currently performing an open 8462 */ 8463 devp = ddi_get_driver_private(devi); 8464 if ((devp == NULL) || 8465 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8466 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8467 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8468 mutex_exit(&sd_detach_mutex); 8469 return (DDI_FAILURE); 8470 } 8471 8472 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8473 8474 /* 8475 * Mark this instance as currently in a detach, to inhibit any 8476 * opens from a layered driver. 8477 */ 8478 un->un_detach_count++; 8479 mutex_exit(&sd_detach_mutex); 8480 8481 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 8482 SCSI_ADDR_PROP_TARGET, -1); 8483 8484 dev = sd_make_device(SD_DEVINFO(un)); 8485 8486 #ifndef lint 8487 _NOTE(COMPETING_THREADS_NOW); 8488 #endif 8489 8490 mutex_enter(SD_MUTEX(un)); 8491 8492 /* 8493 * Fail the detach if there are any outstanding layered 8494 * opens on this device. 8495 */ 8496 for (i = 0; i < NDKMAP; i++) { 8497 if (un->un_ocmap.lyropen[i] != 0) { 8498 goto err_notclosed; 8499 } 8500 } 8501 8502 /* 8503 * Verify there are NO outstanding commands issued to this device. 8504 * ie, un_ncmds_in_transport == 0. 
8505 * It's possible to have outstanding commands through the physio 8506 * code path, even though everything's closed. 8507 */ 8508 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8509 (un->un_direct_priority_timeid != NULL) || 8510 (un->un_state == SD_STATE_RWAIT)) { 8511 mutex_exit(SD_MUTEX(un)); 8512 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8513 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8514 goto err_stillbusy; 8515 } 8516 8517 /* 8518 * If we have the device reserved, release the reservation. 8519 */ 8520 if ((un->un_resvd_status & SD_RESERVE) && 8521 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8522 mutex_exit(SD_MUTEX(un)); 8523 /* 8524 * Note: sd_reserve_release sends a command to the device 8525 * via the sd_ioctlcmd() path, and can sleep. 8526 */ 8527 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8528 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8529 "sd_dr_detach: Cannot release reservation \n"); 8530 } 8531 } else { 8532 mutex_exit(SD_MUTEX(un)); 8533 } 8534 8535 /* 8536 * Untimeout any reserve recover, throttle reset, restart unit 8537 * and delayed broadcast timeout threads. Protect the timeout pointer 8538 * from getting nulled by their callback functions. 
8539 */ 8540 mutex_enter(SD_MUTEX(un)); 8541 if (un->un_resvd_timeid != NULL) { 8542 timeout_id_t temp_id = un->un_resvd_timeid; 8543 un->un_resvd_timeid = NULL; 8544 mutex_exit(SD_MUTEX(un)); 8545 (void) untimeout(temp_id); 8546 mutex_enter(SD_MUTEX(un)); 8547 } 8548 8549 if (un->un_reset_throttle_timeid != NULL) { 8550 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8551 un->un_reset_throttle_timeid = NULL; 8552 mutex_exit(SD_MUTEX(un)); 8553 (void) untimeout(temp_id); 8554 mutex_enter(SD_MUTEX(un)); 8555 } 8556 8557 if (un->un_startstop_timeid != NULL) { 8558 timeout_id_t temp_id = un->un_startstop_timeid; 8559 un->un_startstop_timeid = NULL; 8560 mutex_exit(SD_MUTEX(un)); 8561 (void) untimeout(temp_id); 8562 mutex_enter(SD_MUTEX(un)); 8563 } 8564 8565 if (un->un_rmw_msg_timeid != NULL) { 8566 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8567 un->un_rmw_msg_timeid = NULL; 8568 mutex_exit(SD_MUTEX(un)); 8569 (void) untimeout(temp_id); 8570 mutex_enter(SD_MUTEX(un)); 8571 } 8572 8573 if (un->un_dcvb_timeid != NULL) { 8574 timeout_id_t temp_id = un->un_dcvb_timeid; 8575 un->un_dcvb_timeid = NULL; 8576 mutex_exit(SD_MUTEX(un)); 8577 (void) untimeout(temp_id); 8578 } else { 8579 mutex_exit(SD_MUTEX(un)); 8580 } 8581 8582 /* Remove any pending reservation reclaim requests for this device */ 8583 sd_rmv_resv_reclaim_req(dev); 8584 8585 mutex_enter(SD_MUTEX(un)); 8586 8587 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. 
*/ 8588 if (un->un_direct_priority_timeid != NULL) { 8589 timeout_id_t temp_id = un->un_direct_priority_timeid; 8590 un->un_direct_priority_timeid = NULL; 8591 mutex_exit(SD_MUTEX(un)); 8592 (void) untimeout(temp_id); 8593 mutex_enter(SD_MUTEX(un)); 8594 } 8595 8596 /* Cancel any active multi-host disk watch thread requests */ 8597 if (un->un_mhd_token != NULL) { 8598 mutex_exit(SD_MUTEX(un)); 8599 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8600 if (scsi_watch_request_terminate(un->un_mhd_token, 8601 SCSI_WATCH_TERMINATE_NOWAIT)) { 8602 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8603 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8604 /* 8605 * Note: We are returning here after having removed 8606 * some driver timeouts above. This is consistent with 8607 * the legacy implementation but perhaps the watch 8608 * terminate call should be made with the wait flag set. 8609 */ 8610 goto err_stillbusy; 8611 } 8612 mutex_enter(SD_MUTEX(un)); 8613 un->un_mhd_token = NULL; 8614 } 8615 8616 if (un->un_swr_token != NULL) { 8617 mutex_exit(SD_MUTEX(un)); 8618 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8619 if (scsi_watch_request_terminate(un->un_swr_token, 8620 SCSI_WATCH_TERMINATE_NOWAIT)) { 8621 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8622 "sd_dr_detach: Cannot cancel swr watch request\n"); 8623 /* 8624 * Note: We are returning here after having removed 8625 * some driver timeouts above. This is consistent with 8626 * the legacy implementation but perhaps the watch 8627 * terminate call should be made with the wait flag set. 8628 */ 8629 goto err_stillbusy; 8630 } 8631 mutex_enter(SD_MUTEX(un)); 8632 un->un_swr_token = NULL; 8633 } 8634 8635 mutex_exit(SD_MUTEX(un)); 8636 8637 /* 8638 * Clear any scsi_reset_notifies. We clear the reset notifies 8639 * if we have not registered one. 8640 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 
8641 */ 8642 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8643 sd_mhd_reset_notify_cb, (caddr_t)un); 8644 8645 /* 8646 * protect the timeout pointers from getting nulled by 8647 * their callback functions during the cancellation process. 8648 * In such a scenario untimeout can be invoked with a null value. 8649 */ 8650 _NOTE(NO_COMPETING_THREADS_NOW); 8651 8652 mutex_enter(&un->un_pm_mutex); 8653 if (un->un_pm_idle_timeid != NULL) { 8654 timeout_id_t temp_id = un->un_pm_idle_timeid; 8655 un->un_pm_idle_timeid = NULL; 8656 mutex_exit(&un->un_pm_mutex); 8657 8658 /* 8659 * Timeout is active; cancel it. 8660 * Note that it'll never be active on a device 8661 * that does not support PM therefore we don't 8662 * have to check before calling pm_idle_component. 8663 */ 8664 (void) untimeout(temp_id); 8665 (void) pm_idle_component(SD_DEVINFO(un), 0); 8666 mutex_enter(&un->un_pm_mutex); 8667 } 8668 8669 /* 8670 * Check whether there is already a timeout scheduled for power 8671 * management. If yes then don't lower the power here, that's. 8672 * the timeout handler's job. 8673 */ 8674 if (un->un_pm_timeid != NULL) { 8675 timeout_id_t temp_id = un->un_pm_timeid; 8676 un->un_pm_timeid = NULL; 8677 mutex_exit(&un->un_pm_mutex); 8678 /* 8679 * Timeout is active; cancel it. 8680 * Note that it'll never be active on a device 8681 * that does not support PM therefore we don't 8682 * have to check before calling pm_idle_component. 
8683 */ 8684 (void) untimeout(temp_id); 8685 (void) pm_idle_component(SD_DEVINFO(un), 0); 8686 8687 } else { 8688 mutex_exit(&un->un_pm_mutex); 8689 if ((un->un_f_pm_is_enabled == TRUE) && 8690 (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un)) 8691 != DDI_SUCCESS)) { 8692 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8693 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8694 /* 8695 * Fix for bug: 4297749, item # 13 8696 * The above test now includes a check to see if PM is 8697 * supported by this device before call 8698 * pm_lower_power(). 8699 * Note, the following is not dead code. The call to 8700 * pm_lower_power above will generate a call back into 8701 * our sdpower routine which might result in a timeout 8702 * handler getting activated. Therefore the following 8703 * code is valid and necessary. 8704 */ 8705 mutex_enter(&un->un_pm_mutex); 8706 if (un->un_pm_timeid != NULL) { 8707 timeout_id_t temp_id = un->un_pm_timeid; 8708 un->un_pm_timeid = NULL; 8709 mutex_exit(&un->un_pm_mutex); 8710 (void) untimeout(temp_id); 8711 (void) pm_idle_component(SD_DEVINFO(un), 0); 8712 } else { 8713 mutex_exit(&un->un_pm_mutex); 8714 } 8715 } 8716 } 8717 8718 /* 8719 * Cleanup from the scsi_ifsetcap() calls (437868) 8720 * Relocated here from above to be after the call to 8721 * pm_lower_power, which was getting errors. 8722 */ 8723 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8724 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8725 8726 /* 8727 * Currently, tagged queuing is supported per target based by HBA. 8728 * Setting this per lun instance actually sets the capability of this 8729 * target in HBA, which affects those luns already attached on the 8730 * same target. So during detach, we can only disable this capability 8731 * only when this is the only lun left on this target. By doing 8732 * this, we assume a target has the same tagged queuing capability 8733 * for every lun. 
The condition can be removed when HBA is changed to 8734 * support per lun based tagged queuing capability. 8735 */ 8736 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8737 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8738 } 8739 8740 if (un->un_f_is_fibre == FALSE) { 8741 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8742 } 8743 8744 /* 8745 * Remove any event callbacks, fibre only 8746 */ 8747 if (un->un_f_is_fibre == TRUE) { 8748 if ((un->un_insert_event != NULL) && 8749 (ddi_remove_event_handler(un->un_insert_cb_id) != 8750 DDI_SUCCESS)) { 8751 /* 8752 * Note: We are returning here after having done 8753 * substantial cleanup above. This is consistent 8754 * with the legacy implementation but this may not 8755 * be the right thing to do. 8756 */ 8757 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8758 "sd_dr_detach: Cannot cancel insert event\n"); 8759 goto err_remove_event; 8760 } 8761 un->un_insert_event = NULL; 8762 8763 if ((un->un_remove_event != NULL) && 8764 (ddi_remove_event_handler(un->un_remove_cb_id) != 8765 DDI_SUCCESS)) { 8766 /* 8767 * Note: We are returning here after having done 8768 * substantial cleanup above. This is consistent 8769 * with the legacy implementation but this may not 8770 * be the right thing to do. 8771 */ 8772 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8773 "sd_dr_detach: Cannot cancel remove event\n"); 8774 goto err_remove_event; 8775 } 8776 un->un_remove_event = NULL; 8777 } 8778 8779 /* Do not free the softstate if the callback routine is active */ 8780 sd_sync_with_callback(un); 8781 8782 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 8783 cmlb_free_handle(&un->un_cmlbhandle); 8784 8785 /* 8786 * Hold the detach mutex here, to make sure that no other threads ever 8787 * can access a (partially) freed soft state structure. 8788 */ 8789 mutex_enter(&sd_detach_mutex); 8790 8791 /* 8792 * Clean up the soft state struct. 8793 * Cleanup is done in reverse order of allocs/inits. 
8794 * At this point there should be no competing threads anymore. 8795 */ 8796 8797 scsi_fm_fini(devp); 8798 8799 /* 8800 * Deallocate memory for SCSI FMA. 8801 */ 8802 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8803 8804 /* 8805 * Unregister and free device id if it was not registered 8806 * by the transport. 8807 */ 8808 if (un->un_f_devid_transport_defined == FALSE) 8809 ddi_devid_unregister(devi); 8810 8811 /* 8812 * free the devid structure if allocated before (by ddi_devid_init() 8813 * or ddi_devid_get()). 8814 */ 8815 if (un->un_devid) { 8816 ddi_devid_free(un->un_devid); 8817 un->un_devid = NULL; 8818 } 8819 8820 /* 8821 * Destroy wmap cache if it exists. 8822 */ 8823 if (un->un_wm_cache != NULL) { 8824 kmem_cache_destroy(un->un_wm_cache); 8825 un->un_wm_cache = NULL; 8826 } 8827 8828 /* 8829 * kstat cleanup is done in detach for all device types (4363169). 8830 * We do not want to fail detach if the device kstats are not deleted 8831 * since there is a confusion about the devo_refcnt for the device. 8832 * We just delete the kstats and let detach complete successfully. 
8833 */ 8834 if (un->un_stats != NULL) { 8835 kstat_delete(un->un_stats); 8836 un->un_stats = NULL; 8837 } 8838 if (un->un_errstats != NULL) { 8839 kstat_delete(un->un_errstats); 8840 un->un_errstats = NULL; 8841 } 8842 8843 /* Remove partition stats */ 8844 if (un->un_f_pkstats_enabled) { 8845 for (i = 0; i < NSDMAP; i++) { 8846 if (un->un_pstats[i] != NULL) { 8847 kstat_delete(un->un_pstats[i]); 8848 un->un_pstats[i] = NULL; 8849 } 8850 } 8851 } 8852 8853 /* Remove xbuf registration */ 8854 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8855 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8856 8857 /* Remove driver properties */ 8858 ddi_prop_remove_all(devi); 8859 8860 mutex_destroy(&un->un_pm_mutex); 8861 cv_destroy(&un->un_pm_busy_cv); 8862 8863 cv_destroy(&un->un_wcc_cv); 8864 8865 /* Open/close semaphore */ 8866 sema_destroy(&un->un_semoclose); 8867 8868 /* Removable media condvar. */ 8869 cv_destroy(&un->un_state_cv); 8870 8871 /* Suspend/resume condvar. */ 8872 cv_destroy(&un->un_suspend_cv); 8873 cv_destroy(&un->un_disk_busy_cv); 8874 8875 sd_free_rqs(un); 8876 8877 /* Free up soft state */ 8878 devp->sd_private = NULL; 8879 8880 bzero(un, sizeof (struct sd_lun)); 8881 #ifndef XPV_HVM_DRIVER 8882 ddi_soft_state_free(sd_state, instance); 8883 #endif /* !XPV_HVM_DRIVER */ 8884 8885 mutex_exit(&sd_detach_mutex); 8886 8887 /* This frees up the INQUIRY data associated with the device. */ 8888 scsi_unprobe(devp); 8889 8890 /* 8891 * After successfully detaching an instance, we update the information 8892 * of how many luns have been attached in the relative target and 8893 * controller for parallel SCSI. This information is used when sd tries 8894 * to set the tagged queuing capability in HBA. 8895 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8896 * check if the device is parallel SCSI. However, we don't need to 8897 * check here because we've already checked during attach. No device 8898 * that is not parallel SCSI is in the chain. 
 */
	/* Update per-target LUN accounting used for tagged-queueing setup. */
	if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) {
		sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH);
	}

	return (DDI_SUCCESS);

err_notclosed:
	mutex_exit(SD_MUTEX(un));

err_stillbusy:
	_NOTE(NO_COMPETING_THREADS_NOW);

err_remove_event:
	/* Undo the detach count taken on entry so future opens/detaches work. */
	mutex_enter(&sd_detach_mutex);
	un->un_detach_count--;
	mutex_exit(&sd_detach_mutex);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n");
	return (DDI_FAILURE);
}


/*
 * Function: sd_create_errstats
 *
 * Description: This routine instantiates the device error stats
 *		("<sd-label><instance>,err" named kstat).
 *
 *		Note: During attach the stats are instantiated first so they are
 *		available for attach-time routines that utilize the driver
 *		iopath to send commands to the device. The stats are initialized
 *		separately so data obtained during some attach-time routines is
 *		available. (4362483)
 *
 *   Arguments: un - driver soft state (unit) structure
 *		instance - driver instance
 *
 *     Context: Kernel thread context
 */

static void
sd_create_errstats(struct sd_lun *un, int instance)
{
	struct	sd_errstats	*stp;
	char	kstatmodule_err[KSTAT_STRLEN];
	char	kstatname[KSTAT_STRLEN];
	int	ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t));

	ASSERT(un != NULL);

	/* Idempotent: nothing to do if the error kstat already exists. */
	if (un->un_errstats != NULL) {
		return;
	}

	(void) snprintf(kstatmodule_err, sizeof (kstatmodule_err),
	    "%serr", sd_label);
	(void) snprintf(kstatname, sizeof (kstatname),
	    "%s%d,err", sd_label, instance);

	/* KSTAT_FLAG_PERSISTENT keeps the counters across detach/attach. */
	un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname,
	    "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT);

	if (un->un_errstats == NULL) {
		SD_ERROR(SD_LOG_ATTACH_DETACH, un,
		    "sd_create_errstats: Failed kstat_create\n");
		return;
	}

	/* Name each statistic; values are filled in later by sd_set_errstats. */
	stp = (struct sd_errstats *)un->un_errstats->ks_data;
	kstat_named_init(&stp->sd_softerrs, "Soft Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_harderrs, "Hard Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_transerrs, "Transport Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_vid, "Vendor",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_pid, "Product",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_revision, "Revision",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_serial, "Serial No",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_capacity, "Size",
	    KSTAT_DATA_ULONGLONG);
	kstat_named_init(&stp->sd_rq_media_err, "Media Error",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_nodev_err, "No Device",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_recov_err, "Recoverable",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis",
	    KSTAT_DATA_UINT32);

	un->un_errstats->ks_private = un;
	un->un_errstats->ks_update = nulldev;	/* no per-read update needed */

	kstat_install(un->un_errstats);
}


/*
 * Function: sd_set_errstats
 *
 * Description: This routine sets the value of the vendor id, product id,
 *		revision, serial number, and capacity device error stats.
 *
 *		Note: During attach the stats are instantiated first so they are
 *		available for attach-time routines that utilize the driver
 *		iopath to send commands to the device. The stats are initialized
 *		separately so data obtained during some attach-time routines is
 *		available.
 *		(4362483)
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 *     Context: Kernel thread context
 */

static void
sd_set_errstats(struct sd_lun *un)
{
	struct	sd_errstats	*stp;

	ASSERT(un != NULL);
	ASSERT(un->un_errstats != NULL);
	stp = (struct sd_errstats *)un->un_errstats->ks_data;
	ASSERT(stp != NULL);
	/*
	 * The kstat value fields are fixed-width char arrays; the inquiry
	 * fields are exactly 8/16/4 bytes, so strncpy fills without a
	 * guaranteed NUL terminator (by design for kstat char data).
	 */
	(void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
	(void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
	(void) strncpy(stp->sd_revision.value.c,
	    un->un_sd->sd_inq->inq_revision, 4);

	/*
	 * All the errstats are persistent across detach/attach,
	 * so reset all the errstats here in case of the hot
	 * replacement of disk drives, except for not changed
	 * Sun qualified drives.
	 */
	if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
	    (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
	    sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
		stp->sd_softerrs.value.ui32 = 0;
		stp->sd_harderrs.value.ui32 = 0;
		stp->sd_transerrs.value.ui32 = 0;
		stp->sd_rq_media_err.value.ui32 = 0;
		stp->sd_rq_ntrdy_err.value.ui32 = 0;
		stp->sd_rq_nodev_err.value.ui32 = 0;
		stp->sd_rq_recov_err.value.ui32 = 0;
		stp->sd_rq_illrq_err.value.ui32 = 0;
		stp->sd_rq_pfa_err.value.ui32 = 0;
	}

	/*
	 * Set the "Serial No" kstat for Sun qualified drives (indicated by
	 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
	 * (4376302))
	 */
	if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
		bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
		    sizeof (SD_INQUIRY(un)->inq_serial));
	}

	if (un->un_f_blockcount_is_valid != TRUE) {
		/*
		 * Set capacity error stat to 0 for no media. This ensures
		 * a valid capacity is displayed in response to 'iostat -E'
		 * when no media is present in the device.
		 */
		stp->sd_capacity.value.ui64 = 0;
	} else {
		/*
		 * Multiply un_blockcount by un->un_sys_blocksize to get
		 * capacity.
		 *
		 * Note: for non-512 blocksize devices "un_blockcount" has been
		 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
		 * (un_tgt_blocksize / un->un_sys_blocksize).
		 */
		stp->sd_capacity.value.ui64 = (uint64_t)
		    ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
	}
}


/*
 * Function: sd_set_pstats
 *
 * Description: This routine instantiates and initializes the partition
 *		stats for each partition with more than zero blocks.
 *		(4363169)
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 *     Context: Kernel thread context
 */

static void
sd_set_pstats(struct sd_lun *un)
{
	char	kstatname[KSTAT_STRLEN];
	int	instance;
	int	i;
	diskaddr_t	nblks = 0;
	char	*partname = NULL;

	ASSERT(un != NULL);

	instance = ddi_get_instance(SD_DEVINFO(un));

	/* Note:x86: is this a VTOC8/VTOC16 difference?
 */
	for (i = 0; i < NSDMAP; i++) {

		/* Skip partitions for which cmlb has no geometry info. */
		if (cmlb_partinfo(un->un_cmlbhandle, i,
		    &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
			continue;
		mutex_enter(SD_MUTEX(un));

		/* Only create a kstat once, and only for non-empty partitions. */
		if ((un->un_pstats[i] == NULL) &&
		    (nblks != 0)) {

			(void) snprintf(kstatname, sizeof (kstatname),
			    "%s%d,%s", sd_label, instance,
			    partname);

			un->un_pstats[i] = kstat_create(sd_label,
			    instance, kstatname, "partition", KSTAT_TYPE_IO,
			    1, KSTAT_FLAG_PERSISTENT);
			if (un->un_pstats[i] != NULL) {
				/* The IO kstat shares the per-unit mutex. */
				un->un_pstats[i]->ks_lock = SD_MUTEX(un);
				kstat_install(un->un_pstats[i]);
			}
		}
		mutex_exit(SD_MUTEX(un));
	}
}


#if (defined(__fibre))
/*
 * Function: sd_init_event_callbacks
 *
 * Description: This routine initializes the insertion and removal event
 *		callbacks. (fibre only)
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 *     Context: Kernel thread context
 */

static void
sd_init_event_callbacks(struct sd_lun *un)
{
	ASSERT(un != NULL);

	if ((un->un_insert_event == NULL) &&
	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
	    &un->un_insert_event) == DDI_SUCCESS)) {
		/*
		 * Add the callback for an insertion event
		 */
		(void) ddi_add_event_handler(SD_DEVINFO(un),
		    un->un_insert_event, sd_event_callback, (void *)un,
		    &(un->un_insert_cb_id));
	}

	if ((un->un_remove_event == NULL) &&
	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
	    &un->un_remove_event) == DDI_SUCCESS)) {
		/*
		 * Add the callback for a removal event
		 */
		(void) ddi_add_event_handler(SD_DEVINFO(un),
		    un->un_remove_event, sd_event_callback, (void *)un,
		    &(un->un_remove_cb_id));
	}
}


/*
 * Function: sd_event_callback
 *
 * Description: This routine handles insert/remove events (photon).
 *		The
 *		state is changed to OFFLINE which can be used to suppress
 *		error msgs. (fibre only)
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 *     Context: Callout thread context
 */
/* ARGSUSED */
static void
sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
    void *bus_impldata)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
	if (event == un->un_insert_event) {
		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
		mutex_enter(SD_MUTEX(un));
		/* Device re-inserted: leave OFFLINE, restoring the prior state. */
		if (un->un_state == SD_STATE_OFFLINE) {
			if (un->un_last_state != SD_STATE_SUSPENDED) {
				un->un_state = un->un_last_state;
			} else {
				/*
				 * We have gone through SUSPEND/RESUME while
				 * we were offline. Restore the last state
				 */
				un->un_state = un->un_save_state;
			}
		}
		mutex_exit(SD_MUTEX(un));

		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
	} else if (event == un->un_remove_event) {
		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
		mutex_enter(SD_MUTEX(un));
		/*
		 * We need to handle an event callback that occurs during
		 * the suspend operation, since we don't prevent it.
		 */
		if (un->un_state != SD_STATE_OFFLINE) {
			if (un->un_state != SD_STATE_SUSPENDED) {
				New_state(un, SD_STATE_OFFLINE);
			} else {
				/* Let resume find the device offline. */
				un->un_last_state = SD_STATE_OFFLINE;
			}
		}
		mutex_exit(SD_MUTEX(un));
	} else {
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "!Unknown event\n");
	}

}
#endif

/*
 * Function: sd_cache_control()
 *
 * Description: This routine is the driver entry point for setting
 *		read and write caching by modifying the WCE (write cache
 *		enable) and RCD (read cache disable) bits of mode
 *		page 8 (MODEPAGE_CACHING).
 *
 *   Arguments: ssc   - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *		rcd_flag - flag for controlling the read cache
 *		wce_flag - flag for controlling the write cache
 *
 * Return Code: EIO
 *		code returned by sd_send_scsi_MODE_SENSE and
 *		sd_send_scsi_MODE_SELECT
 *
 *     Context: Kernel Thread
 */

static int
sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag)
{
	struct mode_caching	*mode_caching_page;
	uchar_t			*header;
	size_t			buflen;
	int			hdrlen;
	int			bd_len;
	int			rval = 0;
	struct mode_header_grp2	*mhp;
	struct sd_lun		*un;
	int			status;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * Do a test unit ready, otherwise a mode sense may not work if this
	 * is the first command sent to the device after boot.
	 */
	status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
	if (status != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	/* ATAPI devices use the 10-byte (group 2) mode header. */
	if (un->un_f_cfg_is_atapi == TRUE) {
		hdrlen = MODE_HEADER_LENGTH_GRP2;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
	}

	/*
	 * Allocate memory for the retrieved mode page and its headers. Set
	 * a pointer to the page itself. Use mode_cache_scsi3 to insure
	 * we get all of the mode sense data otherwise, the mode select
	 * will fail. mode_cache_scsi3 is a superset of mode_caching.
	 */
	buflen = hdrlen + MODE_BLK_DESC_LENGTH +
	    sizeof (struct mode_cache_scsi3);

	header = kmem_zalloc(buflen, KM_SLEEP);

	/* Get the information from the device. */
	if (un->un_f_cfg_is_atapi == TRUE) {
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	} else {
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	}

	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sd_cache_control: Mode Sense Failed\n");
		goto mode_sense_failed;
	}

	/*
	 * Determine size of Block Descriptors in order to locate
	 * the mode page data. ATAPI devices return 0, SCSI devices
	 * should return MODE_BLK_DESC_LENGTH.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mhp = (struct mode_header_grp2 *)header;
		bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
	} else {
		bd_len = ((struct mode_header *)header)->bdesc_length;
	}

	if (bd_len > MODE_BLK_DESC_LENGTH) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
		    "sd_cache_control: Mode Sense returned invalid block "
		    "descriptor length\n");
		rval = EIO;
		goto mode_sense_failed;
	}

	/* Sanity-check that the device actually returned the caching page. */
	mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
	if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
		    "sd_cache_control: Mode Sense caching page code mismatch "
		    "%d\n", mode_caching_page->mode_page.code);
		rval = EIO;
		goto mode_sense_failed;
	}

	/* Check the relevant bits on successful mode sense. */
	if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) ||
	    (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) ||
	    (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) ||
	    (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) {

		size_t sbuflen;
		uchar_t save_pg;

		/*
		 * Construct select buffer length based on the
		 * length of the sense data returned.
		 */
		sbuflen = hdrlen + bd_len +
		    sizeof (struct mode_page) +
		    (int)mode_caching_page->mode_page.length;

		/*
		 * Set the caching bits as requested.
		 * Note: rcd is "read cache DISABLE", hence the inversion.
		 */
		if (rcd_flag == SD_CACHE_ENABLE)
			mode_caching_page->rcd = 0;
		else if (rcd_flag == SD_CACHE_DISABLE)
			mode_caching_page->rcd = 1;

		if (wce_flag == SD_CACHE_ENABLE)
			mode_caching_page->wce = 1;
		else if (wce_flag == SD_CACHE_DISABLE)
			mode_caching_page->wce = 0;

		/*
		 * Save the page if the mode sense says the
		 * drive supports it.
		 */
		save_pg = mode_caching_page->mode_page.ps ?
		    SD_SAVE_PAGE : SD_DONTSAVE_PAGE;

		/* Clear reserved bits before mode select. */
		mode_caching_page->mode_page.ps = 0;

		/*
		 * Clear out mode header for mode select.
		 * The rest of the retrieved page will be reused.
		 */
		bzero(header, hdrlen);

		if (un->un_f_cfg_is_atapi == TRUE) {
			mhp = (struct mode_header_grp2 *)header;
			mhp->bdesc_length_hi = bd_len >> 8;
			mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff;
		} else {
			((struct mode_header *)header)->bdesc_length = bd_len;
		}

		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

		/* Issue mode select to change the cache settings */
		if (un->un_f_cfg_is_atapi == TRUE) {
			rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header,
			    sbuflen, save_pg, SD_PATH_DIRECT);
		} else {
			rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header,
			    sbuflen, save_pg, SD_PATH_DIRECT);
		}

	}


mode_sense_failed:

	kmem_free(header, buflen);

	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
	return (rval);
}


/*
 * Function: sd_get_write_cache_enabled()
 *
 * Description: This routine is the driver entry point for determining if
 *		write caching
 *		is enabled. It examines the WCE (write cache
 *		enable) bits of mode page 8 (MODEPAGE_CACHING).
 *
 *   Arguments: ssc   - ssc contains pointer to driver soft state (unit)
 *			structure for this target.
 *		is_enabled - pointer to int where write cache enabled state
 *			is returned (non-zero -> write cache enabled)
 *
 *
 * Return Code: EIO
 *		code returned by sd_send_scsi_MODE_SENSE
 *
 *     Context: Kernel Thread
 *
 * NOTE: If ioctl is added to disable write cache, this sequence should
 * be followed so that no locking is required for accesses to
 * un->un_f_write_cache_enabled:
 *	do mode select to clear wce
 *	do synchronize cache to flush cache
 *	set un->un_f_write_cache_enabled = FALSE
 *
 * Conversely, an ioctl to enable the write cache should be done
 * in this order:
 *	set un->un_f_write_cache_enabled = TRUE
 *	do mode select to set wce
 */

static int
sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled)
{
	struct mode_caching	*mode_caching_page;
	uchar_t			*header;
	size_t			buflen;
	int			hdrlen;
	int			bd_len;
	int			rval = 0;
	struct sd_lun		*un;
	int			status;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(is_enabled != NULL);

	/* in case of error, flag as enabled */
	*is_enabled = TRUE;

	/*
	 * Do a test unit ready, otherwise a mode sense may not work if this
	 * is the first command sent to the device after boot.
	 */
	status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);

	if (status != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	/* ATAPI devices use the 10-byte (group 2) mode header. */
	if (un->un_f_cfg_is_atapi == TRUE) {
		hdrlen = MODE_HEADER_LENGTH_GRP2;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
	}

	/*
	 * Allocate memory for the retrieved mode page and its headers. Set
	 * a pointer to the page itself.
	 */
	buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching);
	header = kmem_zalloc(buflen, KM_SLEEP);

	/* Get the information from the device. */
	if (un->un_f_cfg_is_atapi == TRUE) {
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	} else {
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	}

	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sd_get_write_cache_enabled: Mode Sense Failed\n");
		goto mode_sense_failed;
	}

	/*
	 * Determine size of Block Descriptors in order to locate
	 * the mode page data. ATAPI devices return 0, SCSI devices
	 * should return MODE_BLK_DESC_LENGTH.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		struct mode_header_grp2 *mhp;
		mhp = (struct mode_header_grp2 *)header;
		bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
	} else {
		bd_len = ((struct mode_header *)header)->bdesc_length;
	}

	if (bd_len > MODE_BLK_DESC_LENGTH) {
		/* FMA should make upset complain here */
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
		    "sd_get_write_cache_enabled: Mode Sense returned invalid "
		    "block descriptor length\n");
		rval = EIO;
		goto mode_sense_failed;
	}

	/* Verify the device returned the caching page we asked for. */
	mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
	if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
		/* FMA could make upset complain here */
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
		    "sd_get_write_cache_enabled: Mode Sense caching page "
		    "code mismatch %d\n", mode_caching_page->mode_page.code);
		rval = EIO;
		goto mode_sense_failed;
	}
	*is_enabled = mode_caching_page->wce;

mode_sense_failed:
	if (rval == 0) {
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
	} else if (rval == EIO) {
		/*
		 * Some disks do not support mode sense(6), we
		 * should ignore this kind of error(sense key is
		 * 0x5 - illegal request).
		 */
		uint8_t *sensep;
		int senlen;

		sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
		senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
		    ssc->ssc_uscsi_cmd->uscsi_rqresid);

		if (senlen > 0 &&
		    scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		} else {
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		}
	} else {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
	kmem_free(header, buflen);
	return (rval);
}

/*
 * Function: sd_get_nv_sup()
 *
 * Description: This routine is the driver entry point for
 * determining whether non-volatile cache is supported. This
 * determination process works as follows:
 *
 * 1. sd first queries sd.conf on whether
 * suppress_cache_flush bit is set for this device.
 *
 * 2. if not there, then queries the internal disk table.
 *
 * 3. if either sd.conf or internal disk table specifies
 * cache flush be suppressed, we don't bother checking
 * NV_SUP bit.
 *
 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries
 * the optional INQUIRY VPD page 0x86. If the device
 * supports VPD page 0x86, sd examines the NV_SUP
 * (non-volatile cache support) bit in the INQUIRY VPD page
 * 0x86:
 *   o If NV_SUP bit is set, sd assumes the device has a
 *	non-volatile cache and set the
 *	un_f_sync_nv_supported to TRUE.
 *   o Otherwise cache is not non-volatile,
 *	un_f_sync_nv_supported is set to FALSE.
 *
 *   Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target (comment previously said "un";
 *		the parameter is actually an sd_ssc_t).
 *
 * Return Code:
 *
 *     Context: Kernel Thread
 */

static void
sd_get_nv_sup(sd_ssc_t *ssc)
{
	int		rval		= 0;
	uchar_t		*inq86		= NULL;
	size_t		inq86_len	= MAX_INQUIRY_SIZE;
	size_t		inq86_resid	= 0;
	struct		dk_callback *dkc;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Be conservative on the device's support of
	 * SYNC_NV bit: un_f_sync_nv_supported is
	 * initialized to be false.
	 */
	un->un_f_sync_nv_supported = FALSE;

	/*
	 * If either sd.conf or internal disk table
	 * specifies cache flush be suppressed, then
	 * we don't bother checking NV_SUP bit.
	 */
	if (un->un_f_suppress_cache_flush == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	if (sd_check_vpd_page_support(ssc) == 0 &&
	    un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
		mutex_exit(SD_MUTEX(un));
		/* collect page 86 data if available */
		inq86 = kmem_zalloc(inq86_len, KM_SLEEP);

		rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
		    0x01, 0x86, &inq86_resid);

		/* Need at least 7 returned bytes to examine byte 6 (NV_SUP). */
		if (rval == 0 && (inq86_len - inq86_resid > 6)) {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_get_nv_sup: \
successfully get VPD page: %x \
PAGE LENGTH: %x BYTE 6: %x\n",
			    inq86[1], inq86[3], inq86[6]);

			mutex_enter(SD_MUTEX(un));
			/*
			 * check the value of NV_SUP bit: only if the device
			 * reports NV_SUP bit to be 1, the
			 * un_f_sync_nv_supported bit will be set to true.
			 */
			if (inq86[6] & SD_VPD_NV_SUP) {
				un->un_f_sync_nv_supported = TRUE;
			}
			mutex_exit(SD_MUTEX(un));
		} else if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		kmem_free(inq86, inq86_len);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Send a SYNC CACHE command to check whether
	 * SYNC_NV bit is supported. This command should have
	 * un_f_sync_nv_supported set to correct value.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_sync_nv_supported) {
		mutex_exit(SD_MUTEX(un));
		dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
		dkc->dkc_flag = FLUSH_VOLATILE;
		(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);

		/*
		 * Send a TEST UNIT READY command to the device. This should
		 * clear any outstanding UNIT ATTENTION that may be present.
		 */
		rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
		if (rval != 0)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);

		kmem_free(dkc, sizeof (struct dk_callback));
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \
un_f_suppress_cache_flush is set to %d\n",
	    un->un_f_suppress_cache_flush);
}

/*
 * Function: sd_make_device
 *
 * Description: Utility routine to return the Solaris device number from
 *		the data in the device's dev_info structure.
 *
 * Return Code: The Solaris device number
 *
 *     Context: Any
 */

static dev_t
sd_make_device(dev_info_t *devi)
{
	/* Minor number encodes the instance; partition bits are left zero. */
	return (makedevice(ddi_driver_major(devi),
	    ddi_get_instance(devi) << SDUNIT_SHIFT));
}


/*
 * Function: sd_pm_entry
 *
 * Description: Called at the start of a new command to manage power
 *		and busy status of a device.
 *		This includes determining whether
 *		the current power state of the device is sufficient for
 *		performing the command or whether it must be changed.
 *		The PM framework is notified appropriately.
 *		Only with a return status of DDI_SUCCESS will the
 *		component be busy to the framework.
 *
 *		All callers of sd_pm_entry must check the return status
 *		and only call sd_pm_exit if it was DDI_SUCCESS. A status
 *		of DDI_FAILURE indicates the device failed to power up.
 *		In this case un_pm_count has been adjusted so the result
 *		on exit is still powered down, ie. count is less than 0.
 *		Calling sd_pm_exit with this count value hits an ASSERT.
 *
 * Return Code: DDI_SUCCESS or DDI_FAILURE
 *
 *     Context: Kernel thread context.
 */

static int
sd_pm_entry(struct sd_lun *un)
{
	int return_status = DDI_SUCCESS;

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");

	if (un->un_f_pm_is_enabled == FALSE) {
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_entry: exiting, PM not enabled\n");
		return (return_status);
	}

	/*
	 * Just increment a counter if PM is enabled. On the transition from
	 * 0 ==> 1, mark the device as busy. The iodone side will decrement
	 * the count with each IO and mark the device as idle when the count
	 * hits 0.
	 *
	 * If the count is less than 0 the device is powered down. If a powered
	 * down device is successfully powered up then the count must be
	 * incremented to reflect the power up. Note that it'll get incremented
	 * a second time to become busy.
	 *
	 * Because the following has the potential to change the device state
	 * and must release the un_pm_mutex to do so, only one thread can be
	 * allowed through at a time.
	 */

	mutex_enter(&un->un_pm_mutex);
	while (un->un_pm_busy == TRUE) {
		cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
	}
	un->un_pm_busy = TRUE;

	if (un->un_pm_count < 1) {

		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");

		/*
		 * Indicate we are now busy so the framework won't attempt to
		 * power down the device. This call will only fail if either
		 * we passed a bad component number or the device has no
		 * components. Neither of these should ever happen.
		 */
		mutex_exit(&un->un_pm_mutex);
		return_status = pm_busy_component(SD_DEVINFO(un), 0);
		ASSERT(return_status == DDI_SUCCESS);

		mutex_enter(&un->un_pm_mutex);

		if (un->un_pm_count < 0) {
			mutex_exit(&un->un_pm_mutex);

			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_entry: power up component\n");

			/*
			 * pm_raise_power will cause sdpower to be called
			 * which brings the device power level to the
			 * desired state, If successful, un_pm_count and
			 * un_power_level will be updated appropriately.
			 */
			return_status = pm_raise_power(SD_DEVINFO(un), 0,
			    SD_PM_STATE_ACTIVE(un));

			mutex_enter(&un->un_pm_mutex);

			if (return_status != DDI_SUCCESS) {
				/*
				 * Power up failed.
				 * Idle the device and adjust the count
				 * so the result on exit is that we're
				 * still powered down, ie. count is less than 0.
				 */
				SD_TRACE(SD_LOG_IO_PM, un,
				    "sd_pm_entry: power up failed,"
				    " idle the component\n");

				(void) pm_idle_component(SD_DEVINFO(un), 0);
				un->un_pm_count--;
			} else {
				/*
				 * Device is powered up, verify the
				 * count is non-negative.
				 * This is debug only.
				 */
				ASSERT(un->un_pm_count == 0);
			}
		}

		if (return_status == DDI_SUCCESS) {
			/*
			 * For performance, now that the device has been tagged
			 * as busy, and it's known to be powered up, update the
			 * chain types to use jump tables that do not include
			 * pm. This significantly lowers the overhead and
			 * therefore improves performance.
			 */

			mutex_exit(&un->un_pm_mutex);
			mutex_enter(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_entry: changing uscsi_chain_type from %d\n",
			    un->un_uscsi_chain_type);

			if (un->un_f_non_devbsize_supported) {
				un->un_buf_chain_type =
				    SD_CHAIN_INFO_RMMEDIA_NO_PM;
			} else {
				un->un_buf_chain_type =
				    SD_CHAIN_INFO_DISK_NO_PM;
			}
			un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;

			SD_TRACE(SD_LOG_IO_PM, un,
			    "    changed  uscsi_chain_type to   %d\n",
			    un->un_uscsi_chain_type);
			mutex_exit(SD_MUTEX(un));
			mutex_enter(&un->un_pm_mutex);

			if (un->un_pm_idle_timeid == NULL) {
				/* 300 ms. */
				un->un_pm_idle_timeid =
				    timeout(sd_pm_idletimeout_handler, un,
				    (drv_usectohz((clock_t)300000)));
				/*
				 * Include an extra call to busy which keeps the
				 * device busy with-respect-to the PM layer
				 * until the timer fires, at which time it'll
				 * get the extra idle call.
				 */
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			}
		}
	}
	un->un_pm_busy = FALSE;
	/* Next... */
	cv_signal(&un->un_pm_busy_cv);

	un->un_pm_count++;

	SD_TRACE(SD_LOG_IO_PM, un,
	    "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);

	mutex_exit(&un->un_pm_mutex);

	return (return_status);
}


/*
 * Function: sd_pm_exit
 *
 * Description: Called at the completion of a command to manage busy
 *		status for the device. If the device becomes idle the
 *		PM framework is notified.
 *
 *     Context: Kernel thread context
 */

static void
sd_pm_exit(struct sd_lun *un)
{
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");

	/*
	 * After attach the following flag is only read, so don't
	 * take the penalty of acquiring a mutex for it.
	 */
	if (un->un_f_pm_is_enabled == TRUE) {

		mutex_enter(&un->un_pm_mutex);
		un->un_pm_count--;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);

		/* Count must never go negative; sd_pm_entry balances it. */
		ASSERT(un->un_pm_count >= 0);
		if (un->un_pm_count == 0) {
			mutex_exit(&un->un_pm_mutex);

			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_exit: idle component\n");

			(void) pm_idle_component(SD_DEVINFO(un), 0);

		} else {
			mutex_exit(&un->un_pm_mutex);
		}
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
}


/*
 * Function: sdopen
 *
 * Description: Driver's open(9e) entry point function.
 *
 *   Arguments: dev_p  - pointer to device number
 *		flag   - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
 *		otyp   - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
 *		cred_p - user credential pointer
 *
 * Return Code: EINVAL
 *		ENXIO
 *		EIO
 *		EROFS
 *		EBUSY
 *
 *     Context: Kernel thread context
 */
/* ARGSUSED */
static int
sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
{
	struct sd_lun	*un;
	int		nodelay;
	int		part;
	uint64_t	partmask;
	int		instance;
	dev_t		dev;
	int		rval = EIO;
	diskaddr_t	nblks = 0;
	diskaddr_t	label_cap;

	/* Validate the open type */
	if (otyp >= OTYPCNT) {
		return (EINVAL);
	}

	dev = *dev_p;
	instance = SDUNIT(dev);
	mutex_enter(&sd_detach_mutex);

	/*
	 * Fail the open if there is no softstate for the instance, or
	 * if another thread somewhere is trying to detach the instance.
	 */
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (un->un_detach_count != 0)) {
		mutex_exit(&sd_detach_mutex);
		/*
		 * The probe cache only needs to be cleared when open (9e) fails
		 * with ENXIO (4238046).
		 */
		/*
		 * un-conditionally clearing probe cache is ok with
		 * separate sd/ssd binaries
		 * x86 platform can be an issue with both parallel
		 * and fibre in 1 binary
		 */
		sd_scsi_clear_probe_cache();
		return (ENXIO);
	}

	/*
	 * The un_layer_count is to prevent another thread in specfs from
	 * trying to detach the instance, which can happen when we are
	 * called from a higher-layer driver instead of thru specfs.
	 * This will not be needed when DDI provides a layered driver
	 * interface that allows specfs to know that an instance is in
	 * use by a layered driver & should not be detached.
	 *
	 * Note: the semantics for layered driver opens are exactly one
	 * close for every open.
	 */
	if (otyp == OTYP_LYR) {
		un->un_layer_count++;
	}

	/*
	 * Keep a count of the current # of opens in progress. This is because
	 * some layered drivers try to call us as a regular open. This can
	 * cause problems that we cannot prevent, however by keeping this count
	 * we can at least keep our open and detach routines from racing against
	 * each other under such conditions.
	 */
	un->un_opens_in_progress++;
	mutex_exit(&sd_detach_mutex);

	nodelay = (flag & (FNDELAY | FNONBLOCK));
	part = SDPART(dev);
	partmask = 1 << part;

	/*
	 * We use a semaphore here in order to serialize
	 * open and close requests on the device.
	 */
	sema_p(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	/*
	 * All device accesses go thru sdstrategy() where we check
	 * on suspend status but there could be a scsi_poll command,
	 * which bypasses sdstrategy(), so we need to check pm
	 * status.
	 */

	if (!nodelay) {
		/* Block until any suspend or power-level change completes. */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}

		/*
		 * Take a PM "busy" reference; a matching sd_pm_exit() is
		 * done on every exit path below.
		 */
		mutex_exit(SD_MUTEX(un));
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			rval = EIO;
			SD_ERROR(SD_LOG_OPEN_CLOSE, un,
			    "sdopen: sd_pm_entry failed\n");
			goto open_failed_with_pm;
		}
		mutex_enter(SD_MUTEX(un));
	}

	/* check for previous exclusive open */
	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
	    "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);

	/* Fail if another thread holds this partition exclusively. */
	if (un->un_exclopen & (partmask)) {
		goto excl_open_fail;
	}

	/*
	 * For an FEXCL open, fail if the partition is already open via
	 * any layered open or any regular open type.
	 */
	if (flag & FEXCL) {
		int i;
		if (un->un_ocmap.lyropen[part]) {
			goto excl_open_fail;
		}
		for (i = 0; i < (OTYPCNT - 1); i++) {
			if (un->un_ocmap.regopen[i] & (partmask)) {
				goto excl_open_fail;
			}
		}
	}

	/*
	 * Check the write permission if this is a removable media device,
	 * NDELAY has not been set, and writable permission is requested.
	 *
	 * Note: If NDELAY was set and this is write-protected media the WRITE
	 * attempt will fail with EIO as part of the I/O processing. This is a
	 * more permissive implementation that allows the open to succeed and
	 * WRITE attempts to fail when appropriate.
	 */
	if (un->un_f_chk_wp_open) {
		if ((flag & FWRITE) && (!nodelay)) {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Defer the check for write permission on writable
			 * DVD drive till sdstrategy and will not fail open even
			 * if FWRITE is set as the device can be writable
			 * depending upon the media and the media can change
			 * after the call to open().
			 */
			if (un->un_f_dvdram_writable_device == FALSE) {
				if (ISCD(un) || sr_check_wp(dev)) {
					rval = EROFS;
					mutex_enter(SD_MUTEX(un));
					SD_ERROR(SD_LOG_OPEN_CLOSE, un,
					    "sdopen: "
					    "write to cd or write protected media\n");
					goto open_fail;
				}
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * If opening in NDELAY/NONBLOCK mode, just return.
	 * Check if disk is ready and has a valid geometry later.
	 */
	if (!nodelay) {
		sd_ssc_t	*ssc;

		mutex_exit(SD_MUTEX(un));
		ssc = sd_ssc_init(un);
		rval = sd_ready_and_valid(ssc, part);
		sd_ssc_fini(ssc);
		mutex_enter(SD_MUTEX(un));
		/*
		 * Fail if device is not ready or if the number of disk
		 * blocks is zero or negative for non CD devices.
		 */

		nblks = 0;

		if (rval == SD_READY_VALID && (!ISCD(un))) {
			/* if cmlb_partinfo fails, nblks remains 0 */
			mutex_exit(SD_MUTEX(un));
			(void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
			    NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
		}

		if ((rval != SD_READY_VALID) ||
		    (!ISCD(un) && nblks <= 0)) {
			rval = un->un_f_has_removable_media ? ENXIO : EIO;
			SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
			    "device not ready or invalid disk block value\n");
			goto open_fail;
		}
#if defined(__i386) || defined(__amd64)
	} else {
		uchar_t *cp;
		/*
		 * x86 requires special nodelay handling, so that p0 is
		 * always defined and accessible.
		 * Invalidate geometry only if device is not already open.
		 */
		cp = &un->un_ocmap.chkd[0];
		while (cp < &un->un_ocmap.chkd[OCSIZE]) {
			if (*cp != (uchar_t)0) {
				break;
			}
			cp++;
		}
		if (cp == &un->un_ocmap.chkd[OCSIZE]) {
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
		}

#endif
	}

	/* Record this open in the per-type open partition map. */
	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part]++;
	} else {
		un->un_ocmap.regopen[otyp] |= partmask;
	}

	/* Set up open and exclusive open flags */
	if (flag & FEXCL) {
		un->un_exclopen |= (partmask);
	}

	/*
	 * If the lun is EFI labeled and lun capacity is greater than the
	 * capacity contained in the label, log a sys-event to notify the
	 * interested module.
	 * To avoid an infinite loop of logging sys-event, we only log the
	 * event when the lun is not opened in NDELAY mode. The event handler
	 * should open the lun in NDELAY mode.
	 */
	if (!(flag & FNDELAY)) {
		mutex_exit(SD_MUTEX(un));
		if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
		    (void*)SD_PATH_DIRECT) == 0) {
			mutex_enter(SD_MUTEX(un));
			if (un->un_f_blockcount_is_valid &&
			    un->un_blockcount > label_cap) {
				mutex_exit(SD_MUTEX(un));
				sd_log_lun_expansion_event(un,
				    (nodelay ? KM_NOSLEEP : KM_SLEEP));
				mutex_enter(SD_MUTEX(un));
			}
		} else {
			mutex_enter(SD_MUTEX(un));
		}
	}

	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: "
	    "open of part %d type %d\n", part, otyp);

	mutex_exit(SD_MUTEX(un));
	if (!nodelay) {
		sd_pm_exit(un);
	}

	sema_v(&un->un_semoclose);

	mutex_enter(&sd_detach_mutex);
	un->un_opens_in_progress--;
	mutex_exit(&sd_detach_mutex);

	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n");
	return (DDI_SUCCESS);

excl_open_fail:
	SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n");
	rval = EBUSY;

open_fail:
	mutex_exit(SD_MUTEX(un));

	/*
	 * On a failed open we must exit the pm management.
	 */
	if (!nodelay) {
		sd_pm_exit(un);
	}
open_failed_with_pm:
	sema_v(&un->un_semoclose);

	/* Undo the open-in-progress and layered-open accounting. */
	mutex_enter(&sd_detach_mutex);
	un->un_opens_in_progress--;
	if (otyp == OTYP_LYR) {
		un->un_layer_count--;
	}
	mutex_exit(&sd_detach_mutex);

	return (rval);
}


/*
 * Function: sdclose
 *
 * Description: Driver's close(9e) entry point function.
10273 * 10274 * Arguments: dev - device number 10275 * flag - file status flag, informational only 10276 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 10277 * cred_p - user credential pointer 10278 * 10279 * Return Code: ENXIO 10280 * 10281 * Context: Kernel thread context 10282 */ 10283 /* ARGSUSED */ 10284 static int 10285 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 10286 { 10287 struct sd_lun *un; 10288 uchar_t *cp; 10289 int part; 10290 int nodelay; 10291 int rval = 0; 10292 10293 /* Validate the open type */ 10294 if (otyp >= OTYPCNT) { 10295 return (ENXIO); 10296 } 10297 10298 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10299 return (ENXIO); 10300 } 10301 10302 part = SDPART(dev); 10303 nodelay = flag & (FNDELAY | FNONBLOCK); 10304 10305 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 10306 "sdclose: close of part %d type %d\n", part, otyp); 10307 10308 /* 10309 * We use a semaphore here in order to serialize 10310 * open and close requests on the device. 10311 */ 10312 sema_p(&un->un_semoclose); 10313 10314 mutex_enter(SD_MUTEX(un)); 10315 10316 /* Don't proceed if power is being changed. */ 10317 while (un->un_state == SD_STATE_PM_CHANGING) { 10318 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10319 } 10320 10321 if (un->un_exclopen & (1 << part)) { 10322 un->un_exclopen &= ~(1 << part); 10323 } 10324 10325 /* Update the open partition map */ 10326 if (otyp == OTYP_LYR) { 10327 un->un_ocmap.lyropen[part] -= 1; 10328 } else { 10329 un->un_ocmap.regopen[otyp] &= ~(1 << part); 10330 } 10331 10332 cp = &un->un_ocmap.chkd[0]; 10333 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10334 if (*cp != NULL) { 10335 break; 10336 } 10337 cp++; 10338 } 10339 10340 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10341 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 10342 10343 /* 10344 * We avoid persistance upon the last close, and set 10345 * the throttle back to the maximum. 
10346 */ 10347 un->un_throttle = un->un_saved_throttle; 10348 10349 if (un->un_state == SD_STATE_OFFLINE) { 10350 if (un->un_f_is_fibre == FALSE) { 10351 scsi_log(SD_DEVINFO(un), sd_label, 10352 CE_WARN, "offline\n"); 10353 } 10354 mutex_exit(SD_MUTEX(un)); 10355 cmlb_invalidate(un->un_cmlbhandle, 10356 (void *)SD_PATH_DIRECT); 10357 mutex_enter(SD_MUTEX(un)); 10358 10359 } else { 10360 /* 10361 * Flush any outstanding writes in NVRAM cache. 10362 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 10363 * cmd, it may not work for non-Pluto devices. 10364 * SYNCHRONIZE CACHE is not required for removables, 10365 * except DVD-RAM drives. 10366 * 10367 * Also note: because SYNCHRONIZE CACHE is currently 10368 * the only command issued here that requires the 10369 * drive be powered up, only do the power up before 10370 * sending the Sync Cache command. If additional 10371 * commands are added which require a powered up 10372 * drive, the following sequence may have to change. 10373 * 10374 * And finally, note that parallel SCSI on SPARC 10375 * only issues a Sync Cache to DVD-RAM, a newly 10376 * supported device. 10377 */ 10378 #if defined(__i386) || defined(__amd64) 10379 if ((un->un_f_sync_cache_supported && 10380 un->un_f_sync_cache_required) || 10381 un->un_f_dvdram_writable_device == TRUE) { 10382 #else 10383 if (un->un_f_dvdram_writable_device == TRUE) { 10384 #endif 10385 mutex_exit(SD_MUTEX(un)); 10386 if (sd_pm_entry(un) == DDI_SUCCESS) { 10387 rval = 10388 sd_send_scsi_SYNCHRONIZE_CACHE(un, 10389 NULL); 10390 /* ignore error if not supported */ 10391 if (rval == ENOTSUP) { 10392 rval = 0; 10393 } else if (rval != 0) { 10394 rval = EIO; 10395 } 10396 sd_pm_exit(un); 10397 } else { 10398 rval = EIO; 10399 } 10400 mutex_enter(SD_MUTEX(un)); 10401 } 10402 10403 /* 10404 * For devices which supports DOOR_LOCK, send an ALLOW 10405 * MEDIA REMOVAL command, but don't get upset if it 10406 * fails. 
We need to raise the power of the drive before 10407 * we can call sd_send_scsi_DOORLOCK() 10408 */ 10409 if (un->un_f_doorlock_supported) { 10410 mutex_exit(SD_MUTEX(un)); 10411 if (sd_pm_entry(un) == DDI_SUCCESS) { 10412 sd_ssc_t *ssc; 10413 10414 ssc = sd_ssc_init(un); 10415 rval = sd_send_scsi_DOORLOCK(ssc, 10416 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 10417 if (rval != 0) 10418 sd_ssc_assessment(ssc, 10419 SD_FMT_IGNORE); 10420 sd_ssc_fini(ssc); 10421 10422 sd_pm_exit(un); 10423 if (ISCD(un) && (rval != 0) && 10424 (nodelay != 0)) { 10425 rval = ENXIO; 10426 } 10427 } else { 10428 rval = EIO; 10429 } 10430 mutex_enter(SD_MUTEX(un)); 10431 } 10432 10433 /* 10434 * If a device has removable media, invalidate all 10435 * parameters related to media, such as geometry, 10436 * blocksize, and blockcount. 10437 */ 10438 if (un->un_f_has_removable_media) { 10439 sr_ejected(un); 10440 } 10441 10442 /* 10443 * Destroy the cache (if it exists) which was 10444 * allocated for the write maps since this is 10445 * the last close for this media. 10446 */ 10447 if (un->un_wm_cache) { 10448 /* 10449 * Check if there are pending commands. 10450 * and if there are give a warning and 10451 * do not destroy the cache. 10452 */ 10453 if (un->un_ncmds_in_driver > 0) { 10454 scsi_log(SD_DEVINFO(un), 10455 sd_label, CE_WARN, 10456 "Unable to clean up memory " 10457 "because of pending I/O\n"); 10458 } else { 10459 kmem_cache_destroy( 10460 un->un_wm_cache); 10461 un->un_wm_cache = NULL; 10462 } 10463 } 10464 } 10465 } 10466 10467 mutex_exit(SD_MUTEX(un)); 10468 sema_v(&un->un_semoclose); 10469 10470 if (otyp == OTYP_LYR) { 10471 mutex_enter(&sd_detach_mutex); 10472 /* 10473 * The detach routine may run when the layer count 10474 * drops to zero. 
10475 */ 10476 un->un_layer_count--; 10477 mutex_exit(&sd_detach_mutex); 10478 } 10479 10480 return (rval); 10481 } 10482 10483 10484 /* 10485 * Function: sd_ready_and_valid 10486 * 10487 * Description: Test if device is ready and has a valid geometry. 10488 * 10489 * Arguments: ssc - sd_ssc_t will contain un 10490 * un - driver soft state (unit) structure 10491 * 10492 * Return Code: SD_READY_VALID ready and valid label 10493 * SD_NOT_READY_VALID not ready, no label 10494 * SD_RESERVED_BY_OTHERS reservation conflict 10495 * 10496 * Context: Never called at interrupt context. 10497 */ 10498 10499 static int 10500 sd_ready_and_valid(sd_ssc_t *ssc, int part) 10501 { 10502 struct sd_errstats *stp; 10503 uint64_t capacity; 10504 uint_t lbasize; 10505 int rval = SD_READY_VALID; 10506 char name_str[48]; 10507 boolean_t is_valid; 10508 struct sd_lun *un; 10509 int status; 10510 10511 ASSERT(ssc != NULL); 10512 un = ssc->ssc_un; 10513 ASSERT(un != NULL); 10514 ASSERT(!mutex_owned(SD_MUTEX(un))); 10515 10516 mutex_enter(SD_MUTEX(un)); 10517 /* 10518 * If a device has removable media, we must check if media is 10519 * ready when checking if this device is ready and valid. 10520 */ 10521 if (un->un_f_has_removable_media) { 10522 mutex_exit(SD_MUTEX(un)); 10523 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10524 10525 if (status != 0) { 10526 rval = SD_NOT_READY_VALID; 10527 mutex_enter(SD_MUTEX(un)); 10528 10529 /* Ignore all failed status for removalbe media */ 10530 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10531 10532 goto done; 10533 } 10534 10535 is_valid = SD_IS_VALID_LABEL(un); 10536 mutex_enter(SD_MUTEX(un)); 10537 if (!is_valid || 10538 (un->un_f_blockcount_is_valid == FALSE) || 10539 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 10540 10541 /* capacity has to be read every open. 
			 */
			mutex_exit(SD_MUTEX(un));
			status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
			    &lbasize, SD_PATH_DIRECT);

			if (status != 0) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);

				cmlb_invalidate(un->un_cmlbhandle,
				    (void *)SD_PATH_DIRECT);
				mutex_enter(SD_MUTEX(un));
				rval = SD_NOT_READY_VALID;

				goto done;
			} else {
				mutex_enter(SD_MUTEX(un));
				sd_update_block_info(un, lbasize, capacity);
			}
		}

		/*
		 * Check if the media in the device is writable or not.
		 */
		if (!is_valid && ISCD(un)) {
			sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
		}

	} else {
		/*
		 * Do a test unit ready to clear any unit attention from non-cd
		 * devices.
		 */
		mutex_exit(SD_MUTEX(un));

		status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
		if (status != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		mutex_enter(SD_MUTEX(un));
	}


	/*
	 * If this is a non 512 block device, allocate space for
	 * the wmap cache. This is being done here since every time
	 * a media is changed this routine will be called and the
	 * block size is a function of media rather than device.
	 */
	if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR ||
	    un->un_f_non_devbsize_supported) &&
	    un->un_tgt_blocksize != DEV_BSIZE) {
		if (!(un->un_wm_cache)) {
			(void) snprintf(name_str, sizeof (name_str),
			    "%s%d_cache",
			    ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			un->un_wm_cache = kmem_cache_create(
			    name_str, sizeof (struct sd_w_map),
			    8, sd_wm_cache_constructor,
			    sd_wm_cache_destructor, NULL,
			    (void *)un, NULL, 0);
			if (!(un->un_wm_cache)) {
				rval = ENOMEM;
				goto done;
			}
		}
	}

	if (un->un_state == SD_STATE_NORMAL) {
		/*
		 * If the target is not yet ready here (defined by a TUR
		 * failure), invalidate the geometry and print an 'offline'
		 * message. This is a legacy message, as the state of the
		 * target is not actually changed to SD_STATE_OFFLINE.
		 *
		 * If the TUR fails for EACCES (Reservation Conflict),
		 * SD_RESERVED_BY_OTHERS will be returned to indicate
		 * reservation conflict. If the TUR fails for other
		 * reasons, SD_NOT_READY_VALID will be returned.
		 */
		int err;

		mutex_exit(SD_MUTEX(un));
		err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
		mutex_enter(SD_MUTEX(un));

		if (err != 0) {
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
			if (err == EACCES) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "reservation conflict\n");
				rval = SD_RESERVED_BY_OTHERS;
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			} else {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "drive offline\n");
				rval = SD_NOT_READY_VALID;
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			}
			goto done;
		}
	}

	/*
	 * Validate the label and partition map via cmlb, unless a
	 * format is in progress (in which case the label is expected
	 * to be in flux).
	 */
	if (un->un_f_format_in_progress == FALSE) {
		mutex_exit(SD_MUTEX(un));

		(void) cmlb_validate(un->un_cmlbhandle, 0,
		    (void *)SD_PATH_DIRECT);
		if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL,
		    NULL, (void *) SD_PATH_DIRECT) != 0) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));

			goto done;
		}
		if (un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_ready_and_valid: un:0x%p pstats created and "
			    "set\n", un);
		}
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * If this device supports DOOR_LOCK command, try and send
	 * this command to PREVENT MEDIA REMOVAL, but don't get upset
	 * if it fails. For a CD, however, it is an error
	 */
	if (un->un_f_doorlock_supported) {
		mutex_exit(SD_MUTEX(un));
		status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT);

		if ((status != 0) && ISCD(un)) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));

			sd_ssc_assessment(ssc, SD_FMT_IGNORE);

			goto done;
		} else if (status != 0)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		mutex_enter(SD_MUTEX(un));
	}

	/* The state has changed, inform the media watch routines */
	un->un_mediastate = DKIO_INSERTED;
	cv_broadcast(&un->un_state_cv);
	rval = SD_READY_VALID;

done:

	/*
	 * Initialize the capacity kstat value, if no media previously
	 * (capacity kstat is 0) and a media has been inserted
	 * (un_blockcount > 0).
	 */
	if (un->un_errstats != NULL) {
		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		if ((stp->sd_capacity.value.ui64 == 0) &&
		    (un->un_f_blockcount_is_valid == TRUE)) {
			stp->sd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_blockcount *
			    un->un_sys_blocksize);
		}
	}

	mutex_exit(SD_MUTEX(un));
	return (rval);
}


/*
 * Function: sdmin
 *
 * Description: Routine to limit the size of a data transfer. Used in
 *		conjunction with physio(9F).
 *
 *   Arguments: bp - pointer to the indicated buf(9S) struct.
 *
 *     Context: Kernel thread context.
 */

static void
sdmin(struct buf *bp)
{
	struct sd_lun	*un;
	int		instance;

	instance = SDUNIT(bp->b_edev);

	un = ddi_get_soft_state(sd_state, instance);
	ASSERT(un != NULL);

	/*
	 * We depend on DMA partial or buf breakup to restrict
	 * IO size if any of them enabled.
	 */
	if (un->un_partial_dma_supported ||
	    un->un_buf_breakup_supported) {
		return;
	}

	/* Clamp the transfer to the device's maximum transfer size. */
	if (bp->b_bcount > un->un_max_xfer_size) {
		bp->b_bcount = un->un_max_xfer_size;
	}
}


/*
 * Function: sdread
 *
 * Description: Driver's read(9e) entry point function.
 *
 *   Arguments: dev	- device number
 *		uio	- structure pointer describing where data is to be
 *			  stored in user's space
 *		cred_p	- user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by physio
 *
 *     Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	int		secmask;
	int		err = 0;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));


	/*
	 * If there is no valid label yet (and this is not a CD), try to
	 * make the device ready and validate its label before doing I/O.
	 */
	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));

		/* Initialize sd_ssc_t for internal uscsi commands */
		ssc = sd_ssc_init(un);
		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
			err = EIO;
		} else {
			err = 0;
		}
		sd_ssc_fini(ssc);

		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
		if (err != 0)
			return (err);
	}

	/*
	 * Read requests are restricted to multiples of the system block size.
	 */
	if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR)
		secmask = un->un_tgt_blocksize - 1;
	else
		secmask = DEV_BSIZE - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdread: file offset not modulo %d\n",
		    secmask + 1);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdread: transfer length not modulo %d\n",
		    secmask + 1);
		err = EINVAL;
	} else {
		err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
	}

	return (err);
}


/*
 * Function: sdwrite
 *
 * Description: Driver's write(9e) entry point function.
 *
 *   Arguments: dev	- device number
 *		uio	- structure pointer describing where data is stored
 *			  in user's space
 *		cred_p	- user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by physio
 *
 *     Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	int		secmask;
	int		err = 0;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * If there is no valid label yet (and this is not a CD), try to
	 * make the device ready and validate its label before doing I/O.
	 */
	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));

		/* Initialize sd_ssc_t for internal uscsi commands */
		ssc = sd_ssc_init(un);
		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
			err = EIO;
		} else {
			err = 0;
		}
		sd_ssc_fini(ssc);

		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
		if (err != 0)
			return (err);
	}

	/*
	 * Write requests are restricted to multiples of the system block size.
	 */
	if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR)
		secmask = un->un_tgt_blocksize - 1;
	else
		secmask = DEV_BSIZE - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdwrite: file offset not modulo %d\n",
		    secmask + 1);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdwrite: transfer length not modulo %d\n",
		    secmask + 1);
		err = EINVAL;
	} else {
		err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
	}

	return (err);
}


/*
 * Function: sdaread
 *
 * Description: Driver's aread(9e) entry point function.
 *
 *   Arguments: dev	- device number
 *		aio	- structure pointer describing where data is to be
 *			  stored
 *		cred_p	- user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by aphysio
 *
 *     Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	struct uio	*uio = aio->aio_uio;
	int		secmask;
	int		err = 0;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * If there is no valid label yet (and this is not a CD), try to
	 * make the device ready and validate its label before doing I/O.
	 */
	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));

		/* Initialize sd_ssc_t for internal uscsi commands */
		ssc = sd_ssc_init(un);
		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
			err = EIO;
		} else {
			err = 0;
		}
		sd_ssc_fini(ssc);

		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
		if (err != 0)
			return (err);
	}

	/*
	 * Read requests are restricted to multiples of the system block size.
	 */
	if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR)
		secmask = un->un_tgt_blocksize - 1;
	else
		secmask = DEV_BSIZE - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdaread: file offset not modulo %d\n",
		    secmask + 1);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdaread: transfer length not modulo %d\n",
		    secmask + 1);
		err = EINVAL;
	} else {
		err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
	}

	return (err);
}


/*
 * Function: sdawrite
 *
 * Description: Driver's awrite(9e) entry point function.
 *
 *   Arguments: dev	- device number
 *		aio	- structure pointer describing where data is stored
 *		cred_p	- user credential pointer
 *
 * Return Code: ENXIO
 *		EIO
 *		EINVAL
 *		value returned by aphysio
 *
 *     Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
	struct sd_lun	*un = NULL;
	struct uio	*uio = aio->aio_uio;
	int		secmask;
	int		err = 0;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * If there is no valid label yet (and this is not a CD), try to
	 * make the device ready and validate its label before doing I/O.
	 */
	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if its power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));

		/* Initialize sd_ssc_t for internal uscsi commands */
		ssc = sd_ssc_init(un);
		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
			err = EIO;
		} else {
			err = 0;
		}
		sd_ssc_fini(ssc);

		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
		if (err != 0)
			return (err);
	}

	/*
	 * Write requests are restricted to multiples of the system block size.
	 */
	if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR)
		secmask = un->un_tgt_blocksize - 1;
	else
		secmask = DEV_BSIZE - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdawrite: file offset not modulo %d\n",
		    secmask + 1);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdawrite: transfer length not modulo %d\n",
		    secmask + 1);
		err = EINVAL;
	} else {
		err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio);
	}

	return (err);
}





/*
 * Driver IO processing follows the following sequence:
 *
 *     sdioctl(9E)     sdstrategy(9E)         biodone(9F)
 *         |                |                     ^
 *         v                v                     |
 * sd_send_scsi_cmd()  ddi_xbuf_qstrategy()       +-------------------+
 *         |                |                     |                   |
 *         v                |                     |                   |
 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone()
 *         |                |                     ^                   ^
 *         v                v                     |                   |
 * SD_BEGIN_IOSTART()  SD_BEGIN_IOSTART()        |                   |
 *         |                |                     |                   |
 *     +---+                |                     +------------+  +-------+
 *     |                    |                                  |  |
 *     |   SD_NEXT_IOSTART()|                  SD_NEXT_IODONE()|  |
 *     |                    v                                  |  |
 *     |         sd_mapblockaddr_iostart()  sd_mapblockaddr_iodone() |
 *     |                    |                                  ^  |
 *     |   SD_NEXT_IOSTART()|                  SD_NEXT_IODONE()|  |
 *     |                    v                                  |  |
 *     |         sd_mapblocksize_iostart()  sd_mapblocksize_iodone() |
 *     |                    |                                  ^  |
 *     |   SD_NEXT_IOSTART()|                  SD_NEXT_IODONE()|  |
 *     |                    v                                  |  |
 *     |           sd_checksum_iostart()     sd_checksum_iodone() |
 *     |                    |                                  ^  |
 * +-> SD_NEXT_IOSTART()|                  SD_NEXT_IODONE()+------------->+
 * |                        v                                  |  |
 * |                sd_pm_iostart()             sd_pm_iodone() |
 * |                        |                                  ^  |
 * |                        |                                  |  |
 * +-> SD_NEXT_IOSTART()|              SD_BEGIN_IODONE()--+--------------+
 *                          |                              ^
 *                          v                              |
 *                   sd_core_iostart()                     |
 *                          |                              |
 *                          |                +------>(*destroypkt)()
 *                          +-> sd_start_cmds() <-+   |    |
 *                          |                     |   |    v
 *                          |                     |   | scsi_destroy_pkt(9F)
 *                          |                     |   |
 *                          +->(*initpkt)()       +- sdintr()
 *                          |  |                  |   |
 *                          |  +-> scsi_init_pkt(9F)  |  +-> sd_handle_xxx()
 *                          |  +-> scsi_setup_cdb(9F) |
 *                          |                         |
 *                          +--> scsi_transport(9F)   |
 *                                     |              |
 *                                     +----> SCSA ---->+
 *
 *
 * This code is based upon the following presumptions:
 *
 *   - iostart and iodone functions operate on buf(9S) structures. These
 *     functions perform the necessary operations on the buf(9S) and pass
 *     them along to the next function in the chain by using the macros
 *     SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE()
 *     (for iodone side functions).
 *
 *   - The iostart side functions may sleep. The iodone side functions
 *     are called under interrupt context and may NOT sleep. Therefore
 *     iodone side functions also may not call iostart side functions.
 *     (NOTE: iostart side functions should NOT sleep for memory, as
 *     this could result in deadlock.)
 *
 *   - An iostart side function may call its corresponding iodone side
 *     function directly (if necessary).
 *
 *   - In the event of an error, an iostart side function can return a buf(9S)
 *     to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and
 *     b_error in the usual way of course).
 *
 *   - The taskq mechanism may be used by the iodone side functions to dispatch
 *     requests to the iostart side functions. The iostart side functions in
 *     this case would be called under the context of a taskq thread, so it's
 *     OK for them to block/sleep/spin in this case.
 *
 *   - iostart side functions may allocate "shadow" buf(9S) structs and
 *     pass them along to the next function in the chain. The corresponding
 *     iodone side functions must coalesce the "shadow" bufs and return
 *     the "original" buf to the next higher layer.
 *
 *   - The b_private field of the buf(9S) struct holds a pointer to
 *     an sd_xbuf struct, which contains information needed to
 *     construct the scsi_pkt for the command.
 *
 *   - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
 *     layer must acquire & release the SD_MUTEX(un) as needed.
 */


/*
 * Create taskq for all targets in the system. This is created at
 * _init(9E) and destroyed at _fini(9E).
 *
 * Note: here we set the minalloc to a reasonably high number to ensure that
 * we will have an adequate supply of task entries available at interrupt time.
 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
 * sd_taskq_create(). Since we do not want to sleep for allocations at
 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
 * requests at any one instant in time.
 */
#define	SD_TASKQ_NUMTHREADS	8
#define	SD_TASKQ_MINALLOC	256
#define	SD_TASKQ_MAXALLOC	256

static taskq_t	*sd_tq = NULL;
_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))

static int	sd_taskq_minalloc = SD_TASKQ_MINALLOC;
static int	sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;

/*
 * The following task queue is being created for the write part of
 * read-modify-write of non-512 block size devices.
 * Limit the number of threads to 1 for now. This number has been chosen
 * considering the fact that it applies only to dvd ram drives/MO drives
 * currently. Performance for these is not a main criterion at this stage.
11236 * Note: It needs to be explored if we can use a single taskq in future 11237 */ 11238 #define SD_WMR_TASKQ_NUMTHREADS 1 11239 static taskq_t *sd_wmr_tq = NULL; 11240 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 11241 11242 /* 11243 * Function: sd_taskq_create 11244 * 11245 * Description: Create taskq thread(s) and preallocate task entries 11246 * 11247 * Return Code: Returns a pointer to the allocated taskq_t. 11248 * 11249 * Context: Can sleep. Requires blockable context. 11250 * 11251 * Notes: - The taskq() facility currently is NOT part of the DDI. 11252 * (definitely NOT recommeded for 3rd-party drivers!) :-) 11253 * - taskq_create() will block for memory, also it will panic 11254 * if it cannot create the requested number of threads. 11255 * - Currently taskq_create() creates threads that cannot be 11256 * swapped. 11257 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 11258 * supply of taskq entries at interrupt time (ie, so that we 11259 * do not have to sleep for memory) 11260 */ 11261 11262 static void 11263 sd_taskq_create(void) 11264 { 11265 char taskq_name[TASKQ_NAMELEN]; 11266 11267 ASSERT(sd_tq == NULL); 11268 ASSERT(sd_wmr_tq == NULL); 11269 11270 (void) snprintf(taskq_name, sizeof (taskq_name), 11271 "%s_drv_taskq", sd_label); 11272 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 11273 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11274 TASKQ_PREPOPULATE)); 11275 11276 (void) snprintf(taskq_name, sizeof (taskq_name), 11277 "%s_rmw_taskq", sd_label); 11278 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 11279 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11280 TASKQ_PREPOPULATE)); 11281 } 11282 11283 11284 /* 11285 * Function: sd_taskq_delete 11286 * 11287 * Description: Complementary cleanup routine for sd_taskq_create(). 11288 * 11289 * Context: Kernel thread context. 
 */

static void
sd_taskq_delete(void)
{
	/* Both queues must exist; created together in sd_taskq_create(). */
	ASSERT(sd_tq != NULL);
	ASSERT(sd_wmr_tq != NULL);
	taskq_destroy(sd_tq);
	taskq_destroy(sd_wmr_tq);
	sd_tq = NULL;
	sd_wmr_tq = NULL;
}


/*
 * Function: sdstrategy
 *
 * Description: Driver's strategy (9E) entry point function.
 *
 * Arguments: bp - pointer to buf(9S)
 *
 * Return Code: Always returns zero
 *
 * Context: Kernel thread context.
 */

static int
sdstrategy(struct buf *bp)
{
	struct sd_lun *un;

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	if (un == NULL) {
		/* No soft state for this instance: fail the buf with EIO. */
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	/* As was done in the past, fail new cmds. if state is dumping. */
	if (un->un_state == SD_STATE_DUMPING) {
		bioerror(bp, ENXIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * Commands may sneak in while we released the mutex in
	 * DDI_SUSPEND, we should block new commands. However, old
	 * commands that are still in the driver at this point should
	 * still be allowed to drain.
	 */
	mutex_enter(SD_MUTEX(un));
	/*
	 * Must wait here if either the device is suspended or
	 * if its power level is changing.
	 */
	while ((un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}

	un->un_ncmds_in_driver++;

	/*
	 * atapi: Since we are running the CD for now in PIO mode we need to
	 * call bp_mapin here to avoid bp_mapin called interrupt context under
	 * the HBA's init_pkt routine.
	 * (bp_mapin may sleep, so the mutex is dropped around the call.)
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mutex_exit(SD_MUTEX(un));
		bp_mapin(bp);
		mutex_enter(SD_MUTEX(un));
	}
	SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/* A write makes the cache dirty; remember to sync it later. */
	if (bp->b_flags & B_WRITE)
		un->un_f_sync_cache_required = TRUE;

	mutex_exit(SD_MUTEX(un));

	/*
	 * This will (eventually) allocate the sd_xbuf area and
	 * call sd_xbuf_strategy(). We just want to return the
	 * result of ddi_xbuf_qstrategy so that we have an opt-
	 * imized tail call which saves us a stack frame.
	 */
	return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
}


/*
 * Function: sd_xbuf_strategy
 *
 * Description: Function for initiating IO operations via the
 *		ddi_xbuf_qstrategy() mechanism.
 *
 * Context: Kernel thread context.
 */

static void
sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * Initialize the fields in the xbuf and save a pointer to the
	 * xbuf in bp->b_private.
	 */
	sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);

	/* Send the buf down the iostart chain */
	SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
}


/*
 * Function: sd_xbuf_init
 *
 * Description: Prepare the given sd_xbuf struct for use.
 *
 * Arguments: un - ptr to softstate
 *	      bp - ptr to associated buf(9S)
 *	      xp - ptr to associated sd_xbuf
 *	      chain_type - IO chain type to use:
 *			SD_CHAIN_NULL
 *			SD_CHAIN_BUFIO
 *			SD_CHAIN_USCSI
 *			SD_CHAIN_DIRECT
 *			SD_CHAIN_DIRECT_PRIORITY
 *	      pktinfop - ptr to private data struct for scsi_pkt(9S)
 *			 initialization; may be NULL if none.
 *
 * Context: Kernel thread context
 */

static void
sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop)
{
	int index;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);

	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
	    bp, chain_type);

	xp->xb_un = un;
	xp->xb_pktp = NULL;
	xp->xb_pktinfo = pktinfop;
	/* Preserve the caller's b_private; it is restored at iodone time. */
	xp->xb_private = bp->b_private;
	xp->xb_blkno = (daddr_t)bp->b_blkno;

	/*
	 * Set up the iostart and iodone chain indexes in the xbuf, based
	 * upon the specified chain type to use.
	 */
	switch (chain_type) {
	case SD_CHAIN_NULL:
		/*
		 * Fall thru to just use the values for the buf type, even
		 * tho for the NULL chain these values will never be used.
		 */
		/* FALLTHRU */
	case SD_CHAIN_BUFIO:
		index = un->un_buf_chain_type;
		if ((!un->un_f_has_removable_media) &&
		    (un->un_tgt_blocksize != 0) &&
		    (un->un_tgt_blocksize != DEV_BSIZE)) {
			/*
			 * Non-512 target block size on a fixed disk: if
			 * the request is not aligned to the target block
			 * size, switch to a read-modify-write chain
			 * (unless configured to return an error instead).
			 */
			int secmask = 0, blknomask = 0;
			blknomask =
			    (un->un_tgt_blocksize / DEV_BSIZE) - 1;
			secmask = un->un_tgt_blocksize - 1;

			if ((bp->b_lblkno & (blknomask)) ||
			    (bp->b_bcount & (secmask))) {
				if (un->un_f_rmw_type !=
				    SD_RMW_TYPE_RETURN_ERROR) {
					if (un->un_f_pm_is_enabled == FALSE)
						index =
						    SD_CHAIN_INFO_MSS_DSK_NO_PM;
					else
						index =
						    SD_CHAIN_INFO_MSS_DISK;
				}
			}
		}
		break;
	case SD_CHAIN_USCSI:
		index = un->un_uscsi_chain_type;
		break;
	case SD_CHAIN_DIRECT:
		index = un->un_direct_chain_type;
		break;
	case SD_CHAIN_DIRECT_PRIORITY:
		index = un->un_priority_chain_type;
		break;
	default:
		/* We're really broken if we ever get here... */
		panic("sd_xbuf_init: illegal chain type!");
		/*NOTREACHED*/
	}

	xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
	xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;

	/*
	 * It might be a bit easier to simply bzero the entire xbuf above,
	 * but it turns out that since we init a fair number of members anyway,
	 * we save a fair number of cycles by doing explicit assignment of
	 * zero.
	 */
	xp->xb_pkt_flags = 0;
	xp->xb_dma_resid = 0;
	xp->xb_retry_count = 0;
	xp->xb_victim_retry_count = 0;
	xp->xb_ua_retry_count = 0;
	xp->xb_nr_retry_count = 0;
	xp->xb_sense_bp = NULL;
	xp->xb_sense_status = 0;
	xp->xb_sense_state = 0;
	xp->xb_sense_resid = 0;
	xp->xb_ena = 0;

	/* Hook the xbuf onto the buf and clear stale completion state. */
	bp->b_private = xp;
	bp->b_flags &= ~(B_DONE | B_ERROR);
	bp->b_resid = 0;
	bp->av_forw = NULL;
	bp->av_back = NULL;
	bioerror(bp, 0);

	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
}


/*
 * Function: sd_uscsi_strategy
 *
 * Description: Wrapper for calling into the USCSI chain via physio(9F)
 *
 * Arguments: bp - buf struct ptr
 *
 * Return Code: Always returns 0
 *
 * Context: Kernel thread context
 */

static int
sd_uscsi_strategy(struct buf *bp)
{
	struct sd_lun		*un;
	struct sd_uscsi_info	*uip;
	struct sd_xbuf		*xp;
	uchar_t			chain_type;
	uchar_t			cmd;

	ASSERT(bp != NULL);

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	if (un == NULL) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);

	/*
	 * A pointer to a struct sd_uscsi_info is expected in bp->b_private
	 */
	ASSERT(bp->b_private != NULL);
	uip = (struct sd_uscsi_info *)bp->b_private;
	cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0];

	mutex_enter(SD_MUTEX(un));
	/*
	 * atapi: Since we are running the CD for now in PIO mode we need to
	 * call bp_mapin here to avoid bp_mapin called interrupt context under
	 * the HBA's init_pkt routine.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mutex_exit(SD_MUTEX(un));
		bp_mapin(bp);
		mutex_enter(SD_MUTEX(un));
	}
	un->un_ncmds_in_driver++;
	SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * A data-bearing write dirties the cache, except MODE SELECT
	 * which only alters mode pages.
	 */
	if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) &&
	    (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1))
		un->un_f_sync_cache_required = TRUE;

	mutex_exit(SD_MUTEX(un));

	switch (uip->ui_flags) {
	case SD_PATH_DIRECT:
		chain_type = SD_CHAIN_DIRECT;
		break;
	case SD_PATH_DIRECT_PRIORITY:
		chain_type = SD_CHAIN_DIRECT_PRIORITY;
		break;
	default:
		chain_type = SD_CHAIN_USCSI;
		break;
	}

	/*
	 * We may allocate extra buf for external USCSI commands. If the
	 * application asks for bigger than 20-byte sense data via USCSI,
	 * SCSA layer will allocate 252 bytes sense buf for that command.
	 */
	if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
	    SENSE_LENGTH) {
		xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
		    MAX_SENSE_LENGTH, KM_SLEEP);
	} else {
		xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
	}

	sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);

	/* Use the index obtained within xbuf_init */
	SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);

	return (0);
}

/*
 * Function: sd_send_scsi_cmd
 *
 * Description: Runs a USCSI command for user (when called thru sdioctl),
 *		or for the driver
 *
 * Arguments: dev - the dev_t for the device
 *	      incmd - ptr to a valid uscsi_cmd struct
 *	      flag - bit flag, indicating open settings, 32/64 bit type
 *	      dataspace - UIO_USERSPACE or UIO_SYSSPACE
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 -  successful completion of the given command
 *		EIO - scsi_uscsi_handle_command() failed
 *		ENXIO - soft state not found for specified dev
 *		EINVAL
 *		EFAULT - copyin/copyout error
 *		return code of scsi_uscsi_handle_command():
 *			EIO
 *			ENXIO
 *			EACCES
 *
 * Context: Waits for command to complete. Can sleep.
11659 */ 11660 11661 static int 11662 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 11663 enum uio_seg dataspace, int path_flag) 11664 { 11665 struct sd_lun *un; 11666 sd_ssc_t *ssc; 11667 int rval; 11668 11669 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 11670 if (un == NULL) { 11671 return (ENXIO); 11672 } 11673 11674 /* 11675 * Using sd_ssc_send to handle uscsi cmd 11676 */ 11677 ssc = sd_ssc_init(un); 11678 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag); 11679 sd_ssc_fini(ssc); 11680 11681 return (rval); 11682 } 11683 11684 /* 11685 * Function: sd_ssc_init 11686 * 11687 * Description: Uscsi end-user call this function to initialize necessary 11688 * fields, such as uscsi_cmd and sd_uscsi_info struct. 11689 * 11690 * The return value of sd_send_scsi_cmd will be treated as a 11691 * fault in various conditions. Even it is not Zero, some 11692 * callers may ignore the return value. That is to say, we can 11693 * not make an accurate assessment in sdintr, since if a 11694 * command is failed in sdintr it does not mean the caller of 11695 * sd_send_scsi_cmd will treat it as a real failure. 11696 * 11697 * To avoid printing too many error logs for a failed uscsi 11698 * packet that the caller may not treat it as a failure, the 11699 * sd will keep silent for handling all uscsi commands. 11700 * 11701 * During detach->attach and attach-open, for some types of 11702 * problems, the driver should be providing information about 11703 * the problem encountered. Device use USCSI_SILENT, which 11704 * suppresses all driver information. The result is that no 11705 * information about the problem is available. Being 11706 * completely silent during this time is inappropriate. The 11707 * driver needs a more selective filter than USCSI_SILENT, so 11708 * that information related to faults is provided. 
11709 * 11710 * To make the accurate accessment, the caller of 11711 * sd_send_scsi_USCSI_CMD should take the ownership and 11712 * get necessary information to print error messages. 11713 * 11714 * If we want to print necessary info of uscsi command, we need to 11715 * keep the uscsi_cmd and sd_uscsi_info till we can make the 11716 * assessment. We use sd_ssc_init to alloc necessary 11717 * structs for sending an uscsi command and we are also 11718 * responsible for free the memory by calling 11719 * sd_ssc_fini. 11720 * 11721 * The calling secquences will look like: 11722 * sd_ssc_init-> 11723 * 11724 * ... 11725 * 11726 * sd_send_scsi_USCSI_CMD-> 11727 * sd_ssc_send-> - - - sdintr 11728 * ... 11729 * 11730 * if we think the return value should be treated as a 11731 * failure, we make the accessment here and print out 11732 * necessary by retrieving uscsi_cmd and sd_uscsi_info' 11733 * 11734 * ... 11735 * 11736 * sd_ssc_fini 11737 * 11738 * 11739 * Arguments: un - pointer to driver soft state (unit) structure for this 11740 * target. 11741 * 11742 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct, it contains 11743 * uscsi_cmd and sd_uscsi_info. 11744 * NULL - if can not alloc memory for sd_ssc_t struct 11745 * 11746 * Context: Kernel Thread. 
11747 */ 11748 static sd_ssc_t * 11749 sd_ssc_init(struct sd_lun *un) 11750 { 11751 sd_ssc_t *ssc; 11752 struct uscsi_cmd *ucmdp; 11753 struct sd_uscsi_info *uip; 11754 11755 ASSERT(un != NULL); 11756 ASSERT(!mutex_owned(SD_MUTEX(un))); 11757 11758 /* 11759 * Allocate sd_ssc_t structure 11760 */ 11761 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP); 11762 11763 /* 11764 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine 11765 */ 11766 ucmdp = scsi_uscsi_alloc(); 11767 11768 /* 11769 * Allocate sd_uscsi_info structure 11770 */ 11771 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11772 11773 ssc->ssc_uscsi_cmd = ucmdp; 11774 ssc->ssc_uscsi_info = uip; 11775 ssc->ssc_un = un; 11776 11777 return (ssc); 11778 } 11779 11780 /* 11781 * Function: sd_ssc_fini 11782 * 11783 * Description: To free sd_ssc_t and it's hanging off 11784 * 11785 * Arguments: ssc - struct pointer of sd_ssc_t. 11786 */ 11787 static void 11788 sd_ssc_fini(sd_ssc_t *ssc) 11789 { 11790 scsi_uscsi_free(ssc->ssc_uscsi_cmd); 11791 11792 if (ssc->ssc_uscsi_info != NULL) { 11793 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info)); 11794 ssc->ssc_uscsi_info = NULL; 11795 } 11796 11797 kmem_free(ssc, sizeof (sd_ssc_t)); 11798 ssc = NULL; 11799 } 11800 11801 /* 11802 * Function: sd_ssc_send 11803 * 11804 * Description: Runs a USCSI command for user when called through sdioctl, 11805 * or for the driver. 11806 * 11807 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11808 * sd_uscsi_info in. 11809 * incmd - ptr to a valid uscsi_cmd struct 11810 * flag - bit flag, indicating open settings, 32/64 bit type 11811 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11812 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11813 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11814 * to use the USCSI "direct" chain and bypass the normal 11815 * command waitq. 
11816 * 11817 * Return Code: 0 - successful completion of the given command 11818 * EIO - scsi_uscsi_handle_command() failed 11819 * ENXIO - soft state not found for specified dev 11820 * ECANCELED - command cancelled due to low power 11821 * EINVAL 11822 * EFAULT - copyin/copyout error 11823 * return code of scsi_uscsi_handle_command(): 11824 * EIO 11825 * ENXIO 11826 * EACCES 11827 * 11828 * Context: Kernel Thread; 11829 * Waits for command to complete. Can sleep. 11830 */ 11831 static int 11832 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag, 11833 enum uio_seg dataspace, int path_flag) 11834 { 11835 struct sd_uscsi_info *uip; 11836 struct uscsi_cmd *uscmd; 11837 struct sd_lun *un; 11838 dev_t dev; 11839 11840 int format = 0; 11841 int rval; 11842 11843 ASSERT(ssc != NULL); 11844 un = ssc->ssc_un; 11845 ASSERT(un != NULL); 11846 uscmd = ssc->ssc_uscsi_cmd; 11847 ASSERT(uscmd != NULL); 11848 ASSERT(!mutex_owned(SD_MUTEX(un))); 11849 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) { 11850 /* 11851 * If enter here, it indicates that the previous uscsi 11852 * command has not been processed by sd_ssc_assessment. 11853 * This is violating our rules of FMA telemetry processing. 11854 * We should print out this message and the last undisposed 11855 * uscsi command. 11856 */ 11857 if (uscmd->uscsi_cdb != NULL) { 11858 SD_INFO(SD_LOG_SDTEST, un, 11859 "sd_ssc_send is missing the alternative " 11860 "sd_ssc_assessment when running command 0x%x.\n", 11861 uscmd->uscsi_cdb[0]); 11862 } 11863 /* 11864 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be 11865 * the initial status. 11866 */ 11867 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11868 } 11869 11870 /* 11871 * We need to make sure sd_ssc_send will have sd_ssc_assessment 11872 * followed to avoid missing FMA telemetries. 11873 */ 11874 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT; 11875 11876 /* 11877 * if USCSI_PMFAILFAST is set and un is in low power, fail the 11878 * command immediately. 
11879 */ 11880 mutex_enter(SD_MUTEX(un)); 11881 mutex_enter(&un->un_pm_mutex); 11882 if ((uscmd->uscsi_flags & USCSI_PMFAILFAST) && 11883 SD_DEVICE_IS_IN_LOW_POWER(un)) { 11884 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send:" 11885 "un:0x%p is in low power\n", un); 11886 mutex_exit(&un->un_pm_mutex); 11887 mutex_exit(SD_MUTEX(un)); 11888 return (ECANCELED); 11889 } 11890 mutex_exit(&un->un_pm_mutex); 11891 mutex_exit(SD_MUTEX(un)); 11892 11893 #ifdef SDDEBUG 11894 switch (dataspace) { 11895 case UIO_USERSPACE: 11896 SD_TRACE(SD_LOG_IO, un, 11897 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 11898 break; 11899 case UIO_SYSSPACE: 11900 SD_TRACE(SD_LOG_IO, un, 11901 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 11902 break; 11903 default: 11904 SD_TRACE(SD_LOG_IO, un, 11905 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 11906 break; 11907 } 11908 #endif 11909 11910 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 11911 SD_ADDRESS(un), &uscmd); 11912 if (rval != 0) { 11913 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: " 11914 "scsi_uscsi_alloc_and_copyin failed\n", un); 11915 return (rval); 11916 } 11917 11918 if ((uscmd->uscsi_cdb != NULL) && 11919 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 11920 mutex_enter(SD_MUTEX(un)); 11921 un->un_f_format_in_progress = TRUE; 11922 mutex_exit(SD_MUTEX(un)); 11923 format = 1; 11924 } 11925 11926 /* 11927 * Allocate an sd_uscsi_info struct and fill it with the info 11928 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11929 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11930 * since we allocate the buf here in this function, we do not 11931 * need to preserve the prior contents of b_private. 11932 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11933 */ 11934 uip = ssc->ssc_uscsi_info; 11935 uip->ui_flags = path_flag; 11936 uip->ui_cmdp = uscmd; 11937 11938 /* 11939 * Commands sent with priority are intended for error recovery 11940 * situations, and do not have retries performed. 
11941 */ 11942 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11943 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11944 } 11945 uscmd->uscsi_flags &= ~USCSI_NOINTR; 11946 11947 dev = SD_GET_DEV(un); 11948 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 11949 sd_uscsi_strategy, NULL, uip); 11950 11951 /* 11952 * mark ssc_flags right after handle_cmd to make sure 11953 * the uscsi has been sent 11954 */ 11955 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 11956 11957 #ifdef SDDEBUG 11958 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11959 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11960 uscmd->uscsi_status, uscmd->uscsi_resid); 11961 if (uscmd->uscsi_bufaddr != NULL) { 11962 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11963 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11964 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11965 if (dataspace == UIO_SYSSPACE) { 11966 SD_DUMP_MEMORY(un, SD_LOG_IO, 11967 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11968 uscmd->uscsi_buflen, SD_LOG_HEX); 11969 } 11970 } 11971 #endif 11972 11973 if (format == 1) { 11974 mutex_enter(SD_MUTEX(un)); 11975 un->un_f_format_in_progress = FALSE; 11976 mutex_exit(SD_MUTEX(un)); 11977 } 11978 11979 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 11980 11981 return (rval); 11982 } 11983 11984 /* 11985 * Function: sd_ssc_print 11986 * 11987 * Description: Print information available to the console. 11988 * 11989 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 11990 * sd_uscsi_info in. 11991 * sd_severity - log level. 11992 * Context: Kernel thread or interrupt context. 
 */
static void
sd_ssc_print(sd_ssc_t *ssc, int sd_severity)
{
	struct uscsi_cmd	*ucmdp;
	struct scsi_device	*devp;
	dev_info_t		*devinfo;
	uchar_t			*sensep;
	int			senlen;
	union scsi_cdb		*cdbp;
	uchar_t			com;
	extern struct scsi_key_strings scsi_cmds[];

	ASSERT(ssc != NULL);
	ASSERT(ssc->ssc_un != NULL);

	/*
	 * NOTE(review): logging proceeds only when the unit's FM log
	 * setting is SD_FM_LOG_EREPORT; all other settings return
	 * silently here -- confirm this gating is intended.
	 */
	if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT)
		return;
	ucmdp = ssc->ssc_uscsi_cmd;
	devp = SD_SCSI_DEVP(ssc->ssc_un);
	devinfo = SD_DEVINFO(ssc->ssc_un);
	ASSERT(ucmdp != NULL);
	ASSERT(devp != NULL);
	ASSERT(devinfo != NULL);
	sensep = (uint8_t *)ucmdp->uscsi_rqbuf;
	/* Sense bytes actually transferred = requested - residual. */
	senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid;
	cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb;

	/* In certain case (like DOORLOCK), the cdb could be NULL. */
	if (cdbp == NULL)
		return;
	/* We don't print log if no sense data available. */
	if (senlen == 0)
		sensep = NULL;
	com = cdbp->scc_cmd;
	scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com,
	    scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL);
}

/*
 * Function: sd_ssc_assessment
 *
 * Description: We use this function to make an assessment at the point
 *		where SD driver may encounter a potential error.
 *
 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
 *		    sd_uscsi_info in.
 *	      tp_assess - a hint of strategy for ereport posting.
 *	      Possible values of tp_assess include:
 *		SD_FMT_IGNORE - we don't post any ereport because we're
 *		sure that it is ok to ignore the underlying problems.
 *		SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now
 *		but it might be not correct to ignore the underlying hardware
 *		error.
 *		SD_FMT_STATUS_CHECK - we will post an ereport with the
 *		payload driver-assessment of value "fail" or
 *		"fatal"(depending on what information we have here). This
 *		assessment value is usually set when SD driver think there
 *		is a potential error occurred(Typically, when return value
 *		of the SCSI command is EIO).
 *		SD_FMT_STANDARD - we will post an ereport with the payload
 *		driver-assessment of value "info". This assessment value is
 *		set when the SCSI command returned successfully and with
 *		sense data sent back.
 *
 * Context: Kernel thread.
 */
static void
sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess)
{
	int senlen = 0;
	struct uscsi_cmd *ucmdp = NULL;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ucmdp = ssc->ssc_uscsi_cmd;
	ASSERT(ucmdp != NULL);

	if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
		ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT;
	} else {
		/*
		 * If enter here, it indicates that we have a wrong
		 * calling sequence of sd_ssc_send and sd_ssc_assessment,
		 * both of which should be called in a pair in case of
		 * loss of FMA telemetries.
		 */
		if (ucmdp->uscsi_cdb != NULL) {
			SD_INFO(SD_LOG_SDTEST, un,
			    "sd_ssc_assessment is missing the "
			    "alternative sd_ssc_send when running 0x%x, "
			    "or there are superfluous sd_ssc_assessment for "
			    "the same sd_ssc_send.\n",
			    ucmdp->uscsi_cdb[0]);
		}
		/*
		 * Set the ssc_flags to the initial value to avoid passing
		 * down dirty flags to the following sd_ssc_send function.
		 */
		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
		return;
	}

	/*
	 * Only handle an issued command which is waiting for assessment.
	 * A command which is not issued will not have
	 * SSC_FLAGS_INVALID_DATA set, so it's OK we just return here.
	 */
	if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) {
		sd_ssc_print(ssc, SCSI_ERR_INFO);
		return;
	} else {
		/*
		 * For an issued command, we should clear this flag in
		 * order to make the sd_ssc_t structure be reused across
		 * multiple uscsi commands.
		 */
		ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;
	}

	/*
	 * We will not deal with non-retryable(flag USCSI_DIAGNOSE set)
	 * commands here. And we should clear the ssc_flags before return.
	 */
	if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) {
		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
		return;
	}

	switch (tp_assess) {
	case SD_FMT_IGNORE:
	case SD_FMT_IGNORE_COMPROMISE:
		break;
	case SD_FMT_STATUS_CHECK:
		/*
		 * For a failed command(including the succeeded command
		 * with invalid data sent back).
		 */
		sd_ssc_post(ssc, SD_FM_DRV_FATAL);
		break;
	case SD_FMT_STANDARD:
		/*
		 * Always for the succeeded commands probably with sense
		 * data sent back.
		 * Limitation:
		 *	We can only handle a succeeded command with sense
		 *	data sent back when auto-request-sense is enabled.
		 */
		senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen -
		    ssc->ssc_uscsi_cmd->uscsi_rqresid;
		if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) &&
		    (un->un_f_arq_enabled == TRUE) &&
		    senlen > 0 &&
		    ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) {
			sd_ssc_post(ssc, SD_FM_DRV_NOTICE);
		}
		break;
	default:
		/*
		 * Should not have other type of assessment.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "sd_ssc_assessment got wrong "
		    "sd_type_assessment %d.\n", tp_assess);
		break;
	}
	/*
	 * Clear up the ssc_flags before return.
	 */
	ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
}

/*
 * Function: sd_ssc_post
 *
 * Description: 1. read the driver property to get fm-scsi-log flag.
 *		2. print log if fm_log_capable is non-zero.
 *		3. call sd_ssc_ereport_post to post ereport if possible.
 *
 * Context: May be called from kernel thread or interrupt context.
 */
static void
sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess)
{
	struct sd_lun	*un;
	int		sd_severity;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * We may enter here from sd_ssc_assessment(for USCSI command) or
	 * by directly called from sdintr context.
	 * We don't handle a non-disk drive(CD-ROM, removable media).
	 * Clear the ssc_flags before return in case we've set
	 * SSC_FLAGS_INVALID_XXX which should be skipped for a non-disk
	 * driver.
	 */
	if (ISCD(un) || un->un_f_has_removable_media) {
		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
		return;
	}

	/* Map the driver assessment onto a scsi_log severity level. */
	switch (sd_assess) {
	case SD_FM_DRV_FATAL:
		sd_severity = SCSI_ERR_FATAL;
		break;
	case SD_FM_DRV_RECOVERY:
		sd_severity = SCSI_ERR_RECOVERED;
		break;
	case SD_FM_DRV_RETRY:
		sd_severity = SCSI_ERR_RETRYABLE;
		break;
	case SD_FM_DRV_NOTICE:
		sd_severity = SCSI_ERR_INFO;
		break;
	default:
		sd_severity = SCSI_ERR_UNKNOWN;
	}
	/* print log */
	sd_ssc_print(ssc, sd_severity);

	/* always post ereport */
	sd_ssc_ereport_post(ssc, sd_assess);
}

/*
 * Function: sd_ssc_set_info
 *
 * Description: Mark ssc_flags and set ssc_info which would be the
 *		payload of uderr ereport. This function will cause
 *		sd_ssc_ereport_post to post uderr ereport only.
12228 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA(USCSI), 12229 * the function will also call SD_ERROR or scsi_log for a 12230 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device. 12231 * 12232 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and 12233 * sd_uscsi_info in. 12234 * ssc_flags - indicate the sub-category of a uderr. 12235 * comp - this argument is meaningful only when 12236 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible 12237 * values include: 12238 * > 0, SD_ERROR is used with comp as the driver logging 12239 * component; 12240 * = 0, scsi-log is used to log error telemetries; 12241 * < 0, no log available for this telemetry. 12242 * 12243 * Context: Kernel thread or interrupt context 12244 */ 12245 static void 12246 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...) 12247 { 12248 va_list ap; 12249 12250 ASSERT(ssc != NULL); 12251 ASSERT(ssc->ssc_un != NULL); 12252 12253 ssc->ssc_flags |= ssc_flags; 12254 va_start(ap, fmt); 12255 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap); 12256 va_end(ap); 12257 12258 /* 12259 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command 12260 * with invalid data sent back. For non-uscsi command, the 12261 * following code will be bypassed. 12262 */ 12263 if (ssc_flags & SSC_FLAGS_INVALID_DATA) { 12264 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) { 12265 /* 12266 * If the error belong to certain component and we 12267 * do not want it to show up on the console, we 12268 * will use SD_ERROR, otherwise scsi_log is 12269 * preferred. 12270 */ 12271 if (comp > 0) { 12272 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info); 12273 } else if (comp == 0) { 12274 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label, 12275 CE_WARN, ssc->ssc_info); 12276 } 12277 } 12278 } 12279 } 12280 12281 /* 12282 * Function: sd_buf_iodone 12283 * 12284 * Description: Frees the sd_xbuf & returns the buf to its originator. 
12285 * 12286 * Context: May be called from interrupt context. 12287 */ 12288 /* ARGSUSED */ 12289 static void 12290 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 12291 { 12292 struct sd_xbuf *xp; 12293 12294 ASSERT(un != NULL); 12295 ASSERT(bp != NULL); 12296 ASSERT(!mutex_owned(SD_MUTEX(un))); 12297 12298 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 12299 12300 xp = SD_GET_XBUF(bp); 12301 ASSERT(xp != NULL); 12302 12303 /* xbuf is gone after this */ 12304 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) { 12305 mutex_enter(SD_MUTEX(un)); 12306 12307 /* 12308 * Grab time when the cmd completed. 12309 * This is used for determining if the system has been 12310 * idle long enough to make it idle to the PM framework. 12311 * This is for lowering the overhead, and therefore improving 12312 * performance per I/O operation. 12313 */ 12314 un->un_pm_idle_time = ddi_get_time(); 12315 12316 un->un_ncmds_in_driver--; 12317 ASSERT(un->un_ncmds_in_driver >= 0); 12318 SD_INFO(SD_LOG_IO, un, 12319 "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 12320 un->un_ncmds_in_driver); 12321 12322 mutex_exit(SD_MUTEX(un)); 12323 } 12324 12325 biodone(bp); /* bp is gone after this */ 12326 12327 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 12328 } 12329 12330 12331 /* 12332 * Function: sd_uscsi_iodone 12333 * 12334 * Description: Frees the sd_xbuf & returns the buf to its originator. 12335 * 12336 * Context: May be called from interrupt context. 12337 */ 12338 /* ARGSUSED */ 12339 static void 12340 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 12341 { 12342 struct sd_xbuf *xp; 12343 12344 ASSERT(un != NULL); 12345 ASSERT(bp != NULL); 12346 12347 xp = SD_GET_XBUF(bp); 12348 ASSERT(xp != NULL); 12349 ASSERT(!mutex_owned(SD_MUTEX(un))); 12350 12351 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 12352 12353 bp->b_private = xp->xb_private; 12354 12355 mutex_enter(SD_MUTEX(un)); 12356 12357 /* 12358 * Grab time when the cmd completed. 
12359 * This is used for determining if the system has been 12360 * idle long enough to make it idle to the PM framework. 12361 * This is for lowering the overhead, and therefore improving 12362 * performance per I/O operation. 12363 */ 12364 un->un_pm_idle_time = ddi_get_time(); 12365 12366 un->un_ncmds_in_driver--; 12367 ASSERT(un->un_ncmds_in_driver >= 0); 12368 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 12369 un->un_ncmds_in_driver); 12370 12371 mutex_exit(SD_MUTEX(un)); 12372 12373 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 12374 SENSE_LENGTH) { 12375 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 12376 MAX_SENSE_LENGTH); 12377 } else { 12378 kmem_free(xp, sizeof (struct sd_xbuf)); 12379 } 12380 12381 biodone(bp); 12382 12383 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 12384 } 12385 12386 12387 /* 12388 * Function: sd_mapblockaddr_iostart 12389 * 12390 * Description: Verify request lies within the partition limits for 12391 * the indicated minor device. Issue "overrun" buf if 12392 * request would exceed partition range. Converts 12393 * partition-relative block address to absolute. 12394 * 12395 * Upon exit of this function: 12396 * 1.I/O is aligned 12397 * xp->xb_blkno represents the absolute sector address 12398 * 2.I/O is misaligned 12399 * xp->xb_blkno represents the absolute logical block address 12400 * based on DEV_BSIZE. The logical block address will be 12401 * converted to physical sector address in sd_mapblocksize_\ 12402 * iostart. 12403 * 3.I/O is misaligned but is aligned in "overrun" buf 12404 * xp->xb_blkno represents the absolute logical block address 12405 * based on DEV_BSIZE. The logical block address will be 12406 * converted to physical sector address in sd_mapblocksize_\ 12407 * iostart. But no RMW will be issued in this case. 
12408 * 12409 * Context: Can sleep 12410 * 12411 * Issues: This follows what the old code did, in terms of accessing 12412 * some of the partition info in the unit struct without holding 12413 * the mutext. This is a general issue, if the partition info 12414 * can be altered while IO is in progress... as soon as we send 12415 * a buf, its partitioning can be invalid before it gets to the 12416 * device. Probably the right fix is to move partitioning out 12417 * of the driver entirely. 12418 */ 12419 12420 static void 12421 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 12422 { 12423 diskaddr_t nblocks; /* #blocks in the given partition */ 12424 daddr_t blocknum; /* Block number specified by the buf */ 12425 size_t requested_nblocks; 12426 size_t available_nblocks; 12427 int partition; 12428 diskaddr_t partition_offset; 12429 struct sd_xbuf *xp; 12430 int secmask = 0, blknomask = 0; 12431 ushort_t is_aligned = TRUE; 12432 12433 ASSERT(un != NULL); 12434 ASSERT(bp != NULL); 12435 ASSERT(!mutex_owned(SD_MUTEX(un))); 12436 12437 SD_TRACE(SD_LOG_IO_PARTITION, un, 12438 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 12439 12440 xp = SD_GET_XBUF(bp); 12441 ASSERT(xp != NULL); 12442 12443 /* 12444 * If the geometry is not indicated as valid, attempt to access 12445 * the unit & verify the geometry/label. This can be the case for 12446 * removable-media devices, of if the device was opened in 12447 * NDELAY/NONBLOCK mode. 
12448 */ 12449 partition = SDPART(bp->b_edev); 12450 12451 if (!SD_IS_VALID_LABEL(un)) { 12452 sd_ssc_t *ssc; 12453 /* 12454 * Initialize sd_ssc_t for internal uscsi commands 12455 * In case of potential porformance issue, we need 12456 * to alloc memory only if there is invalid label 12457 */ 12458 ssc = sd_ssc_init(un); 12459 12460 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) { 12461 /* 12462 * For removable devices it is possible to start an 12463 * I/O without a media by opening the device in nodelay 12464 * mode. Also for writable CDs there can be many 12465 * scenarios where there is no geometry yet but volume 12466 * manager is trying to issue a read() just because 12467 * it can see TOC on the CD. So do not print a message 12468 * for removables. 12469 */ 12470 if (!un->un_f_has_removable_media) { 12471 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12472 "i/o to invalid geometry\n"); 12473 } 12474 bioerror(bp, EIO); 12475 bp->b_resid = bp->b_bcount; 12476 SD_BEGIN_IODONE(index, un, bp); 12477 12478 sd_ssc_fini(ssc); 12479 return; 12480 } 12481 sd_ssc_fini(ssc); 12482 } 12483 12484 nblocks = 0; 12485 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 12486 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 12487 12488 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1; 12489 secmask = un->un_tgt_blocksize - 1; 12490 12491 if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) { 12492 is_aligned = FALSE; 12493 } 12494 12495 if (!(NOT_DEVBSIZE(un))) { 12496 /* 12497 * If I/O is aligned, no need to involve RMW(Read Modify Write) 12498 * Convert the logical block number to target's physical sector 12499 * number. 
12500 */ 12501 if (is_aligned) { 12502 xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno); 12503 } else { 12504 switch (un->un_f_rmw_type) { 12505 case SD_RMW_TYPE_RETURN_ERROR: 12506 bp->b_flags |= B_ERROR; 12507 goto error_exit; 12508 12509 case SD_RMW_TYPE_DEFAULT: 12510 mutex_enter(SD_MUTEX(un)); 12511 if (un->un_rmw_msg_timeid == NULL) { 12512 scsi_log(SD_DEVINFO(un), sd_label, 12513 CE_WARN, "I/O request is not " 12514 "aligned with %d disk sector size. " 12515 "It is handled through Read Modify " 12516 "Write but the performance is " 12517 "very low.\n", 12518 un->un_tgt_blocksize); 12519 un->un_rmw_msg_timeid = 12520 timeout(sd_rmw_msg_print_handler, 12521 un, SD_RMW_MSG_PRINT_TIMEOUT); 12522 } else { 12523 un->un_rmw_incre_count ++; 12524 } 12525 mutex_exit(SD_MUTEX(un)); 12526 break; 12527 12528 case SD_RMW_TYPE_NO_WARNING: 12529 default: 12530 break; 12531 } 12532 12533 nblocks = SD_TGT2SYSBLOCK(un, nblocks); 12534 partition_offset = SD_TGT2SYSBLOCK(un, 12535 partition_offset); 12536 } 12537 } 12538 12539 /* 12540 * blocknum is the starting block number of the request. At this 12541 * point it is still relative to the start of the minor device. 12542 */ 12543 blocknum = xp->xb_blkno; 12544 12545 /* 12546 * Legacy: If the starting block number is one past the last block 12547 * in the partition, do not set B_ERROR in the buf. 12548 */ 12549 if (blocknum == nblocks) { 12550 goto error_exit; 12551 } 12552 12553 /* 12554 * Confirm that the first block of the request lies within the 12555 * partition limits. Also the requested number of bytes must be 12556 * a multiple of the system block size. 12557 */ 12558 if ((blocknum < 0) || (blocknum >= nblocks) || 12559 ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) { 12560 bp->b_flags |= B_ERROR; 12561 goto error_exit; 12562 } 12563 12564 /* 12565 * If the requsted # blocks exceeds the available # blocks, that 12566 * is an overrun of the partition. 
12567 */ 12568 if ((!NOT_DEVBSIZE(un)) && is_aligned) { 12569 requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 12570 } else { 12571 requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount); 12572 } 12573 12574 available_nblocks = (size_t)(nblocks - blocknum); 12575 ASSERT(nblocks >= blocknum); 12576 12577 if (requested_nblocks > available_nblocks) { 12578 size_t resid; 12579 12580 /* 12581 * Allocate an "overrun" buf to allow the request to proceed 12582 * for the amount of space available in the partition. The 12583 * amount not transferred will be added into the b_resid 12584 * when the operation is complete. The overrun buf 12585 * replaces the original buf here, and the original buf 12586 * is saved inside the overrun buf, for later use. 12587 */ 12588 if ((!NOT_DEVBSIZE(un)) && is_aligned) { 12589 resid = SD_TGTBLOCKS2BYTES(un, 12590 (offset_t)(requested_nblocks - available_nblocks)); 12591 } else { 12592 resid = SD_SYSBLOCKS2BYTES( 12593 (offset_t)(requested_nblocks - available_nblocks)); 12594 } 12595 12596 size_t count = bp->b_bcount - resid; 12597 /* 12598 * Note: count is an unsigned entity thus it'll NEVER 12599 * be less than 0 so ASSERT the original values are 12600 * correct. 12601 */ 12602 ASSERT(bp->b_bcount >= resid); 12603 12604 bp = sd_bioclone_alloc(bp, count, blocknum, 12605 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 12606 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12607 ASSERT(xp != NULL); 12608 } 12609 12610 /* At this point there should be no residual for this buf. */ 12611 ASSERT(bp->b_resid == 0); 12612 12613 /* Convert the block number to an absolute address. 
*/ 12614 xp->xb_blkno += partition_offset; 12615 12616 SD_NEXT_IOSTART(index, un, bp); 12617 12618 SD_TRACE(SD_LOG_IO_PARTITION, un, 12619 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12620 12621 return; 12622 12623 error_exit: 12624 bp->b_resid = bp->b_bcount; 12625 SD_BEGIN_IODONE(index, un, bp); 12626 SD_TRACE(SD_LOG_IO_PARTITION, un, 12627 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12628 } 12629 12630 12631 /* 12632 * Function: sd_mapblockaddr_iodone 12633 * 12634 * Description: Completion-side processing for partition management. 12635 * 12636 * Context: May be called under interrupt context 12637 */ 12638 12639 static void 12640 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12641 { 12642 /* int partition; */ /* Not used, see below. */ 12643 ASSERT(un != NULL); 12644 ASSERT(bp != NULL); 12645 ASSERT(!mutex_owned(SD_MUTEX(un))); 12646 12647 SD_TRACE(SD_LOG_IO_PARTITION, un, 12648 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12649 12650 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12651 /* 12652 * We have an "overrun" buf to deal with... 12653 */ 12654 struct sd_xbuf *xp; 12655 struct buf *obp; /* ptr to the original buf */ 12656 12657 xp = SD_GET_XBUF(bp); 12658 ASSERT(xp != NULL); 12659 12660 /* Retrieve the pointer to the original buf */ 12661 obp = (struct buf *)xp->xb_private; 12662 ASSERT(obp != NULL); 12663 12664 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12665 bioerror(obp, bp->b_error); 12666 12667 sd_bioclone_free(bp); 12668 12669 /* 12670 * Get back the original buf. 12671 * Note that since the restoration of xb_blkno below 12672 * was removed, the sd_xbuf is not needed. 12673 */ 12674 bp = obp; 12675 /* 12676 * xp = SD_GET_XBUF(bp); 12677 * ASSERT(xp != NULL); 12678 */ 12679 } 12680 12681 /* 12682 * Convert sd->xb_blkno back to a minor-device relative value. 
12683 * Note: this has been commented out, as it is not needed in the 12684 * current implementation of the driver (ie, since this function 12685 * is at the top of the layering chains, so the info will be 12686 * discarded) and it is in the "hot" IO path. 12687 * 12688 * partition = getminor(bp->b_edev) & SDPART_MASK; 12689 * xp->xb_blkno -= un->un_offset[partition]; 12690 */ 12691 12692 SD_NEXT_IODONE(index, un, bp); 12693 12694 SD_TRACE(SD_LOG_IO_PARTITION, un, 12695 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12696 } 12697 12698 12699 /* 12700 * Function: sd_mapblocksize_iostart 12701 * 12702 * Description: Convert between system block size (un->un_sys_blocksize) 12703 * and target block size (un->un_tgt_blocksize). 12704 * 12705 * Context: Can sleep to allocate resources. 12706 * 12707 * Assumptions: A higher layer has already performed any partition validation, 12708 * and converted the xp->xb_blkno to an absolute value relative 12709 * to the start of the device. 12710 * 12711 * It is also assumed that the higher layer has implemented 12712 * an "overrun" mechanism for the case where the request would 12713 * read/write beyond the end of a partition. In this case we 12714 * assume (and ASSERT) that bp->b_resid == 0. 12715 * 12716 * Note: The implementation for this routine assumes the target 12717 * block size remains constant between allocation and transport. 
 */

static void
sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_mapblocksize_info *bsp;
	struct sd_xbuf *xp;
	offset_t first_byte;
	daddr_t start_block, end_block;
	daddr_t request_bytes;
	ushort_t is_aligned = FALSE;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);

	/*
	 * For a non-writable CD, a write request is an error
	 */
	if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
	    (un->un_f_mmc_writable_media == FALSE)) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	/*
	 * We do not need a shadow buf if the device is using
	 * un->un_sys_blocksize as its block size or if bcount == 0.
	 * In this case there is no layer-private data block allocated.
	 */
	if ((un->un_tgt_blocksize == DEV_BSIZE) ||
	    (bp->b_bcount == 0)) {
		goto done;
	}

#if defined(__i386) || defined(__amd64)
	/* We do not support non-block-aligned transfers for ROD devices */
	ASSERT(!ISROD(un));
#endif

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
	    un->un_tgt_blocksize, DEV_BSIZE);
	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "request start block:0x%x\n", xp->xb_blkno);
	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "request len:0x%x\n", bp->b_bcount);

	/*
	 * Allocate the layer-private data area for the mapblocksize layer.
	 * Layers are allowed to use the xb_private member of the sd_xbuf
	 * struct to store the pointer to their layer-private data block, but
	 * each layer also has the responsibility of restoring the prior
	 * contents of xb_private before returning the buf/xbuf to the
	 * higher layer that sent it.
	 *
	 * Here we save the prior contents of xp->xb_private into the
	 * bsp->mbs_oprivate field of our layer-private data area. This value
	 * is restored by sd_mapblocksize_iodone() just prior to freeing up
	 * the layer-private area and returning the buf/xbuf to the layer
	 * that sent it.
	 *
	 * Note that here we use kmem_zalloc for the allocation as there are
	 * parts of the mapblocksize code that expect certain fields to be
	 * zero unless explicitly set to a required value.
	 */
	bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
	bsp->mbs_oprivate = xp->xb_private;
	xp->xb_private = bsp;

	/*
	 * This treats the data on the disk (target) as an array of bytes.
	 * first_byte is the byte offset, from the beginning of the device,
	 * to the location of the request. This is converted from a
	 * un->un_sys_blocksize block address to a byte offset, and then back
	 * to a block address based upon a un->un_tgt_blocksize block size.
	 *
	 * xp->xb_blkno should be absolute upon entry into this function,
	 * but it is based upon partitions that use the "system"
	 * block size. It must be adjusted to reflect the block size of
	 * the target.
	 *
	 * Note that end_block is actually the block that follows the last
	 * block of the request, but that's what is needed for the computation.
	 */
	first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
	start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
	end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) /
	    un->un_tgt_blocksize;

	/* request_bytes is rounded up to a multiple of the target block size */
	request_bytes = (end_block - start_block) * un->un_tgt_blocksize;

	/*
	 * See if the starting address of the request and the request
	 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
	 * then we do not need to allocate a shadow buf to handle the request.
	 */
	if (((first_byte % un->un_tgt_blocksize) == 0) &&
	    ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
		is_aligned = TRUE;
	}

	if ((bp->b_flags & B_READ) == 0) {
		/*
		 * Lock the range for a write operation. An aligned request is
		 * considered a simple write; otherwise the request must be a
		 * read-modify-write.
		 */
		bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
		    (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
	}

	/*
	 * Alloc a shadow buf if the request is not aligned. Also, this is
	 * where the READ command is generated for a read-modify-write. (The
	 * write phase is deferred until after the read completes.)
	 */
	if (is_aligned == FALSE) {

		struct sd_mapblocksize_info *shadow_bsp;
		struct sd_xbuf *shadow_xp;
		struct buf *shadow_bp;

		/*
		 * Allocate the shadow buf and its associated xbuf. Note that
		 * after this call the xb_blkno value in both the original
		 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
		 * same: absolute relative to the start of the device, and
		 * adjusted for the target block size. The b_blkno in the
		 * shadow buf will also be set to this value. We should never
		 * change b_blkno in the original bp however.
		 *
		 * Note also that the shadow buf will always need to be a
		 * READ command, regardless of whether the incoming command
		 * is a READ or a WRITE.
		 */
		shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
		    xp->xb_blkno,
		    (int (*)(struct buf *)) sd_mapblocksize_iodone);

		shadow_xp = SD_GET_XBUF(shadow_bp);

		/*
		 * Allocate the layer-private data for the shadow buf.
		 * (No need to preserve xb_private in the shadow xbuf.)
		 */
		shadow_xp->xb_private = shadow_bsp =
		    kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);

		/*
		 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
		 * to figure out where the start of the user data is (based upon
		 * the system block size) in the data returned by the READ
		 * command (which will be based upon the target blocksize). Note
		 * that this is only really used if the request is unaligned.
		 */
		bsp->mbs_copy_offset = (ssize_t)(first_byte -
		    ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
		ASSERT((bsp->mbs_copy_offset >= 0) &&
		    (bsp->mbs_copy_offset < un->un_tgt_blocksize));

		shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;

		shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;

		/* Transfer the wmap (if any) to the shadow buf */
		shadow_bsp->mbs_wmp = bsp->mbs_wmp;
		bsp->mbs_wmp = NULL;

		/*
		 * The shadow buf goes on from here in place of the
		 * original buf.
		 */
		shadow_bsp->mbs_orig_bp = bp;
		bp = shadow_bp;
	}

	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: tgt request len:0x%x\n",
	    request_bytes);
	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp);

done:
	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
}


/*
 * Function: sd_mapblocksize_iodone
 *
 * Description: Completion side processing for block-size mapping.
 *
 * Context: May be called under interrupt context
 */

static void
sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_mapblocksize_info *bsp;
	struct sd_xbuf *xp;
	struct sd_xbuf *orig_xp;	/* sd_xbuf for the original buf */
	struct buf *orig_bp;	/* ptr to the original buf */
	offset_t shadow_end;
	offset_t request_end;
	offset_t shadow_start;
	ssize_t copy_offset;
	size_t copy_length;
	size_t shortfall;
	uint_t is_write;	/* TRUE if this bp is a WRITE */
	uint_t has_wmap;	/* TRUE is this bp has a wmap */

	ASSERT(un != NULL);
	ASSERT(bp != NULL);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);

	/*
	 * There is no shadow buf or layer-private data if the target is
	 * using un->un_sys_blocksize as its block size or if bcount == 0.
	 */
	if ((un->un_tgt_blocksize == DEV_BSIZE) ||
	    (bp->b_bcount == 0)) {
		goto exit;
	}

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/* Retrieve the pointer to the layer-private data area from the xbuf. */
	bsp = xp->xb_private;

	is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
	has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;

	if (is_write) {
		/*
		 * For a WRITE request we must free up the block range that
		 * we have locked up. This holds regardless of whether this is
		 * an aligned write request or a read-modify-write request.
		 */
		sd_range_unlock(un, bsp->mbs_wmp);
		bsp->mbs_wmp = NULL;
	}

	if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
		/*
		 * An aligned read or write command will have no shadow buf;
		 * there is not much else to do with it.
		 */
		goto done;
	}

	orig_bp = bsp->mbs_orig_bp;
	ASSERT(orig_bp != NULL);
	orig_xp = SD_GET_XBUF(orig_bp);
	ASSERT(orig_xp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!is_write && has_wmap) {
		/*
		 * A READ with a wmap means this is the READ phase of a
		 * read-modify-write. If an error occurred on the READ then
		 * we do not proceed with the WRITE phase or copy any data.
		 * Just release the write maps and return with an error.
		 */
		if ((bp->b_resid != 0) || (bp->b_error != 0)) {
			orig_bp->b_resid = orig_bp->b_bcount;
			bioerror(orig_bp, bp->b_error);
			sd_range_unlock(un, bsp->mbs_wmp);
			goto freebuf_done;
		}
	}

	/*
	 * Here is where we set up to copy the data from the shadow buf
	 * into the space associated with the original buf.
	 *
	 * To deal with the conversion between block sizes, these
	 * computations treat the data as an array of bytes, with the
	 * first byte (byte 0) corresponding to the first byte in the
	 * first block on the disk.
	 */

	/*
	 * shadow_start and shadow_end indicate the location and size of
	 * the data returned with the shadow IO request.
	 */
	shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
	shadow_end = shadow_start + bp->b_bcount - bp->b_resid;

	/*
	 * copy_offset gives the offset (in bytes) from the start of the first
	 * block of the READ request to the beginning of the data. It is
	 * retrieved from bsp->mbs_copy_offset, where it was saved by
	 * sd_mapblocksize_iostart(). copy_length gives the amount of
	 * data to be copied (in bytes).
	 */
	copy_offset = bsp->mbs_copy_offset;
	ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize));
	copy_length = orig_bp->b_bcount;
	request_end = shadow_start + copy_offset + orig_bp->b_bcount;

	/*
	 * Set up the resid and error fields of orig_bp as appropriate.
	 */
	if (shadow_end >= request_end) {
		/* We got all the requested data; set resid to zero */
		orig_bp->b_resid = 0;
	} else {
		/*
		 * We failed to get enough data to fully satisfy the original
		 * request. Just copy back whatever data we got and set
		 * up the residual and error code as required.
		 *
		 * 'shortfall' is the amount by which the data received with the
		 * shadow buf has "fallen short" of the requested amount.
		 */
		shortfall = (size_t)(request_end - shadow_end);

		if (shortfall > orig_bp->b_bcount) {
			/*
			 * We did not get enough data to even partially
			 * fulfill the original request. The residual is
			 * equal to the amount requested.
			 */
			orig_bp->b_resid = orig_bp->b_bcount;
		} else {
			/*
			 * We did not get all the data that we requested
			 * from the device, but we will try to return what
			 * portion we did get.
			 */
			orig_bp->b_resid = shortfall;
		}
		ASSERT(copy_length >= orig_bp->b_resid);
		copy_length -= orig_bp->b_resid;
	}

	/* Propagate the error code from the shadow buf to the original buf */
	bioerror(orig_bp, bp->b_error);

	if (is_write) {
		goto freebuf_done;	/* No data copying for a WRITE */
	}

	if (has_wmap) {
		/*
		 * This is a READ command from the READ phase of a
		 * read-modify-write request. We have to copy the data given
		 * by the user OVER the data returned by the READ command,
		 * then convert the command from a READ to a WRITE and send
		 * it back to the target.
		 */
		bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
		    copy_length);

		bp->b_flags &= ~((int)B_READ);	/* Convert to a WRITE */

		/*
		 * Dispatch the WRITE command to the taskq thread, which
		 * will in turn send the command to the target. When the
		 * WRITE command completes, we (sd_mapblocksize_iodone())
		 * will get called again as part of the iodone chain
		 * processing for it. Note that we will still be dealing
		 * with the shadow buf at that point.
		 */
		if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
		    KM_NOSLEEP) != 0) {
			/*
			 * Dispatch was successful so we are done. Return
			 * without going any higher up the iodone chain. Do
			 * not free up any layer-private data until after the
			 * WRITE completes.
			 */
			return;
		}

		/*
		 * Dispatch of the WRITE command failed; set up the error
		 * condition and send this IO back up the iodone chain.
		 */
		bioerror(orig_bp, EIO);
		orig_bp->b_resid = orig_bp->b_bcount;

	} else {
		/*
		 * This is a regular READ request (ie, not a RMW). Copy the
		 * data from the shadow buf into the original buf. The
		 * copy_offset compensates for any "misalignment" between the
		 * shadow buf (with its un->un_tgt_blocksize blocks) and the
		 * original buf (with its un->un_sys_blocksize blocks).
		 */
		bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr,
		    copy_length);
	}

freebuf_done:

	/*
	 * At this point we still have both the shadow buf AND the original
	 * buf to deal with, as well as the layer-private data area in each.
	 * Local variables are as follows:
	 *
	 * bp -- points to shadow buf
	 * xp -- points to xbuf of shadow buf
	 * bsp -- points to layer-private data area of shadow buf
	 * orig_bp -- points to original buf
	 *
	 * First free the shadow buf and its associated xbuf, then free the
	 * layer-private data area from the shadow buf. There is no need to
	 * restore xb_private in the shadow xbuf.
	 */
	sd_shadow_buf_free(bp);
	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));

	/*
	 * Now update the local variables to point to the original buf, xbuf,
	 * and layer-private area.
	 */
	bp = orig_bp;
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(xp == orig_xp);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

done:
	/*
	 * Restore xb_private to whatever it was set to by the next higher
	 * layer in the chain, then free the layer-private data area.
	 */
	xp->xb_private = bsp->mbs_oprivate;
	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));

exit:
	SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp),
	    "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp);

	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function: sd_checksum_iostart
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: Kernel thread context
 */

static void
sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/* Nothing to do for this layer; just pass the buf down the chain. */
	SD_NEXT_IOSTART(index, un, bp);
}


/*
 * Function:	sd_checksum_iodone
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: May be called under interrupt context
 */

static void
sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/* Nothing to do for this layer; just pass the buf up the chain. */
	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function:	sd_checksum_uscsi_iostart
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: Kernel thread context
 */

static void
sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/* Nothing to do for this layer; just pass the buf down the chain. */
	SD_NEXT_IOSTART(index, un, bp);
}


/*
 * Function:	sd_checksum_uscsi_iodone
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: May be called under interrupt context
 */

static void
sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/* Nothing to do for this layer; just pass the buf up the chain. */
	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function:	sd_pm_iostart
 *
 * Description: iostart-side routine for Power management.
13257 * 13258 * Context: Kernel thread context 13259 */ 13260 13261 static void 13262 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 13263 { 13264 ASSERT(un != NULL); 13265 ASSERT(bp != NULL); 13266 ASSERT(!mutex_owned(SD_MUTEX(un))); 13267 ASSERT(!mutex_owned(&un->un_pm_mutex)); 13268 13269 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 13270 13271 if (sd_pm_entry(un) != DDI_SUCCESS) { 13272 /* 13273 * Set up to return the failed buf back up the 'iodone' 13274 * side of the calling chain. 13275 */ 13276 bioerror(bp, EIO); 13277 bp->b_resid = bp->b_bcount; 13278 13279 SD_BEGIN_IODONE(index, un, bp); 13280 13281 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 13282 return; 13283 } 13284 13285 SD_NEXT_IOSTART(index, un, bp); 13286 13287 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 13288 } 13289 13290 13291 /* 13292 * Function: sd_pm_iodone 13293 * 13294 * Description: iodone-side routine for power mangement. 13295 * 13296 * Context: may be called from interrupt context 13297 */ 13298 13299 static void 13300 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 13301 { 13302 ASSERT(un != NULL); 13303 ASSERT(bp != NULL); 13304 ASSERT(!mutex_owned(&un->un_pm_mutex)); 13305 13306 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 13307 13308 /* 13309 * After attach the following flag is only read, so don't 13310 * take the penalty of acquiring a mutex for it. 13311 */ 13312 if (un->un_f_pm_is_enabled == TRUE) { 13313 sd_pm_exit(un); 13314 } 13315 13316 SD_NEXT_IODONE(index, un, bp); 13317 13318 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 13319 } 13320 13321 13322 /* 13323 * Function: sd_core_iostart 13324 * 13325 * Description: Primary driver function for enqueuing buf(9S) structs from 13326 * the system and initiating IO to the target device 13327 * 13328 * Context: Kernel thread context. Can sleep. 13329 * 13330 * Assumptions: - The given xp->xb_blkno is absolute 13331 * (ie, relative to the start of the device). 
13332 * - The IO is to be done using the native blocksize of 13333 * the device, as specified in un->un_tgt_blocksize. 13334 */ 13335 /* ARGSUSED */ 13336 static void 13337 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 13338 { 13339 struct sd_xbuf *xp; 13340 13341 ASSERT(un != NULL); 13342 ASSERT(bp != NULL); 13343 ASSERT(!mutex_owned(SD_MUTEX(un))); 13344 ASSERT(bp->b_resid == 0); 13345 13346 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 13347 13348 xp = SD_GET_XBUF(bp); 13349 ASSERT(xp != NULL); 13350 13351 mutex_enter(SD_MUTEX(un)); 13352 13353 /* 13354 * If we are currently in the failfast state, fail any new IO 13355 * that has B_FAILFAST set, then return. 13356 */ 13357 if ((bp->b_flags & B_FAILFAST) && 13358 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 13359 mutex_exit(SD_MUTEX(un)); 13360 bioerror(bp, EIO); 13361 bp->b_resid = bp->b_bcount; 13362 SD_BEGIN_IODONE(index, un, bp); 13363 return; 13364 } 13365 13366 if (SD_IS_DIRECT_PRIORITY(xp)) { 13367 /* 13368 * Priority command -- transport it immediately. 13369 * 13370 * Note: We may want to assert that USCSI_DIAGNOSE is set, 13371 * because all direct priority commands should be associated 13372 * with error recovery actions which we don't want to retry. 13373 */ 13374 sd_start_cmds(un, bp); 13375 } else { 13376 /* 13377 * Normal command -- add it to the wait queue, then start 13378 * transporting commands from the wait queue. 13379 */ 13380 sd_add_buf_to_waitq(un, bp); 13381 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 13382 sd_start_cmds(un, NULL); 13383 } 13384 13385 mutex_exit(SD_MUTEX(un)); 13386 13387 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 13388 } 13389 13390 13391 /* 13392 * Function: sd_init_cdb_limits 13393 * 13394 * Description: This is to handle scsi_pkt initialization differences 13395 * between the driver platforms. 
13396 * 13397 * Legacy behaviors: 13398 * 13399 * If the block number or the sector count exceeds the 13400 * capabilities of a Group 0 command, shift over to a 13401 * Group 1 command. We don't blindly use Group 1 13402 * commands because a) some drives (CDC Wren IVs) get a 13403 * bit confused, and b) there is probably a fair amount 13404 * of speed difference for a target to receive and decode 13405 * a 10 byte command instead of a 6 byte command. 13406 * 13407 * The xfer time difference of 6 vs 10 byte CDBs is 13408 * still significant so this code is still worthwhile. 13409 * 10 byte CDBs are very inefficient with the fas HBA driver 13410 * and older disks. Each CDB byte took 1 usec with some 13411 * popular disks. 13412 * 13413 * Context: Must be called at attach time 13414 */ 13415 13416 static void 13417 sd_init_cdb_limits(struct sd_lun *un) 13418 { 13419 int hba_cdb_limit; 13420 13421 /* 13422 * Use CDB_GROUP1 commands for most devices except for 13423 * parallel SCSI fixed drives in which case we get better 13424 * performance using CDB_GROUP0 commands (where applicable). 13425 */ 13426 un->un_mincdb = SD_CDB_GROUP1; 13427 #if !defined(__fibre) 13428 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 13429 !un->un_f_has_removable_media) { 13430 un->un_mincdb = SD_CDB_GROUP0; 13431 } 13432 #endif 13433 13434 /* 13435 * Try to read the max-cdb-length supported by HBA. 
13436 */ 13437 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 13438 if (0 >= un->un_max_hba_cdb) { 13439 un->un_max_hba_cdb = CDB_GROUP4; 13440 hba_cdb_limit = SD_CDB_GROUP4; 13441 } else if (0 < un->un_max_hba_cdb && 13442 un->un_max_hba_cdb < CDB_GROUP1) { 13443 hba_cdb_limit = SD_CDB_GROUP0; 13444 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 13445 un->un_max_hba_cdb < CDB_GROUP5) { 13446 hba_cdb_limit = SD_CDB_GROUP1; 13447 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 13448 un->un_max_hba_cdb < CDB_GROUP4) { 13449 hba_cdb_limit = SD_CDB_GROUP5; 13450 } else { 13451 hba_cdb_limit = SD_CDB_GROUP4; 13452 } 13453 13454 /* 13455 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 13456 * commands for fixed disks unless we are building for a 32 bit 13457 * kernel. 13458 */ 13459 #ifdef _LP64 13460 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13461 min(hba_cdb_limit, SD_CDB_GROUP4); 13462 #else 13463 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13464 min(hba_cdb_limit, SD_CDB_GROUP1); 13465 #endif 13466 13467 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 13468 ? sizeof (struct scsi_arq_status) : 1); 13469 un->un_cmd_timeout = (ushort_t)sd_io_time; 13470 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 13471 } 13472 13473 13474 /* 13475 * Function: sd_initpkt_for_buf 13476 * 13477 * Description: Allocate and initialize for transport a scsi_pkt struct, 13478 * based upon the info specified in the given buf struct. 13479 * 13480 * Assumes the xb_blkno in the request is absolute (ie, 13481 * relative to the start of the device (NOT partition!). 13482 * Also assumes that the request is using the native block 13483 * size of the device (as returned by the READ CAPACITY 13484 * command). 
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE
 *		SD_PKT_ALLOC_FAILURE_NO_DMA
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from software interrupt context
 *	    as part of a sdrunout callback. This function may not block or
 *	    call routines that block
 */

static int
sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp = NULL;
	struct sd_lun	*un;
	size_t		blockcount;
	daddr_t		startblock;
	int		rval;
	int		cmd_flags;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	/* Caller must hold SD_MUTEX; it is dropped while building the pkt. */
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);

	mutex_exit(SD_MUTEX(un));

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
	if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
		/*
		 * Already have a scsi_pkt -- just need DMA resources.
		 * We must recompute the CDB in case the mapping returns
		 * a nonzero pkt_resid.
		 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
		 * that is being retried, the unmap/remap of the DMA resources
		 * will result in the entire transfer starting over again
		 * from the very first block.
		 */
		ASSERT(xp->xb_pktp != NULL);
		pktp = xp->xb_pktp;
	} else {
		pktp = NULL;
	}
#endif /* __i386 || __amd64 */

	startblock = xp->xb_blkno;	/* Absolute block num. */
	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);

	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);

	/*
	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
	 * call scsi_init_pkt, and build the CDB.
	 */
	rval = sd_setup_rw_pkt(un, &pktp, bp,
	    cmd_flags, sdrunout, (caddr_t)un,
	    startblock, blockcount);

	if (rval == 0) {
		/*
		 * Success.
		 *
		 * If partial DMA is being used and required for this transfer,
		 * set it up here.
		 */
		if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
		    (pktp->pkt_resid != 0)) {

			/*
			 * Save the CDB length and pkt_resid for the
			 * next xfer
			 */
			xp->xb_dma_resid = pktp->pkt_resid;

			/* rezero resid */
			pktp->pkt_resid = 0;

		} else {
			xp->xb_dma_resid = 0;
		}

		pktp->pkt_flags = un->un_tagflags;
		pktp->pkt_time = un->un_cmd_timeout;
		pktp->pkt_comp = sdintr;

		pktp->pkt_private = bp;
		*pktpp = pktp;

		SD_TRACE(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		/* Packet now has DMA resources again; clear the freed flag. */
		xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
#endif

		mutex_enter(SD_MUTEX(un));
		return (SD_PKT_ALLOC_SUCCESS);

	}

	/*
	 * SD_PKT_ALLOC_FAILURE is the only expected failure code
	 * from sd_setup_rw_pkt.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	if (rval == SD_PKT_ALLOC_FAILURE) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detach while the state is RWAIT.
		 */
		mutex_enter(SD_MUTEX(un));
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	} else {
		/*
		 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL
		 *
		 * This should never happen. Maybe someone messed with the
		 * kernel's minphys?
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Request rejected: too large for CDB: "
		    "lba:0x%08lx len:0x%08lx\n", startblock, blockcount);
		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
		mutex_enter(SD_MUTEX(un));
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);

	}
}


/*
 * Function:	sd_destroypkt_for_buf
 *
 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
 *
 * Context: Kernel thread or interrupt context
 */

static void
sd_destroypkt_for_buf(struct buf *bp)
{
	ASSERT(bp != NULL);
	ASSERT(SD_GET_UN(bp) != NULL);

	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
	    "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);

	ASSERT(SD_GET_PKTP(bp) != NULL);
	scsi_destroy_pkt(SD_GET_PKTP(bp));

	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
	    "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
}

/*
 * Function:	sd_setup_rw_pkt
 *
 * Description: Determines appropriate CDB group for the requested LBA
 *		and transfer length, calls scsi_init_pkt, and builds
 *		the CDB. Do not use for partial DMA transfers except
 *		for the initial transfer since the CDB size must
 *		remain constant.
 *
 * Context: Kernel thread and may be called from software interrupt
 *	    context as part of a sdrunout callback.
This function may not
 *	    block or call routines that block
 */


int
sd_setup_rw_pkt(struct sd_lun *un,
    struct scsi_pkt **pktpp, struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount)
{
	struct scsi_pkt *return_pktp;
	union scsi_cdb *cdbp;
	struct sd_cdbinfo *cp = NULL;
	int i;

	/*
	 * See which size CDB to use, based upon the request.
	 */
	for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {

		/*
		 * Check lba and block count against sd_cdbtab limits.
		 * In the partial DMA case, we have to use the same size
		 * CDB for all the transfers.  Check lba + blockcount
		 * against the max LBA so we know that segment of the
		 * transfer can use the CDB we select.
		 */
		if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
		    (blockcount <= sd_cdbtab[i].sc_maxlen)) {

			/*
			 * The command will fit into the CDB type
			 * specified by sd_cdbtab[i].
			 */
			cp = sd_cdbtab + i;

			/*
			 * Call scsi_init_pkt so we can fill in the
			 * CDB.
			 */
			return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
			    bp, cp->sc_grpcode, un->un_status_len, 0,
			    flags, callback, callback_arg);

			if (return_pktp != NULL) {

				/*
				 * Return new value of pkt
				 */
				*pktpp = return_pktp;

				/*
				 * To be safe, zero the CDB insuring there is
				 * no leftover data from a previous command.
				 */
				bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);

				/*
				 * Handle partial DMA mapping
				 */
				if (return_pktp->pkt_resid != 0) {

					/*
					 * Not going to xfer as many blocks as
					 * originally expected
					 */
					blockcount -=
					    SD_BYTES2TGTBLOCKS(un,
					    return_pktp->pkt_resid);
				}

				cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;

				/*
				 * Set command byte based on the CDB
				 * type we matched.
				 */
				cdbp->scc_cmd = cp->sc_grpmask |
				    ((bp->b_flags & B_READ) ?
				    SCMD_READ : SCMD_WRITE);

				SD_FILL_SCSI1_LUN(un, return_pktp);

				/*
				 * Fill in LBA and length
				 */
				ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
				    (cp->sc_grpcode == CDB_GROUP4) ||
				    (cp->sc_grpcode == CDB_GROUP0) ||
				    (cp->sc_grpcode == CDB_GROUP5));

				if (cp->sc_grpcode == CDB_GROUP1) {
					FORMG1ADDR(cdbp, lba);
					FORMG1COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP4) {
					FORMG4LONGADDR(cdbp, lba);
					FORMG4COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP0) {
					FORMG0ADDR(cdbp, lba);
					FORMG0COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP5) {
					FORMG5ADDR(cdbp, lba);
					FORMG5COUNT(cdbp, blockcount);
					return (0);
				}

				/*
				 * It should be impossible to not match one
				 * of the CDB types above, so we should never
				 * reach this point.  Set the CDB command byte
				 * to test-unit-ready to avoid writing
				 * to somewhere we don't intend.
				 */
				cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
				return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
			} else {
				/*
				 * Couldn't get scsi_pkt
				 */
				return (SD_PKT_ALLOC_FAILURE);
			}
		}
	}

	/*
	 * None of the available CDB types were suitable.  This really
	 * should never happen: on a 64 bit system we support
	 * READ16/WRITE16 which will hold an entire 64 bit disk address
	 * and on a 32 bit system we will refuse to bind to a device
	 * larger than 2TB so addresses will never be larger than 32 bits.
	 */
	return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
}

/*
 * Function:	sd_setup_next_rw_pkt
 *
 * Description: Setup packet for partial DMA transfers, except for the
 *		initial transfer.  sd_setup_rw_pkt should be used for
 *		the initial transfer.
 *
 * Context: Kernel thread and may be called from interrupt context.
 */

int
sd_setup_next_rw_pkt(struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp,
    diskaddr_t lba, uint32_t blockcount)
{
	uchar_t com;
	union scsi_cdb *cdbp;
	uchar_t cdb_group_id;

	ASSERT(pktp != NULL);
	ASSERT(pktp->pkt_cdbp != NULL);

	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
	com = cdbp->scc_cmd;
	cdb_group_id = CDB_GROUPID(com);

	ASSERT((cdb_group_id == CDB_GROUPID_0) ||
	    (cdb_group_id == CDB_GROUPID_1) ||
	    (cdb_group_id == CDB_GROUPID_4) ||
	    (cdb_group_id == CDB_GROUPID_5));

	/*
	 * Move pkt to the next portion of the xfer.
	 * func is NULL_FUNC so we do not have to release
	 * the disk mutex here.
	 */
	if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0,
	    NULL_FUNC, NULL) == pktp) {
		/* Success.  Handle partial DMA */
		if (pktp->pkt_resid != 0) {
			blockcount -=
			    SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
		}

		cdbp->scc_cmd = com;
		SD_FILL_SCSI1_LUN(un, pktp);
		if (cdb_group_id == CDB_GROUPID_1) {
			FORMG1ADDR(cdbp, lba);
			FORMG1COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_4) {
			FORMG4LONGADDR(cdbp, lba);
			FORMG4COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_0) {
			FORMG0ADDR(cdbp, lba);
			FORMG0COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_5) {
			FORMG5ADDR(cdbp, lba);
			FORMG5COUNT(cdbp, blockcount);
			return (0);
		}

		/* Unreachable: the ASSERT above covers all group IDs. */
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
	}

	/*
	 * Error setting up next portion of cmd transfer.
	 * Something is definitely very wrong and this
	 * should not happen.
	 */
	return (SD_PKT_ALLOC_FAILURE);
}

/*
 * Function:	sd_initpkt_for_uscsi
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given uscsi_cmd struct.
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE
 *		SD_PKT_ALLOC_FAILURE_NO_DMA
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from software interrupt context
 *	    as part of a sdrunout callback.
This function may not block or
 *	    call routines that block
 */

static int
sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_lun	*un;
	uint32_t	flags = 0;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);

	/*
	 * Allocate the scsi_pkt for the command.
	 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
	 *	 during scsi_init_pkt time and will continue to use the
	 *	 same path as long as the same scsi_pkt is used without
	 *	 intervening scsi_dma_free(). Since uscsi command does
	 *	 not call scsi_dmafree() before retry failed command, it
	 *	 is necessary to make sure PKT_DMA_PARTIAL flag is NOT
	 *	 set such that scsi_vhci can use other available path for
	 *	 retry. Besides, uscsi command does not allow DMA breakup,
	 *	 so there is no need to set PKT_DMA_PARTIAL flag.
	 *
	 * A request sense allocation larger than SENSE_LENGTH requires
	 * the extended status area (PKT_XARQ).
	 */
	if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
		    - sizeof (struct scsi_extended_sense)), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
		    sdrunout, (caddr_t)un);
	} else {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    sizeof (struct scsi_arq_status), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
		    sdrunout, (caddr_t)un);
	}

	if (pktp == NULL) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detach while the state is RWAIT.
		 */
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	}

	/*
	 * We do not do DMA breakup for USCSI commands, so return failure
	 * here if all the needed DMA resources were not allocated.
	 */
	if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
	    (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
		scsi_destroy_pkt(pktp);
		SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
		    "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
		return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
	}

	/* Init the cdb from the given uscsi struct */
	(void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
	    uscmd->uscsi_cdb[0], 0, 0, 0);

	SD_FILL_SCSI1_LUN(un, pktp);

	/*
	 * Set up the optional USCSI flags. See the uscsi (7I) man page
	 * for listing of the supported flags.
	 */

	if (uscmd->uscsi_flags & USCSI_SILENT) {
		flags |= FLAG_SILENT;
	}

	if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
		flags |= FLAG_DIAGNOSE;
	}

	if (uscmd->uscsi_flags & USCSI_ISOLATE) {
		flags |= FLAG_ISOLATE;
	}

	/* Wide/sync renegotiation only applies to parallel SCSI. */
	if (un->un_f_is_fibre == FALSE) {
		if (uscmd->uscsi_flags & USCSI_RENEGOT) {
			flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
		}
	}

	/*
	 * Set the pkt flags here so we save time later.
	 * Note: These flags are NOT in the uscsi man page!!!
	 */
	if (uscmd->uscsi_flags & USCSI_HEAD) {
		flags |= FLAG_HEAD;
	}

	if (uscmd->uscsi_flags & USCSI_NOINTR) {
		flags |= FLAG_NOINTR;
	}

	/*
	 * For tagged queueing, things get a bit complicated.
	 * Check first for head of queue and last for ordered queue.
	 * If neither head nor order, use the default driver tag flags.
	 */
	if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
		if (uscmd->uscsi_flags & USCSI_HTAG) {
			flags |= FLAG_HTAG;
		} else if (uscmd->uscsi_flags & USCSI_OTAG) {
			flags |= FLAG_OTAG;
		} else {
			flags |= un->un_tagflags & FLAG_TAGMASK;
		}
	}

	if (uscmd->uscsi_flags & USCSI_NODISCON) {
		flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
	}

	pktp->pkt_flags = flags;

	/* Transfer uscsi information to scsi_pkt */
	(void) scsi_uscsi_pktinit(uscmd, pktp);

	/* Copy the caller's CDB into the pkt... */
	bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);

	/* A zero uscsi_timeout means "use the driver default". */
	if (uscmd->uscsi_timeout == 0) {
		pktp->pkt_time = un->un_uscsi_timeout;
	} else {
		pktp->pkt_time = uscmd->uscsi_timeout;
	}

	/* need it later to identify USCSI request in sdintr */
	xp->xb_pkt_flags |= SD_XB_USCSICMD;

	xp->xb_sense_resid = uscmd->uscsi_rqresid;

	pktp->pkt_private = bp;
	pktp->pkt_comp = sdintr;
	*pktpp = pktp;

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);

	return (SD_PKT_ALLOC_SUCCESS);
}


/*
 * Function:	sd_destroypkt_for_uscsi
 *
 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
 *		IOs.. Also saves relevant info into the associated uscsi_cmd
 *		struct.
 *
 * Context: May be called under interrupt context
 */

static void
sd_destroypkt_for_uscsi(struct buf *bp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_lun	*un;
	struct sd_uscsi_info *suip;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	/* Save the status and the residual into the uscsi_cmd struct */
	uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
	uscmd->uscsi_resid = bp->b_resid;

	/* Transfer scsi_pkt information to uscsi */
	(void) scsi_uscsi_pktfini(pktp, uscmd);

	/*
	 * If enabled, copy any saved sense data into the area specified
	 * by the uscsi command.
	 */
	if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
	    (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
		/*
		 * Note: uscmd->uscsi_rqbuf should always point to a buffer
		 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
		 */
		uscmd->uscsi_rqstatus = xp->xb_sense_status;
		uscmd->uscsi_rqresid = xp->xb_sense_resid;
		if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
			    MAX_SENSE_LENGTH);
		} else {
			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
			    SENSE_LENGTH);
		}
	}
	/*
	 * The following assignments are for SCSI FMA.
	 */
	ASSERT(xp->xb_private != NULL);
	suip = (struct sd_uscsi_info *)xp->xb_private;
	suip->ui_pkt_reason = pktp->pkt_reason;
	suip->ui_pkt_state = pktp->pkt_state;
	suip->ui_pkt_statistics = pktp->pkt_statistics;
	suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

	/* We are done with the scsi_pkt; free it now */
	ASSERT(SD_GET_PKTP(bp) != NULL);
	scsi_destroy_pkt(SD_GET_PKTP(bp));

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
}


/*
 * Function:	sd_bioclone_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments.  The associated sd_xbuf
 *		struct is (nearly) duplicated.  The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 * Arguments:	bp - ptr to the buf(9S) to be "shadowed"
 *		datalen - size of data area for the shadow bp
 *		blkno - starting LBA
 *		func - function pointer for b_iodone in the shadow buf. (May
 *		be NULL if none.)
 *
 * Return Code: Pointer to allocated buf(9S) struct
 *
 * Context: Can sleep.
14169 */ 14170 14171 static struct buf * 14172 sd_bioclone_alloc(struct buf *bp, size_t datalen, 14173 daddr_t blkno, int (*func)(struct buf *)) 14174 { 14175 struct sd_lun *un; 14176 struct sd_xbuf *xp; 14177 struct sd_xbuf *new_xp; 14178 struct buf *new_bp; 14179 14180 ASSERT(bp != NULL); 14181 xp = SD_GET_XBUF(bp); 14182 ASSERT(xp != NULL); 14183 un = SD_GET_UN(bp); 14184 ASSERT(un != NULL); 14185 ASSERT(!mutex_owned(SD_MUTEX(un))); 14186 14187 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 14188 NULL, KM_SLEEP); 14189 14190 new_bp->b_lblkno = blkno; 14191 14192 /* 14193 * Allocate an xbuf for the shadow bp and copy the contents of the 14194 * original xbuf into it. 14195 */ 14196 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14197 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 14198 14199 /* 14200 * The given bp is automatically saved in the xb_private member 14201 * of the new xbuf. Callers are allowed to depend on this. 14202 */ 14203 new_xp->xb_private = bp; 14204 14205 new_bp->b_private = new_xp; 14206 14207 return (new_bp); 14208 } 14209 14210 /* 14211 * Function: sd_shadow_buf_alloc 14212 * 14213 * Description: Allocate a buf(9S) and init it as per the given buf 14214 * and the various arguments. The associated sd_xbuf 14215 * struct is (nearly) duplicated. The struct buf *bp 14216 * argument is saved in new_xp->xb_private. 14217 * 14218 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 14219 * datalen - size of data area for the shadow bp 14220 * bflags - B_READ or B_WRITE (pseudo flag) 14221 * blkno - starting LBA 14222 * func - function pointer for b_iodone in the shadow buf. (May 14223 * be NULL if none.) 14224 * 14225 * Return Code: Pointer to allocates buf(9S) struct 14226 * 14227 * Context: Can sleep. 
14228 */ 14229 14230 static struct buf * 14231 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 14232 daddr_t blkno, int (*func)(struct buf *)) 14233 { 14234 struct sd_lun *un; 14235 struct sd_xbuf *xp; 14236 struct sd_xbuf *new_xp; 14237 struct buf *new_bp; 14238 14239 ASSERT(bp != NULL); 14240 xp = SD_GET_XBUF(bp); 14241 ASSERT(xp != NULL); 14242 un = SD_GET_UN(bp); 14243 ASSERT(un != NULL); 14244 ASSERT(!mutex_owned(SD_MUTEX(un))); 14245 14246 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 14247 bp_mapin(bp); 14248 } 14249 14250 bflags &= (B_READ | B_WRITE); 14251 #if defined(__i386) || defined(__amd64) 14252 new_bp = getrbuf(KM_SLEEP); 14253 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 14254 new_bp->b_bcount = datalen; 14255 new_bp->b_flags = bflags | 14256 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 14257 #else 14258 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 14259 datalen, bflags, SLEEP_FUNC, NULL); 14260 #endif 14261 new_bp->av_forw = NULL; 14262 new_bp->av_back = NULL; 14263 new_bp->b_dev = bp->b_dev; 14264 new_bp->b_blkno = blkno; 14265 new_bp->b_iodone = func; 14266 new_bp->b_edev = bp->b_edev; 14267 new_bp->b_resid = 0; 14268 14269 /* We need to preserve the B_FAILFAST flag */ 14270 if (bp->b_flags & B_FAILFAST) { 14271 new_bp->b_flags |= B_FAILFAST; 14272 } 14273 14274 /* 14275 * Allocate an xbuf for the shadow bp and copy the contents of the 14276 * original xbuf into it. 14277 */ 14278 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14279 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 14280 14281 /* Need later to copy data between the shadow buf & original buf! */ 14282 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 14283 14284 /* 14285 * The given bp is automatically saved in the xb_private member 14286 * of the new xbuf. Callers are allowed to depend on this. 
14287 */ 14288 new_xp->xb_private = bp; 14289 14290 new_bp->b_private = new_xp; 14291 14292 return (new_bp); 14293 } 14294 14295 /* 14296 * Function: sd_bioclone_free 14297 * 14298 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 14299 * in the larger than partition operation. 14300 * 14301 * Context: May be called under interrupt context 14302 */ 14303 14304 static void 14305 sd_bioclone_free(struct buf *bp) 14306 { 14307 struct sd_xbuf *xp; 14308 14309 ASSERT(bp != NULL); 14310 xp = SD_GET_XBUF(bp); 14311 ASSERT(xp != NULL); 14312 14313 /* 14314 * Call bp_mapout() before freeing the buf, in case a lower 14315 * layer or HBA had done a bp_mapin(). we must do this here 14316 * as we are the "originator" of the shadow buf. 14317 */ 14318 bp_mapout(bp); 14319 14320 /* 14321 * Null out b_iodone before freeing the bp, to ensure that the driver 14322 * never gets confused by a stale value in this field. (Just a little 14323 * extra defensiveness here.) 14324 */ 14325 bp->b_iodone = NULL; 14326 14327 freerbuf(bp); 14328 14329 kmem_free(xp, sizeof (struct sd_xbuf)); 14330 } 14331 14332 /* 14333 * Function: sd_shadow_buf_free 14334 * 14335 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 14336 * 14337 * Context: May be called under interrupt context 14338 */ 14339 14340 static void 14341 sd_shadow_buf_free(struct buf *bp) 14342 { 14343 struct sd_xbuf *xp; 14344 14345 ASSERT(bp != NULL); 14346 xp = SD_GET_XBUF(bp); 14347 ASSERT(xp != NULL); 14348 14349 #if defined(__sparc) 14350 /* 14351 * Call bp_mapout() before freeing the buf, in case a lower 14352 * layer or HBA had done a bp_mapin(). we must do this here 14353 * as we are the "originator" of the shadow buf. 14354 */ 14355 bp_mapout(bp); 14356 #endif 14357 14358 /* 14359 * Null out b_iodone before freeing the bp, to ensure that the driver 14360 * never gets confused by a stale value in this field. (Just a little 14361 * extra defensiveness here.) 
14362 */ 14363 bp->b_iodone = NULL; 14364 14365 #if defined(__i386) || defined(__amd64) 14366 kmem_free(bp->b_un.b_addr, bp->b_bcount); 14367 freerbuf(bp); 14368 #else 14369 scsi_free_consistent_buf(bp); 14370 #endif 14371 14372 kmem_free(xp, sizeof (struct sd_xbuf)); 14373 } 14374 14375 14376 /* 14377 * Function: sd_print_transport_rejected_message 14378 * 14379 * Description: This implements the ludicrously complex rules for printing 14380 * a "transport rejected" message. This is to address the 14381 * specific problem of having a flood of this error message 14382 * produced when a failover occurs. 14383 * 14384 * Context: Any. 14385 */ 14386 14387 static void 14388 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 14389 int code) 14390 { 14391 ASSERT(un != NULL); 14392 ASSERT(mutex_owned(SD_MUTEX(un))); 14393 ASSERT(xp != NULL); 14394 14395 /* 14396 * Print the "transport rejected" message under the following 14397 * conditions: 14398 * 14399 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 14400 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 14401 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 14402 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 14403 * scsi_transport(9F) (which indicates that the target might have 14404 * gone off-line). This uses the un->un_tran_fatal_count 14405 * count, which is incremented whenever a TRAN_FATAL_ERROR is 14406 * received, and reset to zero whenver a TRAN_ACCEPT is returned 14407 * from scsi_transport(). 14408 * 14409 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 14410 * the preceeding cases in order for the message to be printed. 
14411 */ 14412 if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) && 14413 (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) { 14414 if ((sd_level_mask & SD_LOGMASK_DIAG) || 14415 (code != TRAN_FATAL_ERROR) || 14416 (un->un_tran_fatal_count == 1)) { 14417 switch (code) { 14418 case TRAN_BADPKT: 14419 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14420 "transport rejected bad packet\n"); 14421 break; 14422 case TRAN_FATAL_ERROR: 14423 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14424 "transport rejected fatal error\n"); 14425 break; 14426 default: 14427 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14428 "transport rejected (%d)\n", code); 14429 break; 14430 } 14431 } 14432 } 14433 } 14434 14435 14436 /* 14437 * Function: sd_add_buf_to_waitq 14438 * 14439 * Description: Add the given buf(9S) struct to the wait queue for the 14440 * instance. If sorting is enabled, then the buf is added 14441 * to the queue via an elevator sort algorithm (a la 14442 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 14443 * If sorting is not enabled, then the buf is just added 14444 * to the end of the wait queue. 14445 * 14446 * Return Code: void 14447 * 14448 * Context: Does not sleep/block, therefore technically can be called 14449 * from any context. However if sorting is enabled then the 14450 * execution time is indeterminate, and may take long if 14451 * the wait queue grows large. 14452 */ 14453 14454 static void 14455 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 14456 { 14457 struct buf *ap; 14458 14459 ASSERT(bp != NULL); 14460 ASSERT(un != NULL); 14461 ASSERT(mutex_owned(SD_MUTEX(un))); 14462 14463 /* If the queue is empty, add the buf as the only entry & return. 
*/ 14464 if (un->un_waitq_headp == NULL) { 14465 ASSERT(un->un_waitq_tailp == NULL); 14466 un->un_waitq_headp = un->un_waitq_tailp = bp; 14467 bp->av_forw = NULL; 14468 return; 14469 } 14470 14471 ASSERT(un->un_waitq_tailp != NULL); 14472 14473 /* 14474 * If sorting is disabled, just add the buf to the tail end of 14475 * the wait queue and return. 14476 */ 14477 if (un->un_f_disksort_disabled) { 14478 un->un_waitq_tailp->av_forw = bp; 14479 un->un_waitq_tailp = bp; 14480 bp->av_forw = NULL; 14481 return; 14482 } 14483 14484 /* 14485 * Sort thru the list of requests currently on the wait queue 14486 * and add the new buf request at the appropriate position. 14487 * 14488 * The un->un_waitq_headp is an activity chain pointer on which 14489 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 14490 * first queue holds those requests which are positioned after 14491 * the current SD_GET_BLKNO() (in the first request); the second holds 14492 * requests which came in after their SD_GET_BLKNO() number was passed. 14493 * Thus we implement a one way scan, retracting after reaching 14494 * the end of the drive to the first request on the second 14495 * queue, at which time it becomes the first queue. 14496 * A one-way scan is natural because of the way UNIX read-ahead 14497 * blocks are allocated. 14498 * 14499 * If we lie after the first request, then we must locate the 14500 * second request list and add ourselves to it. 14501 */ 14502 ap = un->un_waitq_headp; 14503 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 14504 while (ap->av_forw != NULL) { 14505 /* 14506 * Look for an "inversion" in the (normally 14507 * ascending) block numbers. This indicates 14508 * the start of the second request list. 14509 */ 14510 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 14511 /* 14512 * Search the second request list for the 14513 * first request at a larger block number. 14514 * We go before that; however if there is 14515 * no such request, we go at the end. 
14516 */ 14517 do { 14518 if (SD_GET_BLKNO(bp) < 14519 SD_GET_BLKNO(ap->av_forw)) { 14520 goto insert; 14521 } 14522 ap = ap->av_forw; 14523 } while (ap->av_forw != NULL); 14524 goto insert; /* after last */ 14525 } 14526 ap = ap->av_forw; 14527 } 14528 14529 /* 14530 * No inversions... we will go after the last, and 14531 * be the first request in the second request list. 14532 */ 14533 goto insert; 14534 } 14535 14536 /* 14537 * Request is at/after the current request... 14538 * sort in the first request list. 14539 */ 14540 while (ap->av_forw != NULL) { 14541 /* 14542 * We want to go after the current request (1) if 14543 * there is an inversion after it (i.e. it is the end 14544 * of the first request list), or (2) if the next 14545 * request is a larger block no. than our request. 14546 */ 14547 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 14548 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 14549 goto insert; 14550 } 14551 ap = ap->av_forw; 14552 } 14553 14554 /* 14555 * Neither a second list nor a larger request, therefore 14556 * we go at the end of the first list (which is the same 14557 * as the end of the whole schebang). 14558 */ 14559 insert: 14560 bp->av_forw = ap->av_forw; 14561 ap->av_forw = bp; 14562 14563 /* 14564 * If we inserted onto the tail end of the waitq, make sure the 14565 * tail pointer is updated. 14566 */ 14567 if (ap == un->un_waitq_tailp) { 14568 un->un_waitq_tailp = bp; 14569 } 14570 } 14571 14572 14573 /* 14574 * Function: sd_start_cmds 14575 * 14576 * Description: Remove and transport cmds from the driver queues. 14577 * 14578 * Arguments: un - pointer to the unit (soft state) struct for the target. 14579 * 14580 * immed_bp - ptr to a buf to be transported immediately. Only 14581 * the immed_bp is transported; bufs on the waitq are not 14582 * processed and the un_retry_bp is not checked. If immed_bp is 14583 * NULL, then normal queue processing is performed. 
14584 * 14585 * Context: May be called from kernel thread context, interrupt context, 14586 * or runout callback context. This function may not block or 14587 * call routines that block. 14588 */ 14589 14590 static void 14591 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 14592 { 14593 struct sd_xbuf *xp; 14594 struct buf *bp; 14595 void (*statp)(kstat_io_t *); 14596 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14597 void (*saved_statp)(kstat_io_t *); 14598 #endif 14599 int rval; 14600 struct sd_fm_internal *sfip = NULL; 14601 14602 ASSERT(un != NULL); 14603 ASSERT(mutex_owned(SD_MUTEX(un))); 14604 ASSERT(un->un_ncmds_in_transport >= 0); 14605 ASSERT(un->un_throttle >= 0); 14606 14607 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 14608 14609 do { 14610 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14611 saved_statp = NULL; 14612 #endif 14613 14614 /* 14615 * If we are syncing or dumping, fail the command to 14616 * avoid recursively calling back into scsi_transport(). 14617 * The dump I/O itself uses a separate code path so this 14618 * only prevents non-dump I/O from being sent while dumping. 14619 * File system sync takes place before dumping begins. 14620 * During panic, filesystem I/O is allowed provided 14621 * un_in_callback is <= 1. This is to prevent recursion 14622 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14623 * sd_start_cmds and so on. See panic.c for more information 14624 * about the states the system can be in during panic. 14625 */ 14626 if ((un->un_state == SD_STATE_DUMPING) || 14627 (ddi_in_panic() && (un->un_in_callback > 1))) { 14628 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14629 "sd_start_cmds: panicking\n"); 14630 goto exit; 14631 } 14632 14633 if ((bp = immed_bp) != NULL) { 14634 /* 14635 * We have a bp that must be transported immediately. 
14636 * It's OK to transport the immed_bp here without doing 14637 * the throttle limit check because the immed_bp is 14638 * always used in a retry/recovery case. This means 14639 * that we know we are not at the throttle limit by 14640 * virtue of the fact that to get here we must have 14641 * already gotten a command back via sdintr(). This also 14642 * relies on (1) the command on un_retry_bp preventing 14643 * further commands from the waitq from being issued; 14644 * and (2) the code in sd_retry_command checking the 14645 * throttle limit before issuing a delayed or immediate 14646 * retry. This holds even if the throttle limit is 14647 * currently ratcheted down from its maximum value. 14648 */ 14649 statp = kstat_runq_enter; 14650 if (bp == un->un_retry_bp) { 14651 ASSERT((un->un_retry_statp == NULL) || 14652 (un->un_retry_statp == kstat_waitq_enter) || 14653 (un->un_retry_statp == 14654 kstat_runq_back_to_waitq)); 14655 /* 14656 * If the waitq kstat was incremented when 14657 * sd_set_retry_bp() queued this bp for a retry, 14658 * then we must set up statp so that the waitq 14659 * count will get decremented correctly below. 14660 * Also we must clear un->un_retry_statp to 14661 * ensure that we do not act on a stale value 14662 * in this field. 
14663 */ 14664 if ((un->un_retry_statp == kstat_waitq_enter) || 14665 (un->un_retry_statp == 14666 kstat_runq_back_to_waitq)) { 14667 statp = kstat_waitq_to_runq; 14668 } 14669 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14670 saved_statp = un->un_retry_statp; 14671 #endif 14672 un->un_retry_statp = NULL; 14673 14674 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14675 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 14676 "un_throttle:%d un_ncmds_in_transport:%d\n", 14677 un, un->un_retry_bp, un->un_throttle, 14678 un->un_ncmds_in_transport); 14679 } else { 14680 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 14681 "processing priority bp:0x%p\n", bp); 14682 } 14683 14684 } else if ((bp = un->un_waitq_headp) != NULL) { 14685 /* 14686 * A command on the waitq is ready to go, but do not 14687 * send it if: 14688 * 14689 * (1) the throttle limit has been reached, or 14690 * (2) a retry is pending, or 14691 * (3) a START_STOP_UNIT callback pending, or 14692 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 14693 * command is pending. 14694 * 14695 * For all of these conditions, IO processing will 14696 * restart after the condition is cleared. 14697 */ 14698 if (un->un_ncmds_in_transport >= un->un_throttle) { 14699 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14700 "sd_start_cmds: exiting, " 14701 "throttle limit reached!\n"); 14702 goto exit; 14703 } 14704 if (un->un_retry_bp != NULL) { 14705 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14706 "sd_start_cmds: exiting, retry pending!\n"); 14707 goto exit; 14708 } 14709 if (un->un_startstop_timeid != NULL) { 14710 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14711 "sd_start_cmds: exiting, " 14712 "START_STOP pending!\n"); 14713 goto exit; 14714 } 14715 if (un->un_direct_priority_timeid != NULL) { 14716 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14717 "sd_start_cmds: exiting, " 14718 "SD_PATH_DIRECT_PRIORITY cmd. 
pending!\n"); 14719 goto exit; 14720 } 14721 14722 /* Dequeue the command */ 14723 un->un_waitq_headp = bp->av_forw; 14724 if (un->un_waitq_headp == NULL) { 14725 un->un_waitq_tailp = NULL; 14726 } 14727 bp->av_forw = NULL; 14728 statp = kstat_waitq_to_runq; 14729 SD_TRACE(SD_LOG_IO_CORE, un, 14730 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 14731 14732 } else { 14733 /* No work to do so bail out now */ 14734 SD_TRACE(SD_LOG_IO_CORE, un, 14735 "sd_start_cmds: no more work, exiting!\n"); 14736 goto exit; 14737 } 14738 14739 /* 14740 * Reset the state to normal. This is the mechanism by which 14741 * the state transitions from either SD_STATE_RWAIT or 14742 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 14743 * If state is SD_STATE_PM_CHANGING then this command is 14744 * part of the device power control and the state must 14745 * not be put back to normal. Doing so would would 14746 * allow new commands to proceed when they shouldn't, 14747 * the device may be going off. 14748 */ 14749 if ((un->un_state != SD_STATE_SUSPENDED) && 14750 (un->un_state != SD_STATE_PM_CHANGING)) { 14751 New_state(un, SD_STATE_NORMAL); 14752 } 14753 14754 xp = SD_GET_XBUF(bp); 14755 ASSERT(xp != NULL); 14756 14757 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14758 /* 14759 * Allocate the scsi_pkt if we need one, or attach DMA 14760 * resources if we have a scsi_pkt that needs them. The 14761 * latter should only occur for commands that are being 14762 * retried. 14763 */ 14764 if ((xp->xb_pktp == NULL) || 14765 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14766 #else 14767 if (xp->xb_pktp == NULL) { 14768 #endif 14769 /* 14770 * There is no scsi_pkt allocated for this buf. Call 14771 * the initpkt function to allocate & init one. 14772 * 14773 * The scsi_init_pkt runout callback functionality is 14774 * implemented as follows: 14775 * 14776 * 1) The initpkt function always calls 14777 * scsi_init_pkt(9F) with sdrunout specified as the 14778 * callback routine. 
14779 * 2) A successful packet allocation is initialized and 14780 * the I/O is transported. 14781 * 3) The I/O associated with an allocation resource 14782 * failure is left on its queue to be retried via 14783 * runout or the next I/O. 14784 * 4) The I/O associated with a DMA error is removed 14785 * from the queue and failed with EIO. Processing of 14786 * the transport queues is also halted to be 14787 * restarted via runout or the next I/O. 14788 * 5) The I/O associated with a CDB size or packet 14789 * size error is removed from the queue and failed 14790 * with EIO. Processing of the transport queues is 14791 * continued. 14792 * 14793 * Note: there is no interface for canceling a runout 14794 * callback. To prevent the driver from detaching or 14795 * suspending while a runout is pending the driver 14796 * state is set to SD_STATE_RWAIT 14797 * 14798 * Note: using the scsi_init_pkt callback facility can 14799 * result in an I/O request persisting at the head of 14800 * the list which cannot be satisfied even after 14801 * multiple retries. In the future the driver may 14802 * implement some kind of maximum runout count before 14803 * failing an I/O. 14804 * 14805 * Note: the use of funcp below may seem superfluous, 14806 * but it helps warlock figure out the correct 14807 * initpkt function calls (see [s]sd.wlcmd). 14808 */ 14809 struct scsi_pkt *pktp; 14810 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14811 14812 ASSERT(bp != un->un_rqs_bp); 14813 14814 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14815 switch ((*funcp)(bp, &pktp)) { 14816 case SD_PKT_ALLOC_SUCCESS: 14817 xp->xb_pktp = pktp; 14818 SD_TRACE(SD_LOG_IO_CORE, un, 14819 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14820 pktp); 14821 goto got_pkt; 14822 14823 case SD_PKT_ALLOC_FAILURE: 14824 /* 14825 * Temporary (hopefully) resource depletion. 14826 * Since retries and RQS commands always have a 14827 * scsi_pkt allocated, these cases should never 14828 * get here. 
So the only cases this needs to 14829 * handle is a bp from the waitq (which we put 14830 * back onto the waitq for sdrunout), or a bp 14831 * sent as an immed_bp (which we just fail). 14832 */ 14833 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14834 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14835 14836 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14837 14838 if (bp == immed_bp) { 14839 /* 14840 * If SD_XB_DMA_FREED is clear, then 14841 * this is a failure to allocate a 14842 * scsi_pkt, and we must fail the 14843 * command. 14844 */ 14845 if ((xp->xb_pkt_flags & 14846 SD_XB_DMA_FREED) == 0) { 14847 break; 14848 } 14849 14850 /* 14851 * If this immediate command is NOT our 14852 * un_retry_bp, then we must fail it. 14853 */ 14854 if (bp != un->un_retry_bp) { 14855 break; 14856 } 14857 14858 /* 14859 * We get here if this cmd is our 14860 * un_retry_bp that was DMAFREED, but 14861 * scsi_init_pkt() failed to reallocate 14862 * DMA resources when we attempted to 14863 * retry it. This can happen when an 14864 * mpxio failover is in progress, but 14865 * we don't want to just fail the 14866 * command in this case. 14867 * 14868 * Use timeout(9F) to restart it after 14869 * a 100ms delay. We don't want to 14870 * let sdrunout() restart it, because 14871 * sdrunout() is just supposed to start 14872 * commands that are sitting on the 14873 * wait queue. The un_retry_bp stays 14874 * set until the command completes, but 14875 * sdrunout can be called many times 14876 * before that happens. Since sdrunout 14877 * cannot tell if the un_retry_bp is 14878 * already in the transport, it could 14879 * end up calling scsi_transport() for 14880 * the un_retry_bp multiple times. 14881 * 14882 * Also: don't schedule the callback 14883 * if some other callback is already 14884 * pending. 14885 */ 14886 if (un->un_retry_statp == NULL) { 14887 /* 14888 * restore the kstat pointer to 14889 * keep kstat counts coherent 14890 * when we do retry the command. 
14891 */ 14892 un->un_retry_statp = 14893 saved_statp; 14894 } 14895 14896 if ((un->un_startstop_timeid == NULL) && 14897 (un->un_retry_timeid == NULL) && 14898 (un->un_direct_priority_timeid == 14899 NULL)) { 14900 14901 un->un_retry_timeid = 14902 timeout( 14903 sd_start_retry_command, 14904 un, SD_RESTART_TIMEOUT); 14905 } 14906 goto exit; 14907 } 14908 14909 #else 14910 if (bp == immed_bp) { 14911 break; /* Just fail the command */ 14912 } 14913 #endif 14914 14915 /* Add the buf back to the head of the waitq */ 14916 bp->av_forw = un->un_waitq_headp; 14917 un->un_waitq_headp = bp; 14918 if (un->un_waitq_tailp == NULL) { 14919 un->un_waitq_tailp = bp; 14920 } 14921 goto exit; 14922 14923 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14924 /* 14925 * HBA DMA resource failure. Fail the command 14926 * and continue processing of the queues. 14927 */ 14928 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14929 "sd_start_cmds: " 14930 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14931 break; 14932 14933 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14934 /* 14935 * Note:x86: Partial DMA mapping not supported 14936 * for USCSI commands, and all the needed DMA 14937 * resources were not allocated. 14938 */ 14939 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14940 "sd_start_cmds: " 14941 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14942 break; 14943 14944 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14945 /* 14946 * Note:x86: Request cannot fit into CDB based 14947 * on lba and len. 14948 */ 14949 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14950 "sd_start_cmds: " 14951 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14952 break; 14953 14954 default: 14955 /* Should NEVER get here! */ 14956 panic("scsi_initpkt error"); 14957 /*NOTREACHED*/ 14958 } 14959 14960 /* 14961 * Fatal error in allocating a scsi_pkt for this buf. 14962 * Update kstats & return the buf with an error code. 14963 * We must use sd_return_failed_command_no_restart() to 14964 * avoid a recursive call back into sd_start_cmds(). 
14965 * However this also means that we must keep processing 14966 * the waitq here in order to avoid stalling. 14967 */ 14968 if (statp == kstat_waitq_to_runq) { 14969 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14970 } 14971 sd_return_failed_command_no_restart(un, bp, EIO); 14972 if (bp == immed_bp) { 14973 /* immed_bp is gone by now, so clear this */ 14974 immed_bp = NULL; 14975 } 14976 continue; 14977 } 14978 got_pkt: 14979 if (bp == immed_bp) { 14980 /* goto the head of the class.... */ 14981 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14982 } 14983 14984 un->un_ncmds_in_transport++; 14985 SD_UPDATE_KSTATS(un, statp, bp); 14986 14987 /* 14988 * Call scsi_transport() to send the command to the target. 14989 * According to SCSA architecture, we must drop the mutex here 14990 * before calling scsi_transport() in order to avoid deadlock. 14991 * Note that the scsi_pkt's completion routine can be executed 14992 * (from interrupt context) even before the call to 14993 * scsi_transport() returns. 14994 */ 14995 SD_TRACE(SD_LOG_IO_CORE, un, 14996 "sd_start_cmds: calling scsi_transport()\n"); 14997 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14998 14999 mutex_exit(SD_MUTEX(un)); 15000 rval = scsi_transport(xp->xb_pktp); 15001 mutex_enter(SD_MUTEX(un)); 15002 15003 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15004 "sd_start_cmds: scsi_transport() returned %d\n", rval); 15005 15006 switch (rval) { 15007 case TRAN_ACCEPT: 15008 /* Clear this with every pkt accepted by the HBA */ 15009 un->un_tran_fatal_count = 0; 15010 break; /* Success; try the next cmd (if any) */ 15011 15012 case TRAN_BUSY: 15013 un->un_ncmds_in_transport--; 15014 ASSERT(un->un_ncmds_in_transport >= 0); 15015 15016 /* 15017 * Don't retry request sense, the sense data 15018 * is lost when another request is sent. 15019 * Free up the rqs buf and retry 15020 * the original failed cmd. Update kstat. 
15021 */ 15022 if (bp == un->un_rqs_bp) { 15023 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15024 bp = sd_mark_rqs_idle(un, xp); 15025 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15026 NULL, NULL, EIO, un->un_busy_timeout / 500, 15027 kstat_waitq_enter); 15028 goto exit; 15029 } 15030 15031 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 15032 /* 15033 * Free the DMA resources for the scsi_pkt. This will 15034 * allow mpxio to select another path the next time 15035 * we call scsi_transport() with this scsi_pkt. 15036 * See sdintr() for the rationalization behind this. 15037 */ 15038 if ((un->un_f_is_fibre == TRUE) && 15039 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15040 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 15041 scsi_dmafree(xp->xb_pktp); 15042 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15043 } 15044 #endif 15045 15046 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 15047 /* 15048 * Commands that are SD_PATH_DIRECT_PRIORITY 15049 * are for error recovery situations. These do 15050 * not use the normal command waitq, so if they 15051 * get a TRAN_BUSY we cannot put them back onto 15052 * the waitq for later retry. One possible 15053 * problem is that there could already be some 15054 * other command on un_retry_bp that is waiting 15055 * for this one to complete, so we would be 15056 * deadlocked if we put this command back onto 15057 * the waitq for later retry (since un_retry_bp 15058 * must complete before the driver gets back to 15059 * commands on the waitq). 15060 * 15061 * To avoid deadlock we must schedule a callback 15062 * that will restart this command after a set 15063 * interval. This should keep retrying for as 15064 * long as the underlying transport keeps 15065 * returning TRAN_BUSY (just like for other 15066 * commands). Use the same timeout interval as 15067 * for the ordinary TRAN_BUSY retry. 
15068 */ 15069 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15070 "sd_start_cmds: scsi_transport() returned " 15071 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 15072 15073 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15074 un->un_direct_priority_timeid = 15075 timeout(sd_start_direct_priority_command, 15076 bp, un->un_busy_timeout / 500); 15077 15078 goto exit; 15079 } 15080 15081 /* 15082 * For TRAN_BUSY, we want to reduce the throttle value, 15083 * unless we are retrying a command. 15084 */ 15085 if (bp != un->un_retry_bp) { 15086 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 15087 } 15088 15089 /* 15090 * Set up the bp to be tried again 10 ms later. 15091 * Note:x86: Is there a timeout value in the sd_lun 15092 * for this condition? 15093 */ 15094 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 15095 kstat_runq_back_to_waitq); 15096 goto exit; 15097 15098 case TRAN_FATAL_ERROR: 15099 un->un_tran_fatal_count++; 15100 /* FALLTHRU */ 15101 15102 case TRAN_BADPKT: 15103 default: 15104 un->un_ncmds_in_transport--; 15105 ASSERT(un->un_ncmds_in_transport >= 0); 15106 15107 /* 15108 * If this is our REQUEST SENSE command with a 15109 * transport error, we must get back the pointers 15110 * to the original buf, and mark the REQUEST 15111 * SENSE command as "available". 15112 */ 15113 if (bp == un->un_rqs_bp) { 15114 bp = sd_mark_rqs_idle(un, xp); 15115 xp = SD_GET_XBUF(bp); 15116 } else { 15117 /* 15118 * Legacy behavior: do not update transport 15119 * error count for request sense commands. 15120 */ 15121 SD_UPDATE_ERRSTATS(un, sd_transerrs); 15122 } 15123 15124 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15125 sd_print_transport_rejected_message(un, xp, rval); 15126 15127 /* 15128 * This command will be terminated by SD driver due 15129 * to a fatal transport error. We should post 15130 * ereport.io.scsi.cmd.disk.tran with driver-assessment 15131 * of "fail" for any command to indicate this 15132 * situation. 
15133 */ 15134 if (xp->xb_ena > 0) { 15135 ASSERT(un->un_fm_private != NULL); 15136 sfip = un->un_fm_private; 15137 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT; 15138 sd_ssc_extract_info(&sfip->fm_ssc, un, 15139 xp->xb_pktp, bp, xp); 15140 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 15141 } 15142 15143 /* 15144 * We must use sd_return_failed_command_no_restart() to 15145 * avoid a recursive call back into sd_start_cmds(). 15146 * However this also means that we must keep processing 15147 * the waitq here in order to avoid stalling. 15148 */ 15149 sd_return_failed_command_no_restart(un, bp, EIO); 15150 15151 /* 15152 * Notify any threads waiting in sd_ddi_suspend() that 15153 * a command completion has occurred. 15154 */ 15155 if (un->un_state == SD_STATE_SUSPENDED) { 15156 cv_broadcast(&un->un_disk_busy_cv); 15157 } 15158 15159 if (bp == immed_bp) { 15160 /* immed_bp is gone by now, so clear this */ 15161 immed_bp = NULL; 15162 } 15163 break; 15164 } 15165 15166 } while (immed_bp == NULL); 15167 15168 exit: 15169 ASSERT(mutex_owned(SD_MUTEX(un))); 15170 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 15171 } 15172 15173 15174 /* 15175 * Function: sd_return_command 15176 * 15177 * Description: Returns a command to its originator (with or without an 15178 * error). Also starts commands waiting to be transported 15179 * to the target. 
 *
 * Context: May be called from interrupt, kernel, or timeout context
 */

static void
sd_return_command(struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_fm_internal *sfip;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	/* The dedicated request-sense buf has its own completion path. */
	ASSERT(bp != un->un_rqs_bp);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	pktp = SD_GET_PKTP(bp);
	sfip = (struct sd_fm_internal *)un->un_fm_private;
	ASSERT(sfip != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");

	/*
	 * Note: check for the "sdrestart failed" case: a non-USCSI command
	 * that completed without error but still has untransferred DMA
	 * (xb_dma_resid != 0) needs its next partial-DMA window set up and
	 * re-issued instead of being completed.
	 */
	if ((un->un_partial_dma_supported == 1) &&
	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
	    (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
	    (xp->xb_pktp->pkt_resid == 0)) {

		if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
			/*
			 * Successfully set up next portion of cmd
			 * transfer, try sending it
			 */
			sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
			    NULL, NULL, 0, (clock_t)0, NULL);
			sd_start_cmds(un, NULL);
			return;	/* Note:x86: need a return here? */
		}
	}

	/*
	 * If this is the failfast bp, clear it from un_failfast_bp. This
	 * can happen if upon being re-tried the failfast bp either
	 * succeeded or encountered another error (possibly even a different
	 * error than the one that precipitated the failfast state, but in
	 * that case it would have had to exhaust retries as well). Regardless,
	 * this should not occur whenever the instance is in the active
	 * failfast state.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	/*
	 * Clear the failfast state upon successful completion of ANY cmd.
	 */
	if (bp->b_error == 0) {
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
		/*
		 * If this is a successful command, but used to be retried,
		 * we will take it as a recovered command and post an
		 * ereport with driver-assessment of "recovered".
		 */
		if (xp->xb_ena > 0) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY);
		}
	} else {
		/*
		 * If this is a failed non-USCSI command we will post an
		 * ereport with driver-assessment set accordingly("fail" or
		 * "fatal").
		 */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
		}
	}

	/*
	 * This is used if the command was retried one or more times. Show that
	 * we are done with it, and allow processing of the waitq to resume.
	 */
	if (bp == un->un_retry_bp) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_command: un:0x%p: "
		    "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
		/*
		 * Notify any threads waiting in sd_ddi_suspend() that
		 * a command completion has occurred.
		 */
		cv_broadcast(&un->un_disk_busy_cv);
		break;
	default:
		sd_start_cmds(un, NULL);
		break;
	}

	/*
	 * Return this command up the iodone chain to its originator.
	 * The mutex must be dropped before running the iodone chain,
	 * which may call back into the driver or sleep.
	 */
	mutex_exit(SD_MUTEX(un));

	(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
	xp->xb_pktp = NULL;

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/* Re-acquire for the caller, which expects the mutex still held. */
	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
}


/*
 * Function: sd_return_failed_command
 *
 * Description: Command completion when an error occurred.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	sd_return_command(un, bp);
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: exit\n");
}


/*
 * Function: sd_return_failed_command_no_restart
 *
 * Description: Same as sd_return_failed_command, but ensures that no
 *		call back into sd_start_cmds will be issued.
 *
 * Context: May be called from interrupt context
 */

static void
sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
    int errcode)
{
	struct sd_xbuf *xp;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(errcode != 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	/*
	 * If this is the failfast bp, clear it. This can happen if the
	 * failfast bp encountered a fatal error when we attempted to
	 * re-try it (such as a scsi_transport(9F) failure). However
	 * we should NOT be in an active failfast state if the failfast
	 * bp is not NULL.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	if (bp == un->un_retry_bp) {
		/*
		 * This command was retried one or more times. Show that we are
		 * done with it, and allow processing of the waitq to resume.
		 */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_failed_command_no_restart: "
		    " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	/* Drop the mutex: the iodone chain may call back or sleep. */
	mutex_exit(SD_MUTEX(un));

	if (xp->xb_pktp != NULL) {
		(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
		xp->xb_pktp = NULL;
	}

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	/* Re-acquire for the caller. Note: no sd_start_cmds() call here. */
	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: exit\n");
}


/*
 * Function: sd_retry_command
 *
 * Description: queue up a command for retry, or (optionally) fail it
 *		if retry counts are exhausted.
 *
 * Arguments: un - Pointer to the sd_lun struct for the target.
 *
 *		bp - Pointer to the buf for the command to be retried.
 *
 *		retry_check_flag - Flag to see which (if any) of the retry
 *		counts should be decremented/checked. If the indicated
 *		retry count is exhausted, then the command will not be
 *		retried; it will be failed instead. This should use a
 *		value equal to one of the following:
 *
 *			SD_RETRIES_NOCHECK
 *			SD_RETRIES_STANDARD
 *			SD_RETRIES_VICTIM
 *
 *		Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
 *		if the check should be made to see if FLAG_ISOLATE is set
 *		in the pkt. If FLAG_ISOLATE is set, then the command is
 *		not retried, it is simply failed.
 *
 *		user_funcp - Ptr to function to call before dispatching the
 *		command. May be NULL if no action needs to be performed.
 *		(Primarily intended for printing messages.)
 *
 *		user_arg - Optional argument to be passed along to
 *		the user_funcp call.
 *
 *		failure_code - errno return code to set in the bp if the
 *		command is going to be failed.
 *
 *		retry_delay - Retry delay interval in (clock_t) units. May
 *		be zero which indicates that the retry should be retried
 *		immediately (ie, without an intervening delay).
 *
 *		statp - Ptr to kstat function to be updated if the command
 *		is queued for a delayed retry. May be NULL if no kstat
 *		update is desired.
 *
 * Context: May be called from interrupt context.
 */

static void
sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
    code), void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_fm_internal *sfip;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	sfip = (struct sd_fm_internal *)un->un_fm_private;
	ASSERT(sfip != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If we are syncing or dumping, fail the command to avoid
	 * recursively calling back into scsi_transport().
	 */
	if (ddi_in_panic()) {
		goto fail_command_no_log;
	}

	/*
	 * We should never be retrying a command with FLAG_DIAGNOSE set, so
	 * log an error and fail the command.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "ERROR, retrying FLAG_DIAGNOSE command.\n");
		sd_dump_memory(un, SD_LOG_IO, "CDB",
		    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		sd_dump_memory(un, SD_LOG_IO, "Sense Data",
		    (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		goto fail_command;
	}

	/*
	 * If we are suspended, then put the command onto head of the
	 * wait queue since we don't want to start more commands, and
	 * clear the un_retry_bp. Next time when we are resumed, will
	 * handle the command in the wait queue.
	 */
	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
	case SD_STATE_DUMPING:
		bp->av_forw = un->un_waitq_headp;
		un->un_waitq_headp = bp;
		if (un->un_waitq_tailp == NULL) {
			un->un_waitq_tailp = bp;
		}
		if (bp == un->un_retry_bp) {
			un->un_retry_bp = NULL;
			un->un_retry_statp = NULL;
		}
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
		    "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
		return;
	default:
		break;
	}

	/*
	 * If the caller wants us to check FLAG_ISOLATE, then see if that
	 * is set; if it is then we do not want to retry the command.
	 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
	 */
	if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
		if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
			goto fail_command;
		}
	}


	/*
	 * If SD_RETRIES_FAILFAST is set, it indicates that either a
	 * command timeout or a selection timeout has occurred. This means
	 * that we were unable to establish any kind of communication with
	 * the target, and subsequent retries and/or commands are likely
	 * to encounter similar results and take a long time to complete.
	 *
	 * If this is a failfast error condition, we need to update the
	 * failfast state, even if this bp does not have B_FAILFAST set.
	 */
	if (retry_check_flag & SD_RETRIES_FAILFAST) {
		if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
			ASSERT(un->un_failfast_bp == NULL);
			/*
			 * If we are already in the active failfast state, and
			 * another failfast error condition has been detected,
			 * then fail this command if it has B_FAILFAST set.
			 * If B_FAILFAST is clear, then maintain the legacy
			 * behavior of retrying heroically, even though this
			 * will take a lot more time to fail the command.
			 */
			if (bp->b_flags & B_FAILFAST) {
				goto fail_command;
			}
		} else {
			/*
			 * We're not in the active failfast state, but we
			 * have a failfast error condition, so we must begin
			 * transition to the next state. We do this regardless
			 * of whether or not this bp has B_FAILFAST set.
			 */
			if (un->un_failfast_bp == NULL) {
				/*
				 * This is the first bp to meet a failfast
				 * condition so save it on un_failfast_bp &
				 * do normal retry processing. Do not enter
				 * active failfast state yet. This marks
				 * entry into the "failfast pending" state.
				 */
				un->un_failfast_bp = bp;

			} else if (un->un_failfast_bp == bp) {
				/*
				 * This is the second time *this* bp has
				 * encountered a failfast error condition,
				 * so enter active failfast state & flush
				 * queues as appropriate.
				 */
				un->un_failfast_state = SD_FAILFAST_ACTIVE;
				un->un_failfast_bp = NULL;
				sd_failfast_flushq(un);

				/*
				 * Fail this bp now if B_FAILFAST set;
				 * otherwise continue with retries. (It would
				 * be pretty ironic if this bp succeeded on a
				 * subsequent retry after we just flushed all
				 * the queues).
				 */
				if (bp->b_flags & B_FAILFAST) {
					goto fail_command;
				}

#if !defined(lint) && !defined(__lint)
			} else {
				/*
				 * If neither of the preceding conditionals
				 * was true, it means that there is some
				 * *other* bp that has met an initial failfast
				 * condition and is currently either being
				 * retried or is waiting to be retried. In
				 * that case we should perform normal retry
				 * processing on *this* bp, since there is a
				 * chance that the current failfast condition
				 * is transient and recoverable. If that does
				 * not turn out to be the case, then retries
				 * will be cleared when the wait queue is
				 * flushed anyway.
				 */
#endif
			}
		}
	} else {
		/*
		 * SD_RETRIES_FAILFAST is clear, which indicates that we
		 * likely were able to at least establish some level of
		 * communication with the target and subsequent commands
		 * and/or retries are likely to get through to the target,
		 * In this case we want to be aggressive about clearing
		 * the failfast state. Note that this does not affect
		 * the "failfast pending" condition.
		 */
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
	}


	/*
	 * Check the specified retry count to see if we can still do
	 * any retries with this pkt before we should fail it.
	 */
	switch (retry_check_flag & SD_RETRIES_MASK) {
	case SD_RETRIES_VICTIM:
		/*
		 * Check the victim retry count. If exhausted, then fall
		 * thru & check against the standard retry count.
		 */
		if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
			/* Increment count & proceed with the retry */
			xp->xb_victim_retry_count++;
			break;
		}
		/* Victim retries exhausted, fall back to std. retries... */
		/* FALLTHRU */

	case SD_RETRIES_STANDARD:
		if (xp->xb_retry_count >= un->un_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			/*
			 * update b_resid for failed SCMD_READ & SCMD_WRITE
			 * commands with nonzero pkt_resid.
			 */
			if ((pktp->pkt_reason == CMD_CMPLT) &&
			    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
			    (pktp->pkt_resid != 0)) {
				uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
				if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
					SD_UPDATE_B_RESID(bp, pktp);
				}
			}
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_UA:
		if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
			/* Retries exhausted, fail the command */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Unit Attention retries exhausted. "
			    "Check the target.\n");
			goto fail_command;
		}
		xp->xb_ua_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n",
		    xp->xb_ua_retry_count);
		break;

	case SD_RETRIES_BUSY:
		if (xp->xb_retry_count >= un->un_busy_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_NOCHECK:
	default:
		/* No retry count to check. Just proceed with the retry */
		break;
	}

	/* Retries go to the head of the queue, ahead of new commands. */
	xp->xb_pktp->pkt_flags |= FLAG_HEAD;

	/*
	 * If this is a non-USCSI command being retried
	 * during execution last time, we should post an ereport with
	 * driver-assessment of the value "retry".
	 * For partial DMA, request sense and STATUS_QFULL, there are no
	 * hardware errors, we bypass ereport posting.
	 */
	if (failure_code != 0) {
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY);
		}
	}

	/*
	 * If we were given a zero timeout, we must attempt to retry the
	 * command immediately (ie, without a delay).
	 */
	if (retry_delay == 0) {
		/*
		 * Check some limiting conditions to see if we can actually
		 * do the immediate retry. If we cannot, then we must
		 * fall back to queueing up a delayed retry.
		 */
		if (un->un_ncmds_in_transport >= un->un_throttle) {
			/*
			 * We are at the throttle limit for the target,
			 * fall back to delayed retry.
			 */
			retry_delay = un->un_busy_timeout;
			statp = kstat_waitq_enter;
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command: immed. retry hit "
			    "throttle!\n");
		} else {
			/*
			 * We're clear to proceed with the immediate retry.
			 * First call the user-provided function (if any)
			 */
			if (user_funcp != NULL) {
				(*user_funcp)(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
#ifdef __lock_lint
				sd_print_incomplete_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
				sd_print_cmd_incomplete_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
				sd_print_sense_failed_msg(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
#endif
			}

			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command: issuing immediate retry\n");

			/*
			 * Call sd_start_cmds() to transport the command to
			 * the target.
			 */
			sd_start_cmds(un, bp);

			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command exit\n");
			return;
		}
	}

	/*
	 * Set up to retry the command after a delay.
	 * First call the user-provided function (if any)
	 */
	if (user_funcp != NULL) {
		(*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
	}

	sd_set_retry_bp(un, bp, retry_delay, statp);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
	return;

fail_command:

	if (user_funcp != NULL) {
		(*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
	}

fail_command_no_log:

	SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_retry_command: returning failed command\n");

	sd_return_failed_command(un, bp, failure_code);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
}


/*
 * Function: sd_set_retry_bp
 *
 * Description: Set up the given bp for retry.
 *
 * Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		retry_delay - time interval before issuing retry (may be 0)
 *		statp - optional pointer to kstat function
 *
 * Context: May be called under interrupt context
 */

static void
sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
    void (*statp)(kstat_io_t *))
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);

	/*
	 * Indicate that the command is being retried. This will not allow any
	 * other commands on the wait queue to be transported to the target
	 * until this command has been completed (success or failure). The
	 * "retry command" is not transported to the target until the given
	 * time delay expires, unless the user specified a 0 retry_delay.
	 *
	 * Note: the timeout(9F) callback routine is what actually calls
	 * sd_start_cmds() to transport the command, with the exception of a
	 * zero retry_delay. The only current implementor of a zero retry delay
	 * is the case where a START_STOP_UNIT is sent to spin-up a device.
	 */
	if (un->un_retry_bp == NULL) {
		ASSERT(un->un_retry_statp == NULL);
		un->un_retry_bp = bp;

		/*
		 * If the user has not specified a delay the command should
		 * be queued and no timeout should be scheduled.
		 */
		if (retry_delay == 0) {
			/*
			 * Save the kstat pointer that will be used in the
			 * call to SD_UPDATE_KSTATS() below, so that
			 * sd_start_cmds() can correctly decrement the waitq
			 * count when it is time to transport this command.
			 */
			un->un_retry_statp = statp;
			goto done;
		}
	}

	if (un->un_retry_bp == bp) {
		/*
		 * Save the kstat pointer that will be used in the call to
		 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
		 * correctly decrement the waitq count when it is time to
		 * transport this command.
		 */
		un->un_retry_statp = statp;

		/*
		 * Schedule a timeout if:
		 *   1) The user has specified a delay.
		 *   2) There is not a START_STOP_UNIT callback pending.
		 *
		 * If no delay has been specified, then it is up to the caller
		 * to ensure that IO processing continues without stalling.
		 * Effectively, this means that the caller will issue the
		 * required call to sd_start_cmds(). The START_STOP_UNIT
		 * callback does this after the START STOP UNIT command has
		 * completed. In either of these cases we should not schedule
		 * a timeout callback here. Also don't schedule the timeout if
		 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart.
		 */
		if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
		    (un->un_direct_priority_timeid == NULL)) {
			un->un_retry_timeid =
			    timeout(sd_start_retry_command, un, retry_delay);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_set_retry_bp: setting timeout: un: 0x%p"
			    " bp:0x%p un_retry_timeid:0x%p\n",
			    un, bp, un->un_retry_timeid);
		}
	} else {
		/*
		 * We only get in here if there is already another command
		 * waiting to be retried. In this case, we just put the
		 * given command onto the wait queue, so it can be transported
		 * after the current retry command has completed.
		 *
		 * Also we have to make sure that if the command at the head
		 * of the wait queue is the un_failfast_bp, that we do not
		 * put ahead of it any other commands that are to be retried.
		 */
		if ((un->un_failfast_bp != NULL) &&
		    (un->un_failfast_bp == un->un_waitq_headp)) {
			/*
			 * Enqueue this command AFTER the first command on
			 * the wait queue (which is also un_failfast_bp).
			 */
			bp->av_forw = un->un_waitq_headp->av_forw;
			un->un_waitq_headp->av_forw = bp;
			if (un->un_waitq_headp == un->un_waitq_tailp) {
				un->un_waitq_tailp = bp;
			}
		} else {
			/* Enqueue this command at the head of the waitq. */
			bp->av_forw = un->un_waitq_headp;
			un->un_waitq_headp = bp;
			if (un->un_waitq_tailp == NULL) {
				un->un_waitq_tailp = bp;
			}
		}

		if (statp == NULL) {
			statp = kstat_waitq_enter;
		}
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
	}

done:
	if (statp != NULL) {
		SD_UPDATE_KSTATS(un, statp, bp);
	}

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: exit un:0x%p\n", un);
}


/*
 * Function: sd_start_retry_command
 *
 * Description: Start the command that has been waiting on the target's
 *		retry queue. Called from timeout(9F) context after the
 *		retry delay interval has expired.
 *
 * Arguments: arg - pointer to associated softstate for the device.
 *
 * Context: timeout(9F) thread context. May not sleep.
 */

static void
sd_start_retry_command(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: entry\n");

	mutex_enter(SD_MUTEX(un));

	un->un_retry_timeid = NULL;

	if (un->un_retry_bp != NULL) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
		    un, un->un_retry_bp);
		sd_start_cmds(un, un->un_retry_bp);
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: exit\n");
}

/*
 * Function: sd_rmw_msg_print_handler
 *
 * Description: If RMW mode is enabled and warning message is triggered
 *		print I/O count during a fixed interval.
 *
 * Arguments: arg - pointer to associated softstate for the device.
15993 * 15994 * Context: timeout(9F) thread context. May not sleep. 15995 */ 15996 static void 15997 sd_rmw_msg_print_handler(void *arg) 15998 { 15999 struct sd_lun *un = arg; 16000 16001 ASSERT(un != NULL); 16002 ASSERT(!mutex_owned(SD_MUTEX(un))); 16003 16004 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16005 "sd_rmw_msg_print_handler: entry\n"); 16006 16007 mutex_enter(SD_MUTEX(un)); 16008 16009 if (un->un_rmw_incre_count > 0) { 16010 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16011 "%"PRIu64" I/O requests are not aligned with %d disk " 16012 "sector size in %ld seconds. They are handled through " 16013 "Read Modify Write but the performance is very low!\n", 16014 un->un_rmw_incre_count, un->un_tgt_blocksize, 16015 drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000); 16016 un->un_rmw_incre_count = 0; 16017 un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler, 16018 un, SD_RMW_MSG_PRINT_TIMEOUT); 16019 } else { 16020 un->un_rmw_msg_timeid = NULL; 16021 } 16022 16023 mutex_exit(SD_MUTEX(un)); 16024 16025 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16026 "sd_rmw_msg_print_handler: exit\n"); 16027 } 16028 16029 /* 16030 * Function: sd_start_direct_priority_command 16031 * 16032 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 16033 * received TRAN_BUSY when we called scsi_transport() to send it 16034 * to the underlying HBA. This function is called from timeout(9F) 16035 * context after the delay interval has expired. 16036 * 16037 * Arguments: arg - pointer to associated buf(9S) to be restarted. 16038 * 16039 * Context: timeout(9F) thread context. May not sleep. 
 */

static void
sd_start_direct_priority_command(void *arg)
{
	struct buf *priority_bp = arg;
	struct sd_lun *un;

	ASSERT(priority_bp != NULL);
	un = SD_GET_UN(priority_bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_direct_priority_command: entry\n");

	mutex_enter(SD_MUTEX(un));
	un->un_direct_priority_timeid = NULL;
	sd_start_cmds(un, priority_bp);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_direct_priority_command: exit\n");
}


/*
 * Function: sd_send_request_sense_command
 *
 * Description: Sends a REQUEST SENSE command to the target
 *
 * Context: May be called from interrupt context.
 */

static void
sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
	    "entry: buf:0x%p\n", bp);

	/*
	 * If we are syncing or dumping, then fail the command to avoid a
	 * recursive callback into scsi_transport(). Also fail the command
	 * if we are suspended (legacy behavior).
	 */
	if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_DUMPING)) {
		sd_return_failed_command(un, bp, EIO);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_send_request_sense_command: syncing/dumping, exit\n");
		return;
	}

	/*
	 * Retry the failed command and don't issue the request sense if:
	 *    1) the sense buf is busy
	 *    2) we have 1 or more outstanding commands on the target
	 *    (the sense data will be cleared or invalidated any way)
	 *
	 * Note: There could be an issue with not checking a retry limit here,
	 * the problem is determining which retry limit to check.
	 */
	if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
		/* Don't retry if the command is flagged as non-retryable */
		if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
			sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
			    NULL, NULL, 0, un->un_busy_timeout,
			    kstat_waitq_enter);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_send_request_sense_command: "
			    "at full throttle, retrying exit\n");
		} else {
			sd_return_failed_command(un, bp, EIO);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_send_request_sense_command: "
			    "at full throttle, non-retryable exit\n");
		}
		return;
	}

	/* Claim the dedicated RQS resources, then transport the RQS pkt. */
	sd_mark_rqs_busy(un, bp);
	sd_start_cmds(un, un->un_rqs_bp);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_send_request_sense_command: exit\n");
}


/*
 * Function: sd_mark_rqs_busy
 *
 * Description: Indicate that the request sense bp for this instance is
 *		in use.
16138 * 16139 * Context: May be called under interrupt context 16140 */ 16141 16142 static void 16143 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 16144 { 16145 struct sd_xbuf *sense_xp; 16146 16147 ASSERT(un != NULL); 16148 ASSERT(bp != NULL); 16149 ASSERT(mutex_owned(SD_MUTEX(un))); 16150 ASSERT(un->un_sense_isbusy == 0); 16151 16152 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 16153 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 16154 16155 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 16156 ASSERT(sense_xp != NULL); 16157 16158 SD_INFO(SD_LOG_IO, un, 16159 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 16160 16161 ASSERT(sense_xp->xb_pktp != NULL); 16162 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 16163 == (FLAG_SENSING | FLAG_HEAD)); 16164 16165 un->un_sense_isbusy = 1; 16166 un->un_rqs_bp->b_resid = 0; 16167 sense_xp->xb_pktp->pkt_resid = 0; 16168 sense_xp->xb_pktp->pkt_reason = 0; 16169 16170 /* So we can get back the bp at interrupt time! */ 16171 sense_xp->xb_sense_bp = bp; 16172 16173 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 16174 16175 /* 16176 * Mark this buf as awaiting sense data. (This is already set in 16177 * the pkt_flags for the RQS packet.) 
16178 */ 16179 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 16180 16181 /* Request sense down same path */ 16182 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) && 16183 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance) 16184 sense_xp->xb_pktp->pkt_path_instance = 16185 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance; 16186 16187 sense_xp->xb_retry_count = 0; 16188 sense_xp->xb_victim_retry_count = 0; 16189 sense_xp->xb_ua_retry_count = 0; 16190 sense_xp->xb_nr_retry_count = 0; 16191 sense_xp->xb_dma_resid = 0; 16192 16193 /* Clean up the fields for auto-request sense */ 16194 sense_xp->xb_sense_status = 0; 16195 sense_xp->xb_sense_state = 0; 16196 sense_xp->xb_sense_resid = 0; 16197 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 16198 16199 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 16200 } 16201 16202 16203 /* 16204 * Function: sd_mark_rqs_idle 16205 * 16206 * Description: SD_MUTEX must be held continuously through this routine 16207 * to prevent reuse of the rqs struct before the caller can 16208 * complete it's processing. 
 *
 * Return Code: Pointer to the RQS buf
 *
 * Context: May be called under interrupt context
 */

static struct buf *
sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
{
	struct buf *bp;
	ASSERT(un != NULL);
	ASSERT(sense_xp != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	/* Must currently be claimed (by sd_mark_rqs_busy) to be released */
	ASSERT(un->un_sense_isbusy != 0);

	/* Release the per-LUN RQS resources claimed in sd_mark_rqs_busy() */
	un->un_sense_isbusy = 0;
	bp = sense_xp->xb_sense_bp;
	sense_xp->xb_sense_bp = NULL;

	/* This pkt is no longer interested in getting sense data */
	((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;

	return (bp);
}



/*
 * Function: sd_alloc_rqs
 *
 * Description: Set up the unit to receive auto request sense data
 *
 * Return Code: DDI_SUCCESS or DDI_FAILURE
 *
 * Context: Called under attach(9E) context
 */

static int
sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_rqs_bp == NULL);
	ASSERT(un->un_rqs_pktp == NULL);

	/*
	 * First allocate the required buf and scsi_pkt structs, then set up
	 * the CDB in the scsi_pkt for a REQUEST SENSE command.
	 */
	un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
	    MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
	if (un->un_rqs_bp == NULL) {
		return (DDI_FAILURE);
	}

	un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
	    CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);

	if (un->un_rqs_pktp == NULL) {
		/* sd_free_rqs() tears down the partially-built state */
		sd_free_rqs(un);
		return (DDI_FAILURE);
	}

	/* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
	(void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
	    SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);

	SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);

	/* Set up the other needed members in the ARQ scsi_pkt. */
	un->un_rqs_pktp->pkt_comp = sdintr;
	un->un_rqs_pktp->pkt_time = sd_io_time;
	un->un_rqs_pktp->pkt_flags |=
	    (FLAG_SENSING | FLAG_HEAD);	/* (1222170) */

	/*
	 * Allocate & init the sd_xbuf struct for the RQS command. Do not
	 * provide any intpkt, destroypkt routines as we take care of
	 * scsi_pkt allocation/freeing here and in sd_free_rqs().
	 */
	xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
	xp->xb_pktp = un->un_rqs_pktp;
	SD_INFO(SD_LOG_ATTACH_DETACH, un,
	    "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
	    un, xp, un->un_rqs_pktp, un->un_rqs_bp);

	/*
	 * Save the pointer to the request sense private bp so it can
	 * be retrieved in sdintr.
	 */
	un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
	ASSERT(un->un_rqs_bp->b_private == xp);

	/*
	 * See if the HBA supports auto-request sense for the specified
	 * target/lun. If it does, then try to enable it (if not already
	 * enabled).
	 *
	 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
	 * failure, while for other HBAs (pln) scsi_ifsetcap will always
	 * return success. However, in both of these cases ARQ is always
	 * enabled and scsi_ifgetcap will always return true. The best approach
	 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
	 *
	 * The 3rd case is the HBA (adp) always return enabled on
	 * scsi_ifgetcap even when it's not enabled, the best approach
	 * is to issue a scsi_ifsetcap then a scsi_ifgetcap
	 * Note: this case is to circumvent the Adaptec bug. (x86 only)
	 */

	if (un->un_f_is_fibre == TRUE) {
		/* Fibre channel HBAs always have ARQ enabled */
		un->un_f_arq_enabled = TRUE;
	} else {
#if defined(__i386) || defined(__amd64)
		/*
		 * Circumvent the Adaptec bug, remove this code when
		 * the bug is fixed
		 */
		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
#endif
		switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
		case 0:
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: HBA supports ARQ\n");
			/*
			 * ARQ is supported by this HBA but currently is not
			 * enabled. Attempt to enable it and if successful then
			 * mark this instance as ARQ enabled.
			 */
			if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
			    == 1) {
				/* Successfully enabled ARQ in the HBA */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_alloc_rqs: ARQ enabled\n");
				un->un_f_arq_enabled = TRUE;
			} else {
				/* Could not enable ARQ in the HBA */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_alloc_rqs: failed ARQ enable\n");
				un->un_f_arq_enabled = FALSE;
			}
			break;
		case 1:
			/*
			 * ARQ is supported by this HBA and is already enabled.
			 * Just mark ARQ as enabled for this instance.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: ARQ already enabled\n");
			un->un_f_arq_enabled = TRUE;
			break;
		default:
			/*
			 * ARQ is not supported by this HBA; disable it for this
			 * instance.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_alloc_rqs: HBA does not support ARQ\n");
			un->un_f_arq_enabled = FALSE;
			break;
		}
	}

	return (DDI_SUCCESS);
}


/*
 * Function: sd_free_rqs
 *
 * Description: Cleanup for the pre-instance RQS command.
16383 * 16384 * Context: Kernel thread context 16385 */ 16386 16387 static void 16388 sd_free_rqs(struct sd_lun *un) 16389 { 16390 ASSERT(un != NULL); 16391 16392 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 16393 16394 /* 16395 * If consistent memory is bound to a scsi_pkt, the pkt 16396 * has to be destroyed *before* freeing the consistent memory. 16397 * Don't change the sequence of this operations. 16398 * scsi_destroy_pkt() might access memory, which isn't allowed, 16399 * after it was freed in scsi_free_consistent_buf(). 16400 */ 16401 if (un->un_rqs_pktp != NULL) { 16402 scsi_destroy_pkt(un->un_rqs_pktp); 16403 un->un_rqs_pktp = NULL; 16404 } 16405 16406 if (un->un_rqs_bp != NULL) { 16407 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 16408 if (xp != NULL) { 16409 kmem_free(xp, sizeof (struct sd_xbuf)); 16410 } 16411 scsi_free_consistent_buf(un->un_rqs_bp); 16412 un->un_rqs_bp = NULL; 16413 } 16414 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 16415 } 16416 16417 16418 16419 /* 16420 * Function: sd_reduce_throttle 16421 * 16422 * Description: Reduces the maximum # of outstanding commands on a 16423 * target to the current number of outstanding commands. 16424 * Queues a tiemout(9F) callback to restore the limit 16425 * after a specified interval has elapsed. 16426 * Typically used when we get a TRAN_BUSY return code 16427 * back from scsi_transport(). 
 *
 * Arguments: un - ptr to the sd_lun softstate struct
 *	throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
 *
 * Context: May be called from interrupt context
 */

static void
sd_reduce_throttle(struct sd_lun *un, int throttle_type)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_ncmds_in_transport >= 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
	    "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
	    un, un->un_throttle, un->un_ncmds_in_transport);

	/* A throttle of 1 (or less) cannot be reduced any further */
	if (un->un_throttle > 1) {
		if (un->un_f_use_adaptive_throttle == TRUE) {
			/*
			 * Save the current throttle in un_busy_throttle on
			 * the first TRAN_BUSY so sd_restore_throttle() can
			 * revert to it; a QFULL clears any saved value so
			 * the restore path ramps up incrementally instead.
			 */
			switch (throttle_type) {
			case SD_THROTTLE_TRAN_BUSY:
				if (un->un_busy_throttle == 0) {
					un->un_busy_throttle = un->un_throttle;
				}
				break;
			case SD_THROTTLE_QFULL:
				un->un_busy_throttle = 0;
				break;
			default:
				ASSERT(FALSE);
			}

			/* Clamp the throttle to the commands in flight */
			if (un->un_ncmds_in_transport > 0) {
				un->un_throttle = un->un_ncmds_in_transport;
			}

		} else {
			/* Non-adaptive: never drop below 1 */
			if (un->un_ncmds_in_transport == 0) {
				un->un_throttle = 1;
			} else {
				un->un_throttle = un->un_ncmds_in_transport;
			}
		}
	}

	/* Reschedule the timeout if none is currently active */
	if (un->un_reset_throttle_timeid == NULL) {
		un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
		    un, SD_THROTTLE_RESET_INTERVAL);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reduce_throttle: timeout scheduled!\n");
	}

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
	    "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
}



/*
 * Function: sd_restore_throttle
 *
 * Description: Callback function for timeout(9F). Resets the current
 *		value of un->un_throttle to its default.
 *
 * Arguments: arg - pointer to associated softstate for the device.
 *
 * Context: May be called from interrupt context
 */

static void
sd_restore_throttle(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
	    "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);

	/* This timeout has fired; clear the id before possibly rearming */
	un->un_reset_throttle_timeid = NULL;

	if (un->un_f_use_adaptive_throttle == TRUE) {
		/*
		 * If un_busy_throttle is nonzero, then it contains the
		 * value that un_throttle was when we got a TRAN_BUSY back
		 * from scsi_transport(). We want to revert back to this
		 * value.
		 *
		 * In the QFULL case, the throttle limit will incrementally
		 * increase until it reaches max throttle.
		 */
		if (un->un_busy_throttle > 0) {
			un->un_throttle = un->un_busy_throttle;
			un->un_busy_throttle = 0;
		} else {
			/*
			 * increase throttle by 10% open gate slowly, schedule
			 * another restore if saved throttle has not been
			 * reached
			 */
			short throttle;
			if (sd_qfull_throttle_enable) {
				throttle = un->un_throttle +
				    max((un->un_throttle / 10), 1);
				un->un_throttle =
				    (throttle < un->un_saved_throttle) ?
				    throttle : un->un_saved_throttle;
				if (un->un_throttle < un->un_saved_throttle) {
					/* Not there yet; keep ramping up */
					un->un_reset_throttle_timeid =
					    timeout(sd_restore_throttle,
					    un,
					    SD_QFULL_THROTTLE_RESET_INTERVAL);
				}
			}
		}

		/*
		 * If un_throttle has fallen below the low-water mark, we
		 * restore the maximum value here (and allow it to ratchet
		 * down again if necessary).
		 */
		if (un->un_throttle < un->un_min_throttle) {
			un->un_throttle = un->un_saved_throttle;
		}
	} else {
		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
		    "restoring limit from 0x%x to 0x%x\n",
		    un->un_throttle, un->un_saved_throttle);
		un->un_throttle = un->un_saved_throttle;
	}

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_restore_throttle: calling sd_start_cmds!\n");

	/* Kick the queue now that more commands may be transported */
	sd_start_cmds(un, NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
	    un, un->un_throttle);

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
}

/*
 * Function: sdrunout
 *
 * Description: Callback routine for scsi_init_pkt when a resource allocation
 *		fails.
 *
 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
 *		soft state instance.
 *
 * Return Code: The scsi_init_pkt routine allows for the callback function to
 *		return a 0 indicating the callback should be rescheduled or a 1
 *		indicating not to reschedule. This routine always returns 1
 *		because the driver always provides a callback function to
 *		scsi_init_pkt. This results in a callback always being scheduled
 *		(via the scsi_init_pkt callback implementation) if a resource
 *		failure occurs.
 *
 * Context: This callback function may not block or call routines that block
 *
 * Note: Using the scsi_init_pkt callback facility can result in an I/O
 *	request persisting at the head of the list which cannot be
 *	satisfied even after multiple retries. In the future the driver
 *	may implement some type of maximum runout count before failing
 *	an I/O.
 */

static int
sdrunout(caddr_t arg)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");

	/* Resources may be available again; retry starting queued commands */
	mutex_enter(SD_MUTEX(un));
	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));
	/*
	 * This callback routine always returns 1 (i.e. do not reschedule)
	 * because we always specify sdrunout as the callback handler for
	 * scsi_init_pkt inside the call to sd_start_cmds.
	 */
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
	return (1);
}


/*
 * Function: sdintr
 *
 * Description: Completion callback routine for scsi_pkt(9S) structs
 *		sent to the HBA driver via scsi_transport(9F).
 *
 * Context: Interrupt context
 */

static void
sdintr(struct scsi_pkt *pktp)
{
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;
	size_t		actual_len;
	sd_ssc_t	*sscp;

	ASSERT(pktp != NULL);
	/* pkt_private was set to the command's bp when the pkt was built */
	bp = (struct buf *)pktp->pkt_private;
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(xp->xb_pktp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

#ifdef SD_FAULT_INJECTION

	SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n");
	/* SD FaultInjection */
	sd_faultinjection(pktp);

#endif /* SD_FAULT_INJECTION */

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p,"
	    " xp:0x%p, un:0x%p\n", bp, xp, un);

	mutex_enter(SD_MUTEX(un));

	ASSERT(un->un_fm_private != NULL);
	sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
	ASSERT(sscp != NULL);

	/* Reduce the count of the #commands currently in transport */
	un->un_ncmds_in_transport--;
	ASSERT(un->un_ncmds_in_transport >= 0);

	/* Increment counter to indicate that the callback routine is active */
	un->un_in_callback++;

	SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);

#ifdef SDDEBUG
	if (bp == un->un_retry_bp) {
		SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: "
		    "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n",
		    un, un->un_retry_bp, un->un_ncmds_in_transport);
	}
#endif

	/*
	 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media
	 * state if needed.
	 */
	if (pktp->pkt_reason == CMD_DEV_GONE) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Command failed to complete...Device is gone\n");
		if (un->un_mediastate != DKIO_DEV_GONE) {
			un->un_mediastate = DKIO_DEV_GONE;
			/* Wake any threads waiting on a media state change */
			cv_broadcast(&un->un_state_cv);
		}
		sd_return_failed_command(un, bp, EIO);
		goto exit;
	}

	if (pktp->pkt_state & STATE_XARQ_DONE) {
		SD_TRACE(SD_LOG_COMMON, un,
		    "sdintr: extra sense data received. pkt=%p\n", pktp);
	}

	/*
	 * First see if the pkt has auto-request sense data with it....
	 * Look at the packet state first so we don't take a performance
	 * hit looking at the arq enabled flag unless absolutely necessary.
	 */
	if ((pktp->pkt_state & STATE_ARQ_DONE) &&
	    (un->un_f_arq_enabled == TRUE)) {
		/*
		 * The HBA did an auto request sense for this command so check
		 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
		 * driver command that should not be retried.
		 */
		if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
			/*
			 * Save the relevant sense info into the xp for the
			 * original cmd.
			 */
			struct scsi_arq_status *asp;
			asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
			xp->xb_sense_status =
			    *((uchar_t *)(&(asp->sts_rqpkt_status)));
			xp->xb_sense_state = asp->sts_rqpkt_state;
			xp->xb_sense_resid = asp->sts_rqpkt_resid;
			if (pktp->pkt_state & STATE_XARQ_DONE) {
				actual_len = MAX_SENSE_LENGTH -
				    xp->xb_sense_resid;
				bcopy(&asp->sts_sensedata, xp->xb_sense_data,
				    MAX_SENSE_LENGTH);
			} else {
				if (xp->xb_sense_resid > SENSE_LENGTH) {
					actual_len = MAX_SENSE_LENGTH -
					    xp->xb_sense_resid;
				} else {
					actual_len = SENSE_LENGTH -
					    xp->xb_sense_resid;
				}
				if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
					/*
					 * Express the resid relative to the
					 * caller's requested sense length.
					 */
					if ((((struct uscsi_cmd *)
					    (xp->xb_pktinfo))->uscsi_rqlen) >
					    actual_len) {
						xp->xb_sense_resid =
						    (((struct uscsi_cmd *)
						    (xp->xb_pktinfo))->
						    uscsi_rqlen) - actual_len;
					} else {
						xp->xb_sense_resid = 0;
					}
				}
				bcopy(&asp->sts_sensedata, xp->xb_sense_data,
				    SENSE_LENGTH);
			}

			/* fail the command */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: arq done and FLAG_DIAGNOSE set\n");
			sd_return_failed_command(un, bp, EIO);
			goto exit;
		}

#if (defined(__i386) || defined(__amd64))	/* DMAFREE for x86 only */
		/*
		 * We want to either retry or fail this command, so free
		 * the DMA resources here. If we retry the command then
		 * the DMA resources will be reallocated in sd_start_cmds().
		 * Note that when PKT_DMA_PARTIAL is used, this reallocation
		 * causes the *entire* transfer to start over again from the
		 * beginning of the request, even for PARTIAL chunks that
		 * have already transferred successfully.
		 */
		if ((un->un_f_is_fibre == TRUE) &&
		    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
		    ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
			scsi_dmafree(pktp);
			xp->xb_pkt_flags |= SD_XB_DMA_FREED;
		}
#endif

		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: arq done, sd_handle_auto_request_sense\n");

		sd_handle_auto_request_sense(un, bp, xp, pktp);
		goto exit;
	}

	/* Next see if this is the REQUEST SENSE pkt for the instance */
	if (pktp->pkt_flags & FLAG_SENSING) {
		/* This pktp is from the unit's REQUEST_SENSE command */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: sd_handle_request_sense\n");
		sd_handle_request_sense(un, bp, xp, pktp);
		goto exit;
	}

	/*
	 * Check to see if the command successfully completed as requested;
	 * this is the most common case (and also the hot performance path).
	 *
	 * Requirements for successful completion are:
	 * pkt_reason is CMD_CMPLT and packet status is status good.
	 * In addition:
	 * - A residual of zero indicates successful completion no matter what
	 *   the command is.
	 * - If the residual is not zero and the command is not a read or
	 *   write, then it's still defined as successful completion. In other
	 *   words, if the command is a read or write the residual must be
	 *   zero for successful completion.
	 * - If the residual is not zero and the command is a read or
	 *   write, and it's a USCSICMD, then it's still defined as
	 *   successful completion.
	 */
	if ((pktp->pkt_reason == CMD_CMPLT) &&
	    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) {

		/*
		 * Since this command is returned with a good status, we
		 * can reset the count for Sonoma failover.
		 */
		un->un_sonoma_failure_count = 0;

		/*
		 * Return all USCSI commands on good status
		 */
		if (pktp->pkt_resid == 0) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning command for resid == 0\n");
		} else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) &&
		    ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) {
			SD_UPDATE_B_RESID(bp, pktp);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning command for resid != 0\n");
		} else if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
			SD_UPDATE_B_RESID(bp, pktp);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: returning uscsi command\n");
		} else {
			/* Read/write with short transfer: treat as an error */
			goto not_successful;
		}
		sd_return_command(un, bp);

		/*
		 * Decrement counter to indicate that the callback routine
		 * is done.
		 */
		un->un_in_callback--;
		ASSERT(un->un_in_callback >= 0);
		mutex_exit(SD_MUTEX(un));

		return;
	}

not_successful:

#if (defined(__i386) || defined(__amd64))	/* DMAFREE for x86 only */
	/*
	 * The following is based upon knowledge of the underlying transport
	 * and its use of DMA resources. This code should be removed when
	 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor
	 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf()
	 * and sd_start_cmds().
	 *
	 * Free any DMA resources associated with this command if there
	 * is a chance it could be retried or enqueued for later retry.
	 * If we keep the DMA binding then mpxio cannot reissue the
	 * command on another path whenever a path failure occurs.
	 *
	 * Note that when PKT_DMA_PARTIAL is used, free/reallocation
	 * causes the *entire* transfer to start over again from the
	 * beginning of the request, even for PARTIAL chunks that
	 * have already transferred successfully.
	 *
	 * This is only done for non-uscsi commands (and also skipped for the
	 * driver's internal RQS command). Also just do this for Fibre Channel
	 * devices as these are the only ones that support mpxio.
	 */
	if ((un->un_f_is_fibre == TRUE) &&
	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
	    ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
		scsi_dmafree(pktp);
		xp->xb_pkt_flags |= SD_XB_DMA_FREED;
	}
#endif

	/*
	 * The command did not successfully complete as requested so check
	 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
	 * driver command that should not be retried so just return. If
	 * FLAG_DIAGNOSE is not set the error will be processed below.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n");
		/*
		 * Issue a request sense if a check condition caused the error
		 * (we handle the auto request sense case above), otherwise
		 * just fail the command.
		 */
		if ((pktp->pkt_reason == CMD_CMPLT) &&
		    (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
			sd_send_request_sense_command(un, bp, pktp);
		} else {
			sd_return_failed_command(un, bp, EIO);
		}
		goto exit;
	}

	/*
	 * The command did not successfully complete as requested so process
	 * the error, retry, and/or attempt recovery.
	 */
	switch (pktp->pkt_reason) {
	case CMD_CMPLT:
		switch (SD_GET_PKT_STATUS(pktp)) {
		case STATUS_GOOD:
			/*
			 * The command completed successfully with a non-zero
			 * residual
			 */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_GOOD \n");
			sd_pkt_status_good(un, bp, xp, pktp);
			break;

		case STATUS_CHECK:
		case STATUS_TERMINATED:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
			sd_pkt_status_check_condition(un, bp, xp, pktp);
			break;

		case STATUS_BUSY:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_BUSY\n");
			sd_pkt_status_busy(un, bp, xp, pktp);
			break;

		case STATUS_RESERVATION_CONFLICT:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_RESERVATION_CONFLICT\n");
			sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
			break;

		case STATUS_QFULL:
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sdintr: STATUS_QFULL\n");
			sd_pkt_status_qfull(un, bp, xp, pktp);
			break;

		case STATUS_MET:
		case STATUS_INTERMEDIATE:
		case STATUS_SCSI2:
		case STATUS_INTERMEDIATE_MET:
		case STATUS_ACA_ACTIVE:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Unexpected SCSI status received: 0x%x\n",
			    SD_GET_PKT_STATUS(pktp));
			/*
			 * Mark the ssc_flags when detected invalid status
			 * code for non-USCSI command.
			 */
			if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
				sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
				    0, "stat-code");
			}
			sd_return_failed_command(un, bp, EIO);
			break;

		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Invalid SCSI status received: 0x%x\n",
			    SD_GET_PKT_STATUS(pktp));
			if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
				sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
				    0, "stat-code");
			}
			sd_return_failed_command(un, bp, EIO);
			break;

		}
		break;

	case CMD_INCOMPLETE:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_INCOMPLETE\n");
		sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
		break;
	case CMD_TRAN_ERR:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TRAN_ERR\n");
		sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
		break;
	case CMD_RESET:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_RESET \n");
		sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
		break;
	case CMD_ABORTED:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_ABORTED \n");
		sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
		break;
	case CMD_TIMEOUT:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TIMEOUT\n");
		sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
		break;
	case CMD_UNX_BUS_FREE:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_UNX_BUS_FREE \n");
		sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
		break;
	case CMD_TAG_REJECT:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: CMD_TAG_REJECT\n");
		sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
		break;
	default:
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sdintr: default\n");
		/*
		 * Mark the ssc_flags for detecting invalid pkt_reason.
		 */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON,
			    0, "pkt-reason");
		}
		sd_pkt_reason_default(un, bp, xp, pktp);
		break;
	}

exit:
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");

	/* Decrement counter to indicate that the callback routine is done. */
	un->un_in_callback--;
	ASSERT(un->un_in_callback >= 0);

	/*
	 * At this point, the pkt has been dispatched, ie, it is either
	 * being re-tried or has been returned to its caller and should
	 * not be referenced.
	 */

	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_print_incomplete_msg
 *
 * Description: Prints the error message for a CMD_INCOMPLETE error.
 *
 * Arguments: un - ptr to associated softstate for the device.
 *		bp - ptr to the buf(9S) for the command.
 *		arg - message string ptr
 *		code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
 *		or SD_NO_RETRY_ISSUED.
 *
 * Context: May be called under interrupt context
 */

static void
sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct scsi_pkt	*pktp;
	char	*msgp;
	char	*cmdp = arg;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(arg != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	/* Map the retry disposition code onto the message suffix */
	switch (code) {
	case SD_DELAYED_RETRY_ISSUED:
	case SD_IMMEDIATE_RETRY_ISSUED:
		msgp = "retrying";
		break;
	case SD_NO_RETRY_ISSUED:
	default:
		msgp = "giving up";
		break;
	}

	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "incomplete %s- %s\n", cmdp, msgp);
	}
}



/*
 * Function: sd_pkt_status_good
 *
 * Description: Processing for a STATUS_GOOD code in pkt_status.
 *
 * Context: May be called under interrupt context
 */

static void
sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	char	*cmdp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);
	ASSERT(pktp->pkt_reason == CMD_CMPLT);
	ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
	/* Only called for the short-transfer case (see sdintr) */
	ASSERT(pktp->pkt_resid != 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
	case SCMD_READ:
		cmdp = "read";
		break;
	case SCMD_WRITE:
		cmdp = "write";
		break;
	default:
		/* Non-read/write: a residual is acceptable, return the cmd */
		SD_UPDATE_B_RESID(bp, pktp);
		sd_return_command(un, bp);
		SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
		return;
	}

	/*
	 * See if we can retry the read/write, preferably immediately.
	 * If retries are exhausted, then sd_retry_command() will update
	 * the b_resid count.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
	    cmdp, EIO, (clock_t)0, NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
}





/*
 * Function: sd_handle_request_sense
 *
 * Description: Processing for non-auto Request Sense command.
 *
 * Arguments: un - ptr to associated softstate
 *		sense_bp - ptr to buf(9S) for the RQS command
 *		sense_xp - ptr to the sd_xbuf for the RQS command
 *		sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
 *
 * Context: May be called under interrupt context
 */

static void
sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
	struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
{
	struct buf	*cmd_bp;	/* buf for the original command */
	struct sd_xbuf	*cmd_xp;	/* sd_xbuf for the original command */
	struct scsi_pkt *cmd_pktp;	/* pkt for the original command */
	size_t		actual_len;	/* actual sense data length */

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(sense_bp != NULL);
	ASSERT(sense_xp != NULL);
	ASSERT(sense_pktp != NULL);

	/*
	 * Note the sense_bp, sense_xp, and sense_pktp here are for the
	 * RQS command and not the original command.
	 */
	ASSERT(sense_pktp == un->un_rqs_pktp);
	ASSERT(sense_bp == un->un_rqs_bp);
	ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) ==
	    (FLAG_SENSING | FLAG_HEAD));
	ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) &
	    FLAG_SENSING) == FLAG_SENSING);

	/* These are the bp, xp, and pktp for the original command */
	cmd_bp = sense_xp->xb_sense_bp;
	cmd_xp = SD_GET_XBUF(cmd_bp);
	cmd_pktp = SD_GET_PKTP(cmd_bp);

	if (sense_pktp->pkt_reason != CMD_CMPLT) {
		/*
		 * The REQUEST SENSE command failed. Release the REQUEST
		 * SENSE command for re-use, get back the bp for the original
		 * command, and attempt to re-try the original command if
		 * FLAG_DIAGNOSE is not set in the original packet.
		 */
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
			cmd_bp = sd_mark_rqs_idle(un, sense_xp);
			sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD,
			    NULL, NULL, EIO, (clock_t)0, NULL);
			return;
		}
	}

	/*
	 * Save the relevant sense info into the xp for the original cmd.
	 *
	 * Note: if the request sense failed the state info will be zero
	 * as set in sd_mark_rqs_busy()
	 */
	cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp);
	cmd_xp->xb_sense_state = sense_pktp->pkt_state;
	actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid;
	if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) &&
	    (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen >
	    SENSE_LENGTH)) {
		/* Caller asked for more than SENSE_LENGTH; copy the maximum */
		bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
		    MAX_SENSE_LENGTH);
		cmd_xp->xb_sense_resid = sense_pktp->pkt_resid;
	} else {
		bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
		    SENSE_LENGTH);
		if (actual_len < SENSE_LENGTH) {
			cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len;
		} else {
			cmd_xp->xb_sense_resid = 0;
		}
	}

	/*
	 * Free up the RQS command....
	 * NOTE:
	 *	Must do this BEFORE calling sd_validate_sense_data!
	 *	sd_validate_sense_data may return the original command in
	 *	which case the pkt will be freed and the flags can no
	 *	longer be touched.
	 *	SD_MUTEX is held through this process until the command
	 *	is dispatched based upon the sense data, so there are
	 *	no race conditions.
	 */
	(void) sd_mark_rqs_idle(un, sense_xp);

	/*
	 * For a retryable command see if we have valid sense data, if so then
	 * turn it over to sd_decode_sense() to figure out the right course of
	 * action. Just fail a non-retryable command.
	 */
	if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
		if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) ==
		    SD_SENSE_DATA_IS_VALID) {
			sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp);
		}
	} else {
		SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB",
		    (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data",
		    (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		sd_return_failed_command(un, cmd_bp, EIO);
	}
}




/*
 * Function: sd_handle_auto_request_sense
 *
 * Description: Processing for auto-request sense information.
 *
 * Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		xp - ptr to the sd_xbuf for the command
 *		pktp - ptr to the scsi_pkt(9S) for the command
 *
 * Context: May be called under interrupt context
 */

static void
sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct scsi_arq_status *asp;
	size_t actual_len;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);
	ASSERT(pktp != un->un_rqs_pktp);
	ASSERT(bp != un->un_rqs_bp);

	/*
	 * For auto-request sense, we get a scsi_arq_status back from
	 * the HBA, with the sense data in the sts_sensedata member.
	 * The pkt_scbp of the packet points to this scsi_arq_status.
	 */
	asp = (struct scsi_arq_status *)(pktp->pkt_scbp);

	if (asp->sts_rqpkt_reason != CMD_CMPLT) {
		/*
		 * The auto REQUEST SENSE failed; see if we can re-try
		 * the original command.
17316 */ 17317 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17318 "auto request sense failed (reason=%s)\n", 17319 scsi_rname(asp->sts_rqpkt_reason)); 17320 17321 sd_reset_target(un, pktp); 17322 17323 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17324 NULL, NULL, EIO, (clock_t)0, NULL); 17325 return; 17326 } 17327 17328 /* Save the relevant sense info into the xp for the original cmd. */ 17329 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 17330 xp->xb_sense_state = asp->sts_rqpkt_state; 17331 xp->xb_sense_resid = asp->sts_rqpkt_resid; 17332 if (xp->xb_sense_state & STATE_XARQ_DONE) { 17333 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 17334 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 17335 MAX_SENSE_LENGTH); 17336 } else { 17337 if (xp->xb_sense_resid > SENSE_LENGTH) { 17338 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 17339 } else { 17340 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 17341 } 17342 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 17343 if ((((struct uscsi_cmd *) 17344 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 17345 xp->xb_sense_resid = (((struct uscsi_cmd *) 17346 (xp->xb_pktinfo))->uscsi_rqlen) - 17347 actual_len; 17348 } else { 17349 xp->xb_sense_resid = 0; 17350 } 17351 } 17352 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 17353 } 17354 17355 /* 17356 * See if we have valid sense data, if so then turn it over to 17357 * sd_decode_sense() to figure out the right course of action. 17358 */ 17359 if (sd_validate_sense_data(un, bp, xp, actual_len) == 17360 SD_SENSE_DATA_IS_VALID) { 17361 sd_decode_sense(un, bp, xp, pktp); 17362 } 17363 } 17364 17365 17366 /* 17367 * Function: sd_print_sense_failed_msg 17368 * 17369 * Description: Print log message when RQS has failed. 
17370 * 17371 * Arguments: un - ptr to associated softstate 17372 * bp - ptr to buf(9S) for the command 17373 * arg - generic message string ptr 17374 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17375 * or SD_NO_RETRY_ISSUED 17376 * 17377 * Context: May be called from interrupt context 17378 */ 17379 17380 static void 17381 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 17382 int code) 17383 { 17384 char *msgp = arg; 17385 17386 ASSERT(un != NULL); 17387 ASSERT(mutex_owned(SD_MUTEX(un))); 17388 ASSERT(bp != NULL); 17389 17390 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 17391 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 17392 } 17393 } 17394 17395 17396 /* 17397 * Function: sd_validate_sense_data 17398 * 17399 * Description: Check the given sense data for validity. 17400 * If the sense data is not valid, the command will 17401 * be either failed or retried! 17402 * 17403 * Return Code: SD_SENSE_DATA_IS_INVALID 17404 * SD_SENSE_DATA_IS_VALID 17405 * 17406 * Context: May be called from interrupt context 17407 */ 17408 17409 static int 17410 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17411 size_t actual_len) 17412 { 17413 struct scsi_extended_sense *esp; 17414 struct scsi_pkt *pktp; 17415 char *msgp = NULL; 17416 sd_ssc_t *sscp; 17417 17418 ASSERT(un != NULL); 17419 ASSERT(mutex_owned(SD_MUTEX(un))); 17420 ASSERT(bp != NULL); 17421 ASSERT(bp != un->un_rqs_bp); 17422 ASSERT(xp != NULL); 17423 ASSERT(un->un_fm_private != NULL); 17424 17425 pktp = SD_GET_PKTP(bp); 17426 ASSERT(pktp != NULL); 17427 17428 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 17429 ASSERT(sscp != NULL); 17430 17431 /* 17432 * Check the status of the RQS command (auto or manual). 
17433 */ 17434 switch (xp->xb_sense_status & STATUS_MASK) { 17435 case STATUS_GOOD: 17436 break; 17437 17438 case STATUS_RESERVATION_CONFLICT: 17439 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 17440 return (SD_SENSE_DATA_IS_INVALID); 17441 17442 case STATUS_BUSY: 17443 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17444 "Busy Status on REQUEST SENSE\n"); 17445 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 17446 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 17447 return (SD_SENSE_DATA_IS_INVALID); 17448 17449 case STATUS_QFULL: 17450 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17451 "QFULL Status on REQUEST SENSE\n"); 17452 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 17453 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 17454 return (SD_SENSE_DATA_IS_INVALID); 17455 17456 case STATUS_CHECK: 17457 case STATUS_TERMINATED: 17458 msgp = "Check Condition on REQUEST SENSE\n"; 17459 goto sense_failed; 17460 17461 default: 17462 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 17463 goto sense_failed; 17464 } 17465 17466 /* 17467 * See if we got the minimum required amount of sense data. 17468 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 17469 * or less. 
17470 */ 17471 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 17472 (actual_len == 0)) { 17473 msgp = "Request Sense couldn't get sense data\n"; 17474 goto sense_failed; 17475 } 17476 17477 if (actual_len < SUN_MIN_SENSE_LENGTH) { 17478 msgp = "Not enough sense information\n"; 17479 /* Mark the ssc_flags for detecting invalid sense data */ 17480 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17481 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17482 "sense-data"); 17483 } 17484 goto sense_failed; 17485 } 17486 17487 /* 17488 * We require the extended sense data 17489 */ 17490 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 17491 if (esp->es_class != CLASS_EXTENDED_SENSE) { 17492 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17493 static char tmp[8]; 17494 static char buf[148]; 17495 char *p = (char *)(xp->xb_sense_data); 17496 int i; 17497 17498 mutex_enter(&sd_sense_mutex); 17499 (void) strcpy(buf, "undecodable sense information:"); 17500 for (i = 0; i < actual_len; i++) { 17501 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 17502 (void) strcpy(&buf[strlen(buf)], tmp); 17503 } 17504 i = strlen(buf); 17505 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 17506 17507 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 17508 scsi_log(SD_DEVINFO(un), sd_label, 17509 CE_WARN, buf); 17510 } 17511 mutex_exit(&sd_sense_mutex); 17512 } 17513 17514 /* Mark the ssc_flags for detecting invalid sense data */ 17515 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17516 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17517 "sense-data"); 17518 } 17519 17520 /* Note: Legacy behavior, fail the command with no retry */ 17521 sd_return_failed_command(un, bp, EIO); 17522 return (SD_SENSE_DATA_IS_INVALID); 17523 } 17524 17525 /* 17526 * Check that es_code is valid (es_class concatenated with es_code 17527 * make up the "response code" field. es_class will always be 7, so 17528 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 17529 * format. 
17530 */ 17531 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 17532 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 17533 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 17534 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 17535 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 17536 /* Mark the ssc_flags for detecting invalid sense data */ 17537 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17538 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17539 "sense-data"); 17540 } 17541 goto sense_failed; 17542 } 17543 17544 return (SD_SENSE_DATA_IS_VALID); 17545 17546 sense_failed: 17547 /* 17548 * If the request sense failed (for whatever reason), attempt 17549 * to retry the original command. 17550 */ 17551 #if defined(__i386) || defined(__amd64) 17552 /* 17553 * SD_RETRY_DELAY is conditionally compile (#if fibre) in 17554 * sddef.h for Sparc platform, and x86 uses 1 binary 17555 * for both SCSI/FC. 17556 * The SD_RETRY_DELAY value need to be adjusted here 17557 * when SD_RETRY_DELAY change in sddef.h 17558 */ 17559 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17560 sd_print_sense_failed_msg, msgp, EIO, 17561 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 17562 #else 17563 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17564 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 17565 #endif 17566 17567 return (SD_SENSE_DATA_IS_INVALID); 17568 } 17569 17570 /* 17571 * Function: sd_decode_sense 17572 * 17573 * Description: Take recovery action(s) when SCSI Sense Data is received. 17574 * 17575 * Context: Interrupt context. 
 */

static void
sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	uint8_t sense_key;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(bp != un->un_rqs_bp);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	sense_key = scsi_sense_key(xp->xb_sense_data);

	/*
	 * Dispatch on the sense key; each handler decides whether the
	 * command is retried, failed, or completed.
	 */
	switch (sense_key) {
	case KEY_NO_SENSE:
		sd_sense_key_no_sense(un, bp, xp, pktp);
		break;
	case KEY_RECOVERABLE_ERROR:
		sd_sense_key_recoverable_error(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_NOT_READY:
		sd_sense_key_not_ready(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_MEDIUM_ERROR:
	case KEY_HARDWARE_ERROR:
		sd_sense_key_medium_or_hardware_error(un,
		    xp->xb_sense_data, bp, xp, pktp);
		break;
	case KEY_ILLEGAL_REQUEST:
		sd_sense_key_illegal_request(un, bp, xp, pktp);
		break;
	case KEY_UNIT_ATTENTION:
		sd_sense_key_unit_attention(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	case KEY_WRITE_PROTECT:
	case KEY_VOLUME_OVERFLOW:
	case KEY_MISCOMPARE:
		sd_sense_key_fail_command(un, bp, xp, pktp);
		break;
	case KEY_BLANK_CHECK:
		sd_sense_key_blank_check(un, bp, xp, pktp);
		break;
	case KEY_ABORTED_COMMAND:
		sd_sense_key_aborted_command(un, bp, xp, pktp);
		break;
	case KEY_VENDOR_UNIQUE:
	case KEY_COPY_ABORTED:
	case KEY_EQUAL:
	case KEY_RESERVED:
	default:
		sd_sense_key_default(un, xp->xb_sense_data,
		    bp, xp, pktp);
		break;
	}
}


/*
 * Function: sd_dump_memory
 *
 * Description: Debug logging routine to print the contents of a user provided
 *		buffer. The output of the buffer is broken up into 256 byte
 *		segments due to a size constraint of the scsi_log.
 *		implementation.
 *
 * Arguments: un - ptr to softstate
 *		comp - component mask
 *		title - "title" string to preceed data when printed
 *		data - ptr to data block to be printed
 *		len - size of data block to be printed
 *		fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
 *
 * Context: May be called from interrupt context
 */

#define	SD_DUMP_MEMORY_BUF_SIZE	256

static char *sd_dump_format_string[] = {
		" 0x%02x",
		" %c"
};

static void
sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
    int len, int fmt)
{
	int	i, j;
	int	avail_count;
	int	start_offset;
	int	end_offset;
	size_t	entry_len;
	char	*bufp;
	char	*local_buf;
	char	*format_string;

	ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));

	/*
	 * In the debug version of the driver, this function is called from a
	 * number of places which are NOPs in the release driver.
	 * The debug driver therefore has additional methods of filtering
	 * debug output.
	 */
#ifdef SDDEBUG
	/*
	 * In the debug version of the driver we can reduce the amount of debug
	 * messages by setting sd_error_level to something other than
	 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
	 * sd_component_mask.
	 */
	if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
	    (sd_error_level != SCSI_ERR_ALL)) {
		return;
	}
	if (((sd_component_mask & comp) == 0) ||
	    (sd_error_level != SCSI_ERR_ALL)) {
		return;
	}
#else
	if (sd_error_level != SCSI_ERR_ALL) {
		return;
	}
#endif

	local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
	bufp = local_buf;
	/*
	 * Available length is the length of local_buf[], minus the
	 * length of the title string, minus one for the ":", minus
	 * one for the newline, minus one for the NULL terminator.
	 * This gives the #bytes available for holding the printed
	 * values from the given data buffer.
	 */
	if (fmt == SD_LOG_HEX) {
		format_string = sd_dump_format_string[0];
	} else /* SD_LOG_CHAR */ {
		format_string = sd_dump_format_string[1];
	}
	/*
	 * Available count is the number of elements from the given
	 * data buffer that we can fit into the available length.
	 * This is based upon the size of the format string used.
	 * Make one entry and find it's size.
	 *
	 * Note: entry_len is computed once from data[0]; both format
	 * strings above produce fixed-width output for byte values, so
	 * every subsequent entry has the same length.
	 */
	(void) sprintf(bufp, format_string, data[0]);
	entry_len = strlen(bufp);
	avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;

	j = 0;
	while (j < len) {
		/* Emit one scsi_log() line per avail_count entries. */
		bufp = local_buf;
		bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
		start_offset = j;

		end_offset = start_offset + avail_count;

		(void) sprintf(bufp, "%s:", title);
		bufp += strlen(bufp);
		for (i = start_offset; ((i < end_offset) && (j < len));
		    i++, j++) {
			(void) sprintf(bufp, format_string, data[i]);
			bufp += entry_len;
		}
		(void) sprintf(bufp, "\n");

		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
	}
	kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
}

/*
 * Function: sd_print_sense_msg
 *
 * Description: Log a message based upon the given sense data.
 *
 * Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		arg - ptr to associate sd_sense_info struct
 *		code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *		or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */

static void
sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	uint8_t *sensep;
	daddr_t request_blkno;
	diskaddr_t err_blkno;
	int severity;
	int pfa_flag;
	extern struct scsi_key_strings scsi_cmds[];

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	ASSERT(arg != NULL);

	severity = ((struct sd_sense_info *)(arg))->ssi_severity;
	pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;

	/* A retry (of either kind) downgrades the severity to retryable. */
	if ((code == SD_DELAYED_RETRY_ISSUED) ||
	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
		severity = SCSI_ERR_RETRYABLE;
	}

	/* Use absolute block number for the request block number */
	request_blkno = xp->xb_blkno;

	/*
	 * Now try to get the error block number from the sense data
	 */
	sensep = xp->xb_sense_data;

	if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
	    (uint64_t *)&err_blkno)) {
		/*
		 * We retrieved the error block number from the information
		 * portion of the sense data.
		 *
		 * For USCSI commands we are better off using the error
		 * block no. as the requested block no. (This is the best
		 * we can estimate.)
		 */
		if ((SD_IS_BUFIO(xp) == FALSE) &&
		    ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
			request_blkno = err_blkno;
		}
	} else {
		/*
		 * Without the es_valid bit set (for fixed format) or an
		 * information descriptor (for descriptor format) we cannot
		 * be certain of the error blkno, so just use the
		 * request_blkno.
		 */
		err_blkno = (diskaddr_t)request_blkno;
	}

	/*
	 * The following will log the buffer contents for the release driver
	 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
	 * level is set to verbose.
	 */
	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
	sd_dump_memory(un, SD_LOG_IO, "Sense Data",
	    (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);

	if (pfa_flag == FALSE) {
		/* This is normally only set for USCSI */
		if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
			return;
		}

		if ((SD_IS_BUFIO(xp) == TRUE) &&
		    (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
		    (severity < sd_error_level))) {
			return;
		}
	}
	/*
	 * Check for Sonoma Failover and keep a count of how many failed I/O's
	 */
	if ((SD_IS_LSI(un)) &&
	    (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) &&
	    (scsi_sense_asc(sensep) == 0x94) &&
	    (scsi_sense_ascq(sensep) == 0x01)) {
		un->un_sonoma_failure_count++;
		/* Only the first such failure is reported below. */
		if (un->un_sonoma_failure_count > 1) {
			return;
		}
	}

	if (SD_FM_LOG(un) == SD_FM_LOG_NSUP ||
	    ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) &&
	    (pktp->pkt_resid == 0))) {
		scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
		    request_blkno, err_blkno, scsi_cmds,
		    (struct scsi_extended_sense *)sensep,
		    un->un_additional_codes, NULL);
	}
}

/*
 * Function: sd_sense_key_no_sense
 *
 * Description: Recovery action when sense data was not received.
17877 * 17878 * Context: May be called from interrupt context 17879 */ 17880 17881 static void 17882 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 17883 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17884 { 17885 struct sd_sense_info si; 17886 17887 ASSERT(un != NULL); 17888 ASSERT(mutex_owned(SD_MUTEX(un))); 17889 ASSERT(bp != NULL); 17890 ASSERT(xp != NULL); 17891 ASSERT(pktp != NULL); 17892 17893 si.ssi_severity = SCSI_ERR_FATAL; 17894 si.ssi_pfa_flag = FALSE; 17895 17896 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17897 17898 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17899 &si, EIO, (clock_t)0, NULL); 17900 } 17901 17902 17903 /* 17904 * Function: sd_sense_key_recoverable_error 17905 * 17906 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 17907 * 17908 * Context: May be called from interrupt context 17909 */ 17910 17911 static void 17912 sd_sense_key_recoverable_error(struct sd_lun *un, 17913 uint8_t *sense_datap, 17914 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17915 { 17916 struct sd_sense_info si; 17917 uint8_t asc = scsi_sense_asc(sense_datap); 17918 17919 ASSERT(un != NULL); 17920 ASSERT(mutex_owned(SD_MUTEX(un))); 17921 ASSERT(bp != NULL); 17922 ASSERT(xp != NULL); 17923 ASSERT(pktp != NULL); 17924 17925 /* 17926 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 17927 */ 17928 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 17929 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17930 si.ssi_severity = SCSI_ERR_INFO; 17931 si.ssi_pfa_flag = TRUE; 17932 } else { 17933 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17934 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 17935 si.ssi_severity = SCSI_ERR_RECOVERED; 17936 si.ssi_pfa_flag = FALSE; 17937 } 17938 17939 if (pktp->pkt_resid == 0) { 17940 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17941 sd_return_command(un, bp); 17942 return; 17943 } 17944 17945 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17946 &si, EIO, (clock_t)0, NULL); 17947 } 17948 



/*
 * Function: sd_sense_key_not_ready
 *
 * Description: Recovery actions for a SCSI "Not Ready" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_not_ready(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* Default disposition if we end up failing the command below. */
	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	/*
	 * Update error stats after first NOT READY error. Disks may have
	 * been powered down and may need to be restarted.  For CDROMs,
	 * report NOT READY errors only if media is present.
	 */
	if ((ISCD(un) && (asc == 0x3A)) ||
	    (xp->xb_nr_retry_count > 0)) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
	}

	/*
	 * Just fail if the "not ready" retry limit has been reached.
	 */
	if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
		/* Special check for error message printing for removables. */
		if (un->un_f_has_removable_media && (asc == 0x04) &&
		    (ascq >= 0x04)) {
			si.ssi_severity = SCSI_ERR_ALL;
		}
		goto fail_command;
	}

	/*
	 * Check the ASC and ASCQ in the sense data as needed, to determine
	 * what to do.
	 */
	switch (asc) {
	case 0x04:	/* LOGICAL UNIT NOT READY */
		/*
		 * disk drives that don't spin up result in a very long delay
		 * in format without warning messages. We will log a message
		 * if the error level is set to verbose.
		 */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "logical unit not ready, resetting disk\n");
		}

		/*
		 * There are different requirements for CDROMs and disks for
		 * the number of retries.  If a CD-ROM is giving this, it is
		 * probably reading TOC and is in the process of getting
		 * ready, so we should keep on trying for a long time to make
		 * sure that all types of media are taken in account (for
		 * some media the drive takes a long time to read TOC).  For
		 * disks we do not want to retry this too many times as this
		 * can cause a long hang in format when the drive refuses to
		 * spin up (a very common failure).
		 */
		switch (ascq) {
		case 0x00:  /* LUN NOT READY, CAUSE NOT REPORTABLE */
			/*
			 * Disk drives frequently refuse to spin up which
			 * results in a very long hang in format without
			 * warning messages.
			 *
			 * Note: This code preserves the legacy behavior of
			 * comparing xb_nr_retry_count against zero for fibre
			 * channel targets instead of comparing against the
			 * un_reset_retry_count value.  The reason for this
			 * discrepancy has been so utterly lost beneath the
			 * Sands of Time that even Indiana Jones could not
			 * find it.
			 */
			if (un->un_f_is_fibre == TRUE) {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count > 0)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			} else {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count >
				    un->un_reset_retry_count)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			}
			break;

		case 0x01:  /* LUN IS IN PROCESS OF BECOMING READY */
			/*
			 * If the target is in the process of becoming
			 * ready, just proceed with the retry. This can
			 * happen with CD-ROMs that take a long time to
			 * read TOC after a power cycle or reset.
			 */
			goto do_retry;

		case 0x02:  /* LUN NOT READY, INITITIALIZING CMD REQUIRED */
			break;

		case 0x03:  /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
			/*
			 * Retries cannot help here so just fail right away.
			 */
			goto fail_command;

		case 0x88:
			/*
			 * Vendor-unique code for T3/T4: it indicates a
			 * path problem in a mutipathed config, but as far as
			 * the target driver is concerned it equates to a fatal
			 * error, so we should just fail the command right away
			 * (without printing anything to the console). If this
			 * is not a T3/T4, fall thru to the default recovery
			 * action.
			 * T3/T4 is FC only, don't need to check is_fibre
			 */
			if (SD_IS_T3(un) || SD_IS_T4(un)) {
				sd_return_failed_command(un, bp, EIO);
				return;
			}
			/* FALLTHRU */

		case 0x04:  /* LUN NOT READY, FORMAT IN PROGRESS */
		case 0x05:  /* LUN NOT READY, REBUILD IN PROGRESS */
		case 0x06:  /* LUN NOT READY, RECALCULATION IN PROGRESS */
		case 0x07:  /* LUN NOT READY, OPERATION IN PROGRESS */
		case 0x08:  /* LUN NOT READY, LONG WRITE IN PROGRESS */
		default:    /* Possible future codes in SCSI spec? */
			/*
			 * For removable-media devices, do not retry if
			 * ASCQ > 2 as these result mostly from USCSI commands
			 * on MMC devices issued to check status of an
			 * operation initiated in immediate mode.  Also for
			 * ASCQ >= 4 do not print console messages as these
			 * mainly represent a user-initiated operation
			 * instead of a system failure.
			 */
			if (un->un_f_has_removable_media) {
				si.ssi_severity = SCSI_ERR_ALL;
				goto fail_command;
			}
			break;
		}

		/*
		 * As part of our recovery attempt for the NOT READY
		 * condition, we issue a START STOP UNIT command. However
		 * we want to wait for a short delay before attempting this
		 * as there may still be more commands coming back from the
		 * target with the check condition. To do this we use
		 * timeout(9F) to call sd_start_stop_unit_callback() after
		 * the delay interval expires. (sd_start_stop_unit_callback()
		 * dispatches sd_start_stop_unit_task(), which will issue
		 * the actual START STOP UNIT command. The delay interval
		 * is one-half of the delay that we will use to retry the
		 * command that generated the NOT READY condition.
		 *
		 * Note that we could just dispatch sd_start_stop_unit_task()
		 * from here and allow it to sleep for the delay interval,
		 * but then we would be tying up the taskq thread
		 * uncesessarily for the duration of the delay.
		 *
		 * Do not issue the START STOP UNIT if the current command
		 * is already a START STOP UNIT.
		 */
		if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
			break;
		}

		/*
		 * Do not schedule the timeout if one is already pending.
		 */
		if (un->un_startstop_timeid != NULL) {
			SD_INFO(SD_LOG_ERROR, un,
			    "sd_sense_key_not_ready: restart already issued to"
			    " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			break;
		}

		/*
		 * Schedule the START STOP UNIT command, then queue the command
		 * for a retry.
		 *
		 * Note: A timeout is not scheduled for this retry because we
		 * want the retry to be serial with the START_STOP_UNIT. The
		 * retry will be started when the START_STOP_UNIT is completed
		 * in sd_start_stop_unit_task.
		 */
		un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
		    un, un->un_busy_timeout / 2);
		xp->xb_nr_retry_count++;
		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
		return;

	case 0x05:	/* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "unit does not respond to selection\n");
		}
		break;

	case 0x3A:	/* MEDIUM NOT PRESENT */
		if (sd_error_level >= SCSI_ERR_FATAL) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Caddy not inserted in drive\n");
		}

		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		/* The state has changed, inform the media watch routines */
		cv_broadcast(&un->un_state_cv);
		/* Just fail if no media is present in the drive. */
		goto fail_command;

	default:
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
			    "Unit not Ready. Additional sense code 0x%x\n",
			    asc);
		}
		break;
	}

do_retry:

	/*
	 * Retry the command, as some targets may report NOT READY for
	 * several seconds after being reset.
	 */
	xp->xb_nr_retry_count++;
	si.ssi_severity = SCSI_ERR_RETRYABLE;
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
	    &si, EIO, un->un_busy_timeout, NULL);

	return;

fail_command:
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}



/*
 * Function: sd_sense_key_medium_or_hardware_error
 *
 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
 *		sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;
	uint8_t sense_key = scsi_sense_key(sense_datap);
	uint8_t asc = scsi_sense_asc(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	if (sense_key == KEY_MEDIUM_ERROR) {
		SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
	}

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	if ((un->un_reset_retry_count != 0) &&
	    (xp->xb_retry_count == un->un_reset_retry_count)) {
		/*
		 * SD_MUTEX is dropped across the scsi_reset() calls below
		 * and reacquired afterwards.
		 */
		mutex_exit(SD_MUTEX(un));
		/* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
		if (un->un_f_allow_bus_device_reset == TRUE) {

			boolean_t try_resetting_target = B_TRUE;

			/*
			 * We need to be able to handle specific ASC when we are
			 * handling a KEY_HARDWARE_ERROR. In particular
			 * taking the default action of resetting the target may
			 * not be the appropriate way to attempt recovery.
			 * Resetting a target because of a single LUN failure
			 * victimizes all LUNs on that target.
			 *
			 * This is true for the LSI arrays, if an LSI
			 * array controller returns an ASC of 0x84 (LUN Dead) we
			 * should trust it.
			 */

			if (sense_key == KEY_HARDWARE_ERROR) {
				switch (asc) {
				case 0x84:
					if (SD_IS_LSI(un)) {
						try_resetting_target = B_FALSE;
					}
					break;
				default:
					break;
				}
			}

			if (try_resetting_target == B_TRUE) {
				/* Prefer a LUN reset; fall back to target. */
				int reset_retval = 0;
				if (un->un_f_lun_reset_enabled == TRUE) {
					SD_TRACE(SD_LOG_IO_CORE, un,
					    "sd_sense_key_medium_or_hardware_"
					    "error: issuing RESET_LUN\n");
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					SD_TRACE(SD_LOG_IO_CORE, un,
					    "sd_sense_key_medium_or_hardware_"
					    "error: issuing RESET_TARGET\n");
					(void) scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
		}
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * This really ought to be a fatal error, but we will retry anyway
	 * as some drives report this as a spurious error.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}



/*
 * Function: sd_sense_key_illegal_request
 *
 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;

	/* Pointless to retry if the target thinks it's an illegal request */
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}




/*
 *    Function: sd_sense_key_unit_attention
 *
 * Description: Recovery actions for a SCSI "Unit Attention" sense key.
 *		Dispatches deferred work (media change, dynamic LUN expansion,
 *		descriptor-sense re-enable) to taskq threads as needed, then
 *		retries the failed command unless a taskq handoff already owns
 *		the retry.
 *
 *   Arguments: un - softstate for the target
 *		sense_datap - sense data for the failed command
 *		bp - buf(9S) for the failed command
 *		xp - sd_xbuf for the failed command
 *		pktp - scsi_pkt(9S) for the failed command
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_unit_attention(struct sd_lun *un,
	uint8_t *sense_datap,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	/*
	 * For UNIT ATTENTION we allow retries for one minute. Devices
	 * like Sonoma can return UNIT ATTENTION close to a minute
	 * under certain conditions.
	 */
	int	retry_check_flag = SD_RETRIES_UA;
	boolean_t	kstat_updated = B_FALSE;
	struct	sd_sense_info		si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t	ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;


	switch (asc) {
	case 0x5D:  /* FAILURE PREDICTION THRESHOLD EXCEEDED */
		if (sd_report_pfa != 0) {
			SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
			si.ssi_pfa_flag = TRUE;
			retry_check_flag = SD_RETRIES_STANDARD;
			goto do_retry;
		}

		break;

	case 0x29:  /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
		/*
		 * A reset may have invalidated an outstanding reservation;
		 * flag it for re-acquisition by the reservation machinery.
		 */
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
		}
#ifdef _LP64
		if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) {
			if (taskq_dispatch(sd_tq, sd_reenable_dsense_task,
			    un, KM_NOSLEEP) == 0) {
				/*
				 * If we can't dispatch the task we'll just
				 * live without descriptor sense.  We can
				 * try again on the next "unit attention"
				 */
				SD_ERROR(SD_LOG_ERROR, un,
				    "sd_sense_key_unit_attention: "
				    "Could not dispatch "
				    "sd_reenable_dsense_task\n");
			}
		}
#endif /* _LP64 */
		/* FALLTHRU */

	case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
		if (!un->un_f_has_removable_media) {
			break;
		}

		/*
		 * When we get a unit attention from a removable-media device,
		 * it may be in a state that will take a long time to recover
		 * (e.g., from a reset).  Since we are executing in interrupt
		 * context here, we cannot wait around for the device to come
		 * back. So hand this command off to sd_media_change_task()
		 * for deferred processing under taskq thread context. (Note
		 * that the command still may be failed if a problem is
		 * encountered at a later time.)
		 */
		if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
		    KM_NOSLEEP) == 0) {
			/*
			 * Cannot dispatch the request so fail the command.
			 */
			SD_UPDATE_ERRSTATS(un, sd_harderrs);
			SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
			si.ssi_severity = SCSI_ERR_FATAL;
			sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
			sd_return_failed_command(un, bp, EIO);
		}

		/*
		 * If failed to dispatch sd_media_change_task(), we already
		 * updated kstat. If succeed to dispatch sd_media_change_task(),
		 * we should update kstat later if it encounters an error. So,
		 * we update kstat_updated flag here.
		 */
		kstat_updated = B_TRUE;

		/*
		 * Either the command has been successfully dispatched to a
		 * task Q for retrying, or the dispatch failed. In either case
		 * do NOT retry again by calling sd_retry_command. This sets up
		 * two retries of the same command and when one completes and
		 * frees the resources the other will access freed memory,
		 * a bad thing.
		 */
		return;

	default:
		break;
	}

	/*
	 * ASC  ASCQ
	 *  2A   09	Capacity data has changed
	 *  2A   01	Mode parameters changed
	 *  3F   0E	Reported luns data has changed
	 * Arrays that support logical unit expansion should report
	 * capacity changes(2Ah/09). Mode parameters changed and
	 * reported luns data has changed are the approximation.
	 */
	if (((asc == 0x2a) && (ascq == 0x09)) ||
	    ((asc == 0x2a) && (ascq == 0x01)) ||
	    ((asc == 0x3f) && (ascq == 0x0e))) {
		if (taskq_dispatch(sd_tq, sd_target_change_task, un,
		    KM_NOSLEEP) == 0) {
			SD_ERROR(SD_LOG_ERROR, un,
			    "sd_sense_key_unit_attention: "
			    "Could not dispatch sd_target_change_task\n");
		}
	}

	/*
	 * Update kstat if we haven't done that.
	 */
	if (!kstat_updated) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
	}

do_retry:
	sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
	    EIO, SD_UA_RETRY_DELAY, NULL);
}



/*
 *    Function: sd_sense_key_fail_command
 *
 * Description: Use to fail a command when we don't like the sense key that
 *		was returned.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	/* No recovery is possible; report the sense data and fail. */
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}



/*
 *    Function: sd_sense_key_blank_check
 *
 * Description: Recovery actions for a SCSI "Blank Check" sense key.
 *		Has no monetary connotation.
18540 * 18541 * Context: May be called from interrupt context 18542 */ 18543 18544 static void 18545 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 18546 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18547 { 18548 struct sd_sense_info si; 18549 18550 ASSERT(un != NULL); 18551 ASSERT(mutex_owned(SD_MUTEX(un))); 18552 ASSERT(bp != NULL); 18553 ASSERT(xp != NULL); 18554 ASSERT(pktp != NULL); 18555 18556 /* 18557 * Blank check is not fatal for removable devices, therefore 18558 * it does not require a console message. 18559 */ 18560 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 18561 SCSI_ERR_FATAL; 18562 si.ssi_pfa_flag = FALSE; 18563 18564 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18565 sd_return_failed_command(un, bp, EIO); 18566 } 18567 18568 18569 18570 18571 /* 18572 * Function: sd_sense_key_aborted_command 18573 * 18574 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 18575 * 18576 * Context: May be called from interrupt context 18577 */ 18578 18579 static void 18580 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 18581 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18582 { 18583 struct sd_sense_info si; 18584 18585 ASSERT(un != NULL); 18586 ASSERT(mutex_owned(SD_MUTEX(un))); 18587 ASSERT(bp != NULL); 18588 ASSERT(xp != NULL); 18589 ASSERT(pktp != NULL); 18590 18591 si.ssi_severity = SCSI_ERR_FATAL; 18592 si.ssi_pfa_flag = FALSE; 18593 18594 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18595 18596 /* 18597 * This really ought to be a fatal error, but we will retry anyway 18598 * as some drives report this as a spurious error. 18599 */ 18600 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18601 &si, EIO, drv_usectohz(100000), NULL); 18602 } 18603 18604 18605 18606 /* 18607 * Function: sd_sense_key_default 18608 * 18609 * Description: Default recovery action for several SCSI sense keys (basically 18610 * attempts a retry). 
18611 * 18612 * Context: May be called from interrupt context 18613 */ 18614 18615 static void 18616 sd_sense_key_default(struct sd_lun *un, 18617 uint8_t *sense_datap, 18618 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18619 { 18620 struct sd_sense_info si; 18621 uint8_t sense_key = scsi_sense_key(sense_datap); 18622 18623 ASSERT(un != NULL); 18624 ASSERT(mutex_owned(SD_MUTEX(un))); 18625 ASSERT(bp != NULL); 18626 ASSERT(xp != NULL); 18627 ASSERT(pktp != NULL); 18628 18629 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18630 18631 /* 18632 * Undecoded sense key. Attempt retries and hope that will fix 18633 * the problem. Otherwise, we're dead. 18634 */ 18635 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 18636 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18637 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 18638 } 18639 18640 si.ssi_severity = SCSI_ERR_FATAL; 18641 si.ssi_pfa_flag = FALSE; 18642 18643 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18644 &si, EIO, (clock_t)0, NULL); 18645 } 18646 18647 18648 18649 /* 18650 * Function: sd_print_retry_msg 18651 * 18652 * Description: Print a message indicating the retry action being taken. 18653 * 18654 * Arguments: un - ptr to associated softstate 18655 * bp - ptr to buf(9S) for the command 18656 * arg - not used. 
 *		flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *		       or SD_NO_RETRY_ISSUED
 *
 *     Context: May be called from interrupt context
 */
/* ARGSUSED */
static void
sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt *pktp;
	char *reasonp;
	char *msgp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/*
	 * Stay quiet while suspended, in low power, or when the command
	 * itself requested silence -- but still record the pkt_reason.
	 */
	ASSERT(!mutex_owned(&un->un_pm_mutex));
	mutex_enter(&un->un_pm_mutex);
	if ((un->un_state == SD_STATE_SUSPENDED) ||
	    (SD_DEVICE_IS_IN_LOW_POWER(un)) ||
	    (pktp->pkt_flags & FLAG_SILENT)) {
		mutex_exit(&un->un_pm_mutex);
		goto update_pkt_reason;
	}
	mutex_exit(&un->un_pm_mutex);

	/*
	 * Suppress messages if they are all the same pkt_reason; with
	 * TQ, many (up to 256) are returned with the same pkt_reason.
	 * If we are in panic, then suppress the retry messages.
	 */
	switch (flag) {
	case SD_NO_RETRY_ISSUED:
		msgp = "giving up";
		break;
	case SD_IMMEDIATE_RETRY_ISSUED:
	case SD_DELAYED_RETRY_ISSUED:
		if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
		    ((pktp->pkt_reason == un->un_last_pkt_reason) &&
		    (sd_error_level != SCSI_ERR_ALL))) {
			return;
		}
		msgp = "retrying command";
		break;
	default:
		goto update_pkt_reason;
	}

	reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" :
	    scsi_rname(pktp->pkt_reason));

	if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "SCSI transport failed: reason '%s': %s\n", reasonp, msgp);
	}

update_pkt_reason:
	/*
	 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason.
	 * This is to prevent multiple console messages for the same failure
	 * condition.  Note that un->un_last_pkt_reason is NOT restored if &
	 * when the command is retried successfully because there still may be
	 * more commands coming back with the same value of pktp->pkt_reason.
	 */
	if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) {
		un->un_last_pkt_reason = pktp->pkt_reason;
	}
}


/*
 *    Function: sd_print_cmd_incomplete_msg
 *
 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason.
 *
 *   Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		arg - passed to sd_print_retry_msg()
 *		code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *		       or SD_NO_RETRY_ISSUED
 *
 *     Context: May be called from interrupt context
 */

static void
sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg,
	int code)
{
	dev_info_t	*dip;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	switch (code) {
	case SD_NO_RETRY_ISSUED:
		/* Command was failed. Someone turned off this target? */
		if (un->un_state != SD_STATE_OFFLINE) {
			/*
			 * Suppress message if we are detaching and
			 * device has been disconnected
			 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation
			 * private interface and not part of the DDI
			 */
			dip = un->un_sd->sd_dev;
			if (!(DEVI_IS_DETACHING(dip) &&
			    DEVI_IS_DEVICE_REMOVED(dip))) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				"disk not responding to selection\n");
			}
			New_state(un, SD_STATE_OFFLINE);
		}
		break;

	case SD_DELAYED_RETRY_ISSUED:
	case SD_IMMEDIATE_RETRY_ISSUED:
	default:
		/* Command was successfully queued for retry */
		sd_print_retry_msg(un, bp, arg, code);
		break;
	}
}


/*
 *    Function: sd_pkt_reason_cmd_incomplete
 *
 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* Do not do a reset if selection did not complete */
	/* Note: Should this not just check the bit? */
	if (pktp->pkt_state != STATE_GOT_BUS) {
		SD_UPDATE_ERRSTATS(un, sd_transerrs);
		sd_reset_target(un, pktp);
	}

	/*
	 * If the target was not successfully selected, then set
	 * SD_RETRIES_FAILFAST to indicate that we lost communication
	 * with the target, and further retries and/or commands are
	 * likely to take a long time.
	 */
	if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) {
		flag |= SD_RETRIES_FAILFAST;
	}

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, flag,
	    sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 *    Function: sd_pkt_reason_cmd_tran_err
 *
 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * Do not reset if we got a parity error, or if
	 * selection did not complete.
	 */
	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	/* Note: Should this not just check the bit for pkt_state? */
	if (((pktp->pkt_statistics & STAT_PERR) == 0) &&
	    (pktp->pkt_state != STATE_GOT_BUS)) {
		SD_UPDATE_ERRSTATS(un, sd_transerrs);
		sd_reset_target(un, pktp);
	}

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 *    Function: sd_pkt_reason_cmd_reset
 *
 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* The target may still be running the command, so try to reset. */
	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/*
	 * If pkt_reason is CMD_RESET chances are that this pkt got
	 * reset because another target on this bus caused it. The target
	 * that caused it should get CMD_TIMEOUT with pkt_statistics
	 * of STAT_TIMEOUT/STAT_DEV_RESET.
	 */

	sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}




/*
 *    Function: sd_pkt_reason_cmd_aborted
 *
 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* The target may still be running the command, so try to reset. */
	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/*
	 * If pkt_reason is CMD_ABORTED chances are that this pkt got
	 * aborted because another target on this bus caused it. The target
	 * that caused it should get CMD_TIMEOUT with pkt_statistics
	 * of STAT_TIMEOUT/STAT_DEV_RESET.
	 */

	sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 *    Function: sd_pkt_reason_cmd_timeout
 *
 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);


	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/*
	 * A command timeout indicates that we could not establish
	 * communication with the target, so set SD_RETRIES_FAILFAST
	 * as further retries/commands are likely to take a long time.
	 */
	sd_retry_command(un, bp,
	    (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 *    Function: sd_pkt_reason_cmd_unx_bus_free
 *
 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/* Suppress the retry message when the failure was a parity error. */
	funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ?
	    sd_print_retry_msg : NULL;

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}


/*
 *    Function: sd_pkt_reason_cmd_tag_reject
 *
 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason:
 *		disable tagged queueing for this target and throttle back,
 *		then retry the command untagged.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	pktp->pkt_flags = 0;
	un->un_tagflags = 0;
	if (un->un_f_opt_queueing == TRUE) {
		un->un_throttle = min(un->un_throttle, 3);
	} else {
		un->un_throttle = 1;
	}
	/* Drop the mutex across the (potentially blocking) capability call. */
	mutex_exit(SD_MUTEX(un));
	(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
	mutex_enter(SD_MUTEX(un));

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/* Legacy behavior not to check retry counts here. */
	sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}


/*
 *    Function: sd_pkt_reason_default
 *
 * Description: Default recovery actions for SCSA pkt_reason values that
 *		do not have more explicit recovery actions.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 *    Function: sd_pkt_status_check_condition
 *
 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
	    "entry: buf:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
	 * command will be retried after the request sense). Otherwise, retry
	 * the command. Note: we are issuing the request sense even though the
	 * retry limit may have been reached for the failed command.
	 */
	if (un->un_f_arq_enabled == FALSE) {
		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
		    "no ARQ, sending request sense command\n");
		sd_send_request_sense_command(un, bp, pktp);
	} else {
		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
		    "ARQ,retrying request sense command\n");
#if defined(__i386) || defined(__amd64)
		/*
		 * The SD_RETRY_DELAY value need to be adjusted here
		 * when SD_RETRY_DELAY change in sddef.h
		 */
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
		    un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0,
		    NULL);
#else
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
		    EIO, SD_RETRY_DELAY, NULL);
#endif
	}

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
}


/*
 *    Function: sd_pkt_status_busy
 *
 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
 *		Retries until un_busy_retry_count is exhausted; at the
 *		reset threshold, escalates through LUN/target/all resets.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
	struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_busy: entry\n");

	/* If retries are exhausted, just fail the command. */
	if (xp->xb_retry_count >= un->un_busy_retry_count) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "device busy too long\n");
		sd_return_failed_command(un, bp, EIO);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_pkt_status_busy: exit\n");
		return;
	}
	xp->xb_retry_count++;

	/*
	 * Try to reset the target. However, we do not want to perform
	 * more than one reset if the device continues to fail. The reset
	 * will be performed when the retry count reaches the reset
	 * threshold. This threshold should be set such that at least
	 * one retry is issued before the reset is performed.
	 */
	if (xp->xb_retry_count ==
	    ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
		int rval = 0;
		/* Mutex is dropped across scsi_reset(9F), which may block. */
		mutex_exit(SD_MUTEX(un));
		if (un->un_f_allow_bus_device_reset == TRUE) {
			/*
			 * First try to reset the LUN; if we cannot then
			 * try to reset the target.
			 */
			if (un->un_f_lun_reset_enabled == TRUE) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_pkt_status_busy: RESET_LUN\n");
				rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
			}
			if (rval == 0) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_pkt_status_busy: RESET_TARGET\n");
				rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
		}
		if (rval == 0) {
			/*
			 * If the RESET_LUN and/or RESET_TARGET failed,
			 * try RESET_ALL
			 */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_pkt_status_busy: RESET_ALL\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
		}
		mutex_enter(SD_MUTEX(un));
		if (rval == 0) {
			/*
			 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
			 * At this point we give up & fail the command.
			 */
			sd_return_failed_command(un, bp, EIO);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_pkt_status_busy: exit (failed cmd)\n");
			return;
		}
	}

	/*
	 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as
	 * we have already checked the retry counts above.
	 */
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL,
	    EIO, un->un_busy_timeout, NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_busy: exit\n");
}


/*
 *    Function: sd_pkt_status_reservation_conflict
 *
 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI
 *		command status.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation
	 * conflict could be due to various reasons like incorrect keys, not
	 * registered or not reserved etc. So, we return EACCES to the caller.
	 */
	if (un->un_reservation_type == SD_SCSI3_RESERVATION) {
		int cmd = SD_GET_PKT_OPCODE(pktp);
		if ((cmd == SCMD_PERSISTENT_RESERVE_IN) ||
		    (cmd == SCMD_PERSISTENT_RESERVE_OUT)) {
			sd_return_failed_command(un, bp, EACCES);
			return;
		}
	}

	un->un_resvd_status |= SD_RESERVATION_CONFLICT;

	if ((un->un_resvd_status & SD_FAILFAST) != 0) {
		if (sd_failfast_enable != 0) {
			/* By definition, we must panic here.... */
			sd_panic_for_res_conflict(un);
			/*NOTREACHED*/
		}
		SD_ERROR(SD_LOG_IO, un,
		    "sd_handle_resv_conflict: Disk Reserved\n");
		sd_return_failed_command(un, bp, EACCES);
		return;
	}

	/*
	 * 1147670: retry only if sd_retry_on_reservation_conflict
	 * property is set (default is 1). Retries will not succeed
	 * on a disk reserved by another initiator. HA systems
	 * may reset this via sd.conf to avoid these retries.
	 *
	 * Note: The legacy return code for this failure is EIO, however EACCES
	 * seems more appropriate for a reservation conflict.
	 */
	if (sd_retry_on_reservation_conflict == 0) {
		SD_ERROR(SD_LOG_IO, un,
		    "sd_handle_resv_conflict: Device Reserved\n");
		sd_return_failed_command(un, bp, EIO);
		return;
	}

	/*
	 * Retry the command if we can.
	 *
	 * Note: The legacy return code for this failure is EIO, however EACCES
	 * seems more appropriate for a reservation conflict.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
	    (clock_t)2, NULL);
}



/*
 *    Function: sd_pkt_status_qfull
 *
 * Description: Handle a QUEUE FULL condition from the target.  This can
 *		occur if the HBA does not handle the queue full condition.
 *		(Basically this means third-party HBAs as Sun HBAs will
 *		handle the queue full condition.)  Note that if there are
 *		some commands already in the transport, then the queue full
 *		has occurred because the queue for this nexus is actually
 *		full. If there are no commands in the transport, then the
 *		queue full is resulting from some other initiator or lun
 *		consuming all the resources at the target.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_qfull: entry\n");

	/*
	 * Just lower the QFULL throttle and retry the command.  Note that
	 * we do not limit the number of retries here.
	 */
	sd_reduce_throttle(un, SD_THROTTLE_QFULL);
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0,
	    SD_RESTART_TIMEOUT, NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_qfull: exit\n");
}


/*
 *    Function: sd_reset_target
 *
 * Description: Issue a scsi_reset(9F), with either RESET_LUN,
 *		RESET_TARGET, or RESET_ALL.
 *
 *     Context: May be called under interrupt context.
 */

static void
sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp)
{
	int rval = 0;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n");

	/*
	 * No need to reset if the transport layer has already done so.
	 */
	if ((pktp->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reset_target: no reset\n");
		return;
	}

	/* Mutex is dropped across scsi_reset(9F), which may block. */
	mutex_exit(SD_MUTEX(un));

	if (un->un_f_allow_bus_device_reset == TRUE) {
		if (un->un_f_lun_reset_enabled == TRUE) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_reset_target: RESET_LUN\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
		}
		if (rval == 0) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_reset_target: RESET_TARGET\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
		}
	}

	if (rval == 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reset_target: RESET_ALL\n");
		(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
	}

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n");
}

/*
 *    Function: sd_target_change_task
 *
 * Description: Handle dynamic target
change 19402 * 19403 * Context: Executes in a taskq() thread context 19404 */ 19405 static void 19406 sd_target_change_task(void *arg) 19407 { 19408 struct sd_lun *un = arg; 19409 uint64_t capacity; 19410 diskaddr_t label_cap; 19411 uint_t lbasize; 19412 sd_ssc_t *ssc; 19413 19414 ASSERT(un != NULL); 19415 ASSERT(!mutex_owned(SD_MUTEX(un))); 19416 19417 if ((un->un_f_blockcount_is_valid == FALSE) || 19418 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 19419 return; 19420 } 19421 19422 ssc = sd_ssc_init(un); 19423 19424 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity, 19425 &lbasize, SD_PATH_DIRECT) != 0) { 19426 SD_ERROR(SD_LOG_ERROR, un, 19427 "sd_target_change_task: fail to read capacity\n"); 19428 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19429 goto task_exit; 19430 } 19431 19432 mutex_enter(SD_MUTEX(un)); 19433 if (capacity <= un->un_blockcount) { 19434 mutex_exit(SD_MUTEX(un)); 19435 goto task_exit; 19436 } 19437 19438 sd_update_block_info(un, lbasize, capacity); 19439 mutex_exit(SD_MUTEX(un)); 19440 19441 /* 19442 * If lun is EFI labeled and lun capacity is greater than the 19443 * capacity contained in the label, log a sys event. 
19444 */ 19445 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 19446 (void*)SD_PATH_DIRECT) == 0) { 19447 mutex_enter(SD_MUTEX(un)); 19448 if (un->un_f_blockcount_is_valid && 19449 un->un_blockcount > label_cap) { 19450 mutex_exit(SD_MUTEX(un)); 19451 sd_log_lun_expansion_event(un, KM_SLEEP); 19452 } else { 19453 mutex_exit(SD_MUTEX(un)); 19454 } 19455 } 19456 19457 task_exit: 19458 sd_ssc_fini(ssc); 19459 } 19460 19461 /* 19462 * Function: sd_log_lun_expansion_event 19463 * 19464 * Description: Log lun expansion sys event 19465 * 19466 * Context: Never called from interrupt context 19467 */ 19468 static void 19469 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 19470 { 19471 int err; 19472 char *path; 19473 nvlist_t *dle_attr_list; 19474 19475 /* Allocate and build sysevent attribute list */ 19476 err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 19477 if (err != 0) { 19478 SD_ERROR(SD_LOG_ERROR, un, 19479 "sd_log_lun_expansion_event: fail to allocate space\n"); 19480 return; 19481 } 19482 19483 path = kmem_alloc(MAXPATHLEN, km_flag); 19484 if (path == NULL) { 19485 nvlist_free(dle_attr_list); 19486 SD_ERROR(SD_LOG_ERROR, un, 19487 "sd_log_lun_expansion_event: fail to allocate space\n"); 19488 return; 19489 } 19490 /* 19491 * Add path attribute to identify the lun. 19492 * We are using minor node 'a' as the sysevent attribute. 
19493 */ 19494 (void) snprintf(path, MAXPATHLEN, "/devices"); 19495 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path)); 19496 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path), 19497 ":a"); 19498 19499 err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path); 19500 if (err != 0) { 19501 nvlist_free(dle_attr_list); 19502 kmem_free(path, MAXPATHLEN); 19503 SD_ERROR(SD_LOG_ERROR, un, 19504 "sd_log_lun_expansion_event: fail to add attribute\n"); 19505 return; 19506 } 19507 19508 /* Log dynamic lun expansion sysevent */ 19509 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS, 19510 ESC_DEV_DLE, dle_attr_list, NULL, km_flag); 19511 if (err != DDI_SUCCESS) { 19512 SD_ERROR(SD_LOG_ERROR, un, 19513 "sd_log_lun_expansion_event: fail to log sysevent\n"); 19514 } 19515 19516 nvlist_free(dle_attr_list); 19517 kmem_free(path, MAXPATHLEN); 19518 } 19519 19520 /* 19521 * Function: sd_media_change_task 19522 * 19523 * Description: Recovery action for CDROM to become available. 19524 * 19525 * Context: Executes in a taskq() thread context 19526 */ 19527 19528 static void 19529 sd_media_change_task(void *arg) 19530 { 19531 struct scsi_pkt *pktp = arg; 19532 struct sd_lun *un; 19533 struct buf *bp; 19534 struct sd_xbuf *xp; 19535 int err = 0; 19536 int retry_count = 0; 19537 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 19538 struct sd_sense_info si; 19539 19540 ASSERT(pktp != NULL); 19541 bp = (struct buf *)pktp->pkt_private; 19542 ASSERT(bp != NULL); 19543 xp = SD_GET_XBUF(bp); 19544 ASSERT(xp != NULL); 19545 un = SD_GET_UN(bp); 19546 ASSERT(un != NULL); 19547 ASSERT(!mutex_owned(SD_MUTEX(un))); 19548 ASSERT(un->un_f_monitor_media_state); 19549 19550 si.ssi_severity = SCSI_ERR_INFO; 19551 si.ssi_pfa_flag = FALSE; 19552 19553 /* 19554 * When a reset is issued on a CDROM, it takes a long time to 19555 * recover. 
First few attempts to read capacity and other things 19556 * related to handling unit attention fail (with a ASC 0x4 and 19557 * ASCQ 0x1). In that case we want to do enough retries and we want 19558 * to limit the retries in other cases of genuine failures like 19559 * no media in drive. 19560 */ 19561 while (retry_count++ < retry_limit) { 19562 if ((err = sd_handle_mchange(un)) == 0) { 19563 break; 19564 } 19565 if (err == EAGAIN) { 19566 retry_limit = SD_UNIT_ATTENTION_RETRY; 19567 } 19568 /* Sleep for 0.5 sec. & try again */ 19569 delay(drv_usectohz(500000)); 19570 } 19571 19572 /* 19573 * Dispatch (retry or fail) the original command here, 19574 * along with appropriate console messages.... 19575 * 19576 * Must grab the mutex before calling sd_retry_command, 19577 * sd_print_sense_msg and sd_return_failed_command. 19578 */ 19579 mutex_enter(SD_MUTEX(un)); 19580 if (err != SD_CMD_SUCCESS) { 19581 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19582 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 19583 si.ssi_severity = SCSI_ERR_FATAL; 19584 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 19585 sd_return_failed_command(un, bp, EIO); 19586 } else { 19587 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 19588 &si, EIO, (clock_t)0, NULL); 19589 } 19590 mutex_exit(SD_MUTEX(un)); 19591 } 19592 19593 19594 19595 /* 19596 * Function: sd_handle_mchange 19597 * 19598 * Description: Perform geometry validation & other recovery when CDROM 19599 * has been removed from drive. 
19600 * 19601 * Return Code: 0 for success 19602 * errno-type return code of either sd_send_scsi_DOORLOCK() or 19603 * sd_send_scsi_READ_CAPACITY() 19604 * 19605 * Context: Executes in a taskq() thread context 19606 */ 19607 19608 static int 19609 sd_handle_mchange(struct sd_lun *un) 19610 { 19611 uint64_t capacity; 19612 uint32_t lbasize; 19613 int rval; 19614 sd_ssc_t *ssc; 19615 19616 ASSERT(!mutex_owned(SD_MUTEX(un))); 19617 ASSERT(un->un_f_monitor_media_state); 19618 19619 ssc = sd_ssc_init(un); 19620 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 19621 SD_PATH_DIRECT_PRIORITY); 19622 19623 if (rval != 0) 19624 goto failed; 19625 19626 mutex_enter(SD_MUTEX(un)); 19627 sd_update_block_info(un, lbasize, capacity); 19628 19629 if (un->un_errstats != NULL) { 19630 struct sd_errstats *stp = 19631 (struct sd_errstats *)un->un_errstats->ks_data; 19632 stp->sd_capacity.value.ui64 = (uint64_t) 19633 ((uint64_t)un->un_blockcount * 19634 (uint64_t)un->un_tgt_blocksize); 19635 } 19636 19637 /* 19638 * Check if the media in the device is writable or not 19639 */ 19640 if (ISCD(un)) { 19641 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY); 19642 } 19643 19644 /* 19645 * Note: Maybe let the strategy/partitioning chain worry about getting 19646 * valid geometry. 
19647 */ 19648 mutex_exit(SD_MUTEX(un)); 19649 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 19650 19651 19652 if (cmlb_validate(un->un_cmlbhandle, 0, 19653 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 19654 sd_ssc_fini(ssc); 19655 return (EIO); 19656 } else { 19657 if (un->un_f_pkstats_enabled) { 19658 sd_set_pstats(un); 19659 SD_TRACE(SD_LOG_IO_PARTITION, un, 19660 "sd_handle_mchange: un:0x%p pstats created and " 19661 "set\n", un); 19662 } 19663 } 19664 19665 /* 19666 * Try to lock the door 19667 */ 19668 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 19669 SD_PATH_DIRECT_PRIORITY); 19670 failed: 19671 if (rval != 0) 19672 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19673 sd_ssc_fini(ssc); 19674 return (rval); 19675 } 19676 19677 19678 /* 19679 * Function: sd_send_scsi_DOORLOCK 19680 * 19681 * Description: Issue the scsi DOOR LOCK command 19682 * 19683 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19684 * structure for this target. 19685 * flag - SD_REMOVAL_ALLOW 19686 * SD_REMOVAL_PREVENT 19687 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19688 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19689 * to use the USCSI "direct" chain and bypass the normal 19690 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19691 * command is issued as part of an error recovery action. 19692 * 19693 * Return Code: 0 - Success 19694 * errno return code from sd_ssc_send() 19695 * 19696 * Context: Can sleep. 
19697 */ 19698 19699 static int 19700 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag) 19701 { 19702 struct scsi_extended_sense sense_buf; 19703 union scsi_cdb cdb; 19704 struct uscsi_cmd ucmd_buf; 19705 int status; 19706 struct sd_lun *un; 19707 19708 ASSERT(ssc != NULL); 19709 un = ssc->ssc_un; 19710 ASSERT(un != NULL); 19711 ASSERT(!mutex_owned(SD_MUTEX(un))); 19712 19713 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 19714 19715 /* already determined doorlock is not supported, fake success */ 19716 if (un->un_f_doorlock_supported == FALSE) { 19717 return (0); 19718 } 19719 19720 /* 19721 * If we are ejecting and see an SD_REMOVAL_PREVENT 19722 * ignore the command so we can complete the eject 19723 * operation. 19724 */ 19725 if (flag == SD_REMOVAL_PREVENT) { 19726 mutex_enter(SD_MUTEX(un)); 19727 if (un->un_f_ejecting == TRUE) { 19728 mutex_exit(SD_MUTEX(un)); 19729 return (EAGAIN); 19730 } 19731 mutex_exit(SD_MUTEX(un)); 19732 } 19733 19734 bzero(&cdb, sizeof (cdb)); 19735 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19736 19737 cdb.scc_cmd = SCMD_DOORLOCK; 19738 cdb.cdb_opaque[4] = (uchar_t)flag; 19739 19740 ucmd_buf.uscsi_cdb = (char *)&cdb; 19741 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19742 ucmd_buf.uscsi_bufaddr = NULL; 19743 ucmd_buf.uscsi_buflen = 0; 19744 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19745 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19746 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19747 ucmd_buf.uscsi_timeout = 15; 19748 19749 SD_TRACE(SD_LOG_IO, un, 19750 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n"); 19751 19752 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19753 UIO_SYSSPACE, path_flag); 19754 19755 if (status == 0) 19756 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19757 19758 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 19759 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19760 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 19761 sd_ssc_assessment(ssc, 
SD_FMT_IGNORE); 19762 19763 /* fake success and skip subsequent doorlock commands */ 19764 un->un_f_doorlock_supported = FALSE; 19765 return (0); 19766 } 19767 19768 return (status); 19769 } 19770 19771 /* 19772 * Function: sd_send_scsi_READ_CAPACITY 19773 * 19774 * Description: This routine uses the scsi READ CAPACITY command to determine 19775 * the device capacity in number of blocks and the device native 19776 * block size. If this function returns a failure, then the 19777 * values in *capp and *lbap are undefined. If the capacity 19778 * returned is 0xffffffff then the lun is too large for a 19779 * normal READ CAPACITY command and the results of a 19780 * READ CAPACITY 16 will be used instead. 19781 * 19782 * Arguments: ssc - ssc contains ptr to soft state struct for the target 19783 * capp - ptr to unsigned 64-bit variable to receive the 19784 * capacity value from the command. 19785 * lbap - ptr to unsigned 32-bit varaible to receive the 19786 * block size value from the command 19787 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19788 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19789 * to use the USCSI "direct" chain and bypass the normal 19790 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19791 * command is issued as part of an error recovery action. 19792 * 19793 * Return Code: 0 - Success 19794 * EIO - IO error 19795 * EACCES - Reservation conflict detected 19796 * EAGAIN - Device is becoming ready 19797 * errno return code from sd_ssc_send() 19798 * 19799 * Context: Can sleep. Blocks until command completes. 
19800 */ 19801 19802 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 19803 19804 static int 19805 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap, 19806 int path_flag) 19807 { 19808 struct scsi_extended_sense sense_buf; 19809 struct uscsi_cmd ucmd_buf; 19810 union scsi_cdb cdb; 19811 uint32_t *capacity_buf; 19812 uint64_t capacity; 19813 uint32_t lbasize; 19814 uint32_t pbsize; 19815 int status; 19816 struct sd_lun *un; 19817 19818 ASSERT(ssc != NULL); 19819 19820 un = ssc->ssc_un; 19821 ASSERT(un != NULL); 19822 ASSERT(!mutex_owned(SD_MUTEX(un))); 19823 ASSERT(capp != NULL); 19824 ASSERT(lbap != NULL); 19825 19826 SD_TRACE(SD_LOG_IO, un, 19827 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 19828 19829 /* 19830 * First send a READ_CAPACITY command to the target. 19831 * (This command is mandatory under SCSI-2.) 19832 * 19833 * Set up the CDB for the READ_CAPACITY command. The Partial 19834 * Medium Indicator bit is cleared. The address field must be 19835 * zero if the PMI bit is zero. 19836 */ 19837 bzero(&cdb, sizeof (cdb)); 19838 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19839 19840 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 19841 19842 cdb.scc_cmd = SCMD_READ_CAPACITY; 19843 19844 ucmd_buf.uscsi_cdb = (char *)&cdb; 19845 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19846 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 19847 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 19848 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19849 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19850 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19851 ucmd_buf.uscsi_timeout = 60; 19852 19853 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19854 UIO_SYSSPACE, path_flag); 19855 19856 switch (status) { 19857 case 0: 19858 /* Return failure if we did not get valid capacity data. 
*/ 19859 if (ucmd_buf.uscsi_resid != 0) { 19860 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 19861 "sd_send_scsi_READ_CAPACITY received invalid " 19862 "capacity data"); 19863 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19864 return (EIO); 19865 } 19866 /* 19867 * Read capacity and block size from the READ CAPACITY 10 data. 19868 * This data may be adjusted later due to device specific 19869 * issues. 19870 * 19871 * According to the SCSI spec, the READ CAPACITY 10 19872 * command returns the following: 19873 * 19874 * bytes 0-3: Maximum logical block address available. 19875 * (MSB in byte:0 & LSB in byte:3) 19876 * 19877 * bytes 4-7: Block length in bytes 19878 * (MSB in byte:4 & LSB in byte:7) 19879 * 19880 */ 19881 capacity = BE_32(capacity_buf[0]); 19882 lbasize = BE_32(capacity_buf[1]); 19883 19884 /* 19885 * Done with capacity_buf 19886 */ 19887 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19888 19889 /* 19890 * if the reported capacity is set to all 0xf's, then 19891 * this disk is too large and requires SBC-2 commands. 19892 * Reissue the request using READ CAPACITY 16. 19893 */ 19894 if (capacity == 0xffffffff) { 19895 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19896 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, 19897 &lbasize, &pbsize, path_flag); 19898 if (status != 0) { 19899 return (status); 19900 } 19901 } 19902 break; /* Success! 
*/ 19903 case EIO: 19904 switch (ucmd_buf.uscsi_status) { 19905 case STATUS_RESERVATION_CONFLICT: 19906 status = EACCES; 19907 break; 19908 case STATUS_CHECK: 19909 /* 19910 * Check condition; look for ASC/ASCQ of 0x04/0x01 19911 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 19912 */ 19913 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19914 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 19915 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 19916 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19917 return (EAGAIN); 19918 } 19919 break; 19920 default: 19921 break; 19922 } 19923 /* FALLTHRU */ 19924 default: 19925 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19926 return (status); 19927 } 19928 19929 /* 19930 * Some ATAPI CD-ROM drives report inaccurate LBA size values 19931 * (2352 and 0 are common) so for these devices always force the value 19932 * to 2048 as required by the ATAPI specs. 19933 */ 19934 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 19935 lbasize = 2048; 19936 } 19937 19938 /* 19939 * Get the maximum LBA value from the READ CAPACITY data. 19940 * Here we assume that the Partial Medium Indicator (PMI) bit 19941 * was cleared when issuing the command. This means that the LBA 19942 * returned from the device is the LBA of the last logical block 19943 * on the logical unit. The actual logical block count will be 19944 * this value plus one. 19945 * 19946 * Currently, for removable media, the capacity is saved in terms 19947 * of un->un_sys_blocksize, so scale the capacity value to reflect this. 19948 */ 19949 if (un->un_f_has_removable_media) 19950 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize); 19951 19952 /* 19953 * Copy the values from the READ CAPACITY command into the space 19954 * provided by the caller. 
19955 */ 19956 *capp = capacity; 19957 *lbap = lbasize; 19958 19959 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 19960 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 19961 19962 /* 19963 * Both the lbasize and capacity from the device must be nonzero, 19964 * otherwise we assume that the values are not valid and return 19965 * failure to the caller. (4203735) 19966 */ 19967 if ((capacity == 0) || (lbasize == 0)) { 19968 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 19969 "sd_send_scsi_READ_CAPACITY received invalid value " 19970 "capacity %llu lbasize %d", capacity, lbasize); 19971 return (EIO); 19972 } 19973 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19974 return (0); 19975 } 19976 19977 /* 19978 * Function: sd_send_scsi_READ_CAPACITY_16 19979 * 19980 * Description: This routine uses the scsi READ CAPACITY 16 command to 19981 * determine the device capacity in number of blocks and the 19982 * device native block size. If this function returns a failure, 19983 * then the values in *capp and *lbap are undefined. 19984 * This routine should be called by sd_send_scsi_READ_CAPACITY 19985 * which will apply any device specific adjustments to capacity 19986 * and lbasize. One exception is it is also called by 19987 * sd_get_media_info_ext. In that function, there is no need to 19988 * adjust the capacity and lbasize. 19989 * 19990 * Arguments: ssc - ssc contains ptr to soft state struct for the target 19991 * capp - ptr to unsigned 64-bit variable to receive the 19992 * capacity value from the command. 19993 * lbap - ptr to unsigned 32-bit varaible to receive the 19994 * block size value from the command 19995 * psp - ptr to unsigned 32-bit variable to receive the 19996 * physical block size value from the command 19997 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19998 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19999 * to use the USCSI "direct" chain and bypass the normal 20000 * command waitq. 
SD_PATH_DIRECT_PRIORITY is used when 20001 * this command is issued as part of an error recovery 20002 * action. 20003 * 20004 * Return Code: 0 - Success 20005 * EIO - IO error 20006 * EACCES - Reservation conflict detected 20007 * EAGAIN - Device is becoming ready 20008 * errno return code from sd_ssc_send() 20009 * 20010 * Context: Can sleep. Blocks until command completes. 20011 */ 20012 20013 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 20014 20015 static int 20016 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, 20017 uint32_t *lbap, uint32_t *psp, int path_flag) 20018 { 20019 struct scsi_extended_sense sense_buf; 20020 struct uscsi_cmd ucmd_buf; 20021 union scsi_cdb cdb; 20022 uint64_t *capacity16_buf; 20023 uint64_t capacity; 20024 uint32_t lbasize; 20025 uint32_t pbsize; 20026 uint32_t lbpb_exp; 20027 int status; 20028 struct sd_lun *un; 20029 20030 ASSERT(ssc != NULL); 20031 20032 un = ssc->ssc_un; 20033 ASSERT(un != NULL); 20034 ASSERT(!mutex_owned(SD_MUTEX(un))); 20035 ASSERT(capp != NULL); 20036 ASSERT(lbap != NULL); 20037 20038 SD_TRACE(SD_LOG_IO, un, 20039 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 20040 20041 /* 20042 * First send a READ_CAPACITY_16 command to the target. 20043 * 20044 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 20045 * Medium Indicator bit is cleared. The address field must be 20046 * zero if the PMI bit is zero. 
20047 */ 20048 bzero(&cdb, sizeof (cdb)); 20049 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20050 20051 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 20052 20053 ucmd_buf.uscsi_cdb = (char *)&cdb; 20054 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 20055 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 20056 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 20057 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20058 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 20059 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20060 ucmd_buf.uscsi_timeout = 60; 20061 20062 /* 20063 * Read Capacity (16) is a Service Action In command. One 20064 * command byte (0x9E) is overloaded for multiple operations, 20065 * with the second CDB byte specifying the desired operation 20066 */ 20067 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 20068 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 20069 20070 /* 20071 * Fill in allocation length field 20072 */ 20073 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 20074 20075 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20076 UIO_SYSSPACE, path_flag); 20077 20078 switch (status) { 20079 case 0: 20080 /* Return failure if we did not get valid capacity data. */ 20081 if (ucmd_buf.uscsi_resid > 20) { 20082 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20083 "sd_send_scsi_READ_CAPACITY_16 received invalid " 20084 "capacity data"); 20085 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20086 return (EIO); 20087 } 20088 20089 /* 20090 * Read capacity and block size from the READ CAPACITY 10 data. 20091 * This data may be adjusted later due to device specific 20092 * issues. 20093 * 20094 * According to the SCSI spec, the READ CAPACITY 10 20095 * command returns the following: 20096 * 20097 * bytes 0-7: Maximum logical block address available. 
20098 * (MSB in byte:0 & LSB in byte:7) 20099 * 20100 * bytes 8-11: Block length in bytes 20101 * (MSB in byte:8 & LSB in byte:11) 20102 * 20103 * byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT 20104 */ 20105 capacity = BE_64(capacity16_buf[0]); 20106 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 20107 lbpb_exp = (BE_64(capacity16_buf[1]) >> 40) & 0x0f; 20108 20109 pbsize = lbasize << lbpb_exp; 20110 20111 /* 20112 * Done with capacity16_buf 20113 */ 20114 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20115 20116 /* 20117 * if the reported capacity is set to all 0xf's, then 20118 * this disk is too large. This could only happen with 20119 * a device that supports LBAs larger than 64 bits which 20120 * are not defined by any current T10 standards. 20121 */ 20122 if (capacity == 0xffffffffffffffff) { 20123 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20124 "disk is too large"); 20125 return (EIO); 20126 } 20127 break; /* Success! */ 20128 case EIO: 20129 switch (ucmd_buf.uscsi_status) { 20130 case STATUS_RESERVATION_CONFLICT: 20131 status = EACCES; 20132 break; 20133 case STATUS_CHECK: 20134 /* 20135 * Check condition; look for ASC/ASCQ of 0x04/0x01 20136 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 20137 */ 20138 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20139 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 20140 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 20141 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20142 return (EAGAIN); 20143 } 20144 break; 20145 default: 20146 break; 20147 } 20148 /* FALLTHRU */ 20149 default: 20150 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20151 return (status); 20152 } 20153 20154 *capp = capacity; 20155 *lbap = lbasize; 20156 *psp = pbsize; 20157 20158 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 20159 "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n", 20160 capacity, lbasize, pbsize); 20161 20162 return (0); 20163 } 20164 20165 20166 /* 20167 * Function: 
sd_send_scsi_START_STOP_UNIT 20168 * 20169 * Description: Issue a scsi START STOP UNIT command to the target. 20170 * 20171 * Arguments: ssc - ssc contatins pointer to driver soft state (unit) 20172 * structure for this target. 20173 * pc_flag - SD_POWER_CONDITION 20174 * SD_START_STOP 20175 * flag - SD_TARGET_START 20176 * SD_TARGET_STOP 20177 * SD_TARGET_EJECT 20178 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20179 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20180 * to use the USCSI "direct" chain and bypass the normal 20181 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 20182 * command is issued as part of an error recovery action. 20183 * 20184 * Return Code: 0 - Success 20185 * EIO - IO error 20186 * EACCES - Reservation conflict detected 20187 * ENXIO - Not Ready, medium not present 20188 * errno return code from sd_ssc_send() 20189 * 20190 * Context: Can sleep. 20191 */ 20192 20193 static int 20194 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, int flag, 20195 int path_flag) 20196 { 20197 struct scsi_extended_sense sense_buf; 20198 union scsi_cdb cdb; 20199 struct uscsi_cmd ucmd_buf; 20200 int status; 20201 struct sd_lun *un; 20202 20203 ASSERT(ssc != NULL); 20204 un = ssc->ssc_un; 20205 ASSERT(un != NULL); 20206 ASSERT(!mutex_owned(SD_MUTEX(un))); 20207 20208 SD_TRACE(SD_LOG_IO, un, 20209 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 20210 20211 if (un->un_f_check_start_stop && 20212 ((pc_flag == SD_START_STOP) && (flag != SD_TARGET_EJECT)) && 20213 (un->un_f_start_stop_supported != TRUE)) { 20214 return (0); 20215 } 20216 20217 /* 20218 * If we are performing an eject operation and 20219 * we receive any command other than SD_TARGET_EJECT 20220 * we should immediately return. 
20221 */ 20222 if (flag != SD_TARGET_EJECT) { 20223 mutex_enter(SD_MUTEX(un)); 20224 if (un->un_f_ejecting == TRUE) { 20225 mutex_exit(SD_MUTEX(un)); 20226 return (EAGAIN); 20227 } 20228 mutex_exit(SD_MUTEX(un)); 20229 } 20230 20231 bzero(&cdb, sizeof (cdb)); 20232 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20233 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20234 20235 cdb.scc_cmd = SCMD_START_STOP; 20236 cdb.cdb_opaque[4] = (pc_flag == SD_POWER_CONDITION) ? 20237 (uchar_t)(flag << 4) : (uchar_t)flag; 20238 20239 ucmd_buf.uscsi_cdb = (char *)&cdb; 20240 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20241 ucmd_buf.uscsi_bufaddr = NULL; 20242 ucmd_buf.uscsi_buflen = 0; 20243 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20244 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20245 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20246 ucmd_buf.uscsi_timeout = 200; 20247 20248 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20249 UIO_SYSSPACE, path_flag); 20250 20251 switch (status) { 20252 case 0: 20253 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20254 break; /* Success! 
*/ 20255 case EIO: 20256 switch (ucmd_buf.uscsi_status) { 20257 case STATUS_RESERVATION_CONFLICT: 20258 status = EACCES; 20259 break; 20260 case STATUS_CHECK: 20261 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 20262 switch (scsi_sense_key( 20263 (uint8_t *)&sense_buf)) { 20264 case KEY_ILLEGAL_REQUEST: 20265 status = ENOTSUP; 20266 break; 20267 case KEY_NOT_READY: 20268 if (scsi_sense_asc( 20269 (uint8_t *)&sense_buf) 20270 == 0x3A) { 20271 status = ENXIO; 20272 } 20273 break; 20274 default: 20275 break; 20276 } 20277 } 20278 break; 20279 default: 20280 break; 20281 } 20282 break; 20283 default: 20284 break; 20285 } 20286 20287 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 20288 20289 return (status); 20290 } 20291 20292 20293 /* 20294 * Function: sd_start_stop_unit_callback 20295 * 20296 * Description: timeout(9F) callback to begin recovery process for a 20297 * device that has spun down. 20298 * 20299 * Arguments: arg - pointer to associated softstate struct. 20300 * 20301 * Context: Executes in a timeout(9F) thread context 20302 */ 20303 20304 static void 20305 sd_start_stop_unit_callback(void *arg) 20306 { 20307 struct sd_lun *un = arg; 20308 ASSERT(un != NULL); 20309 ASSERT(!mutex_owned(SD_MUTEX(un))); 20310 20311 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 20312 20313 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 20314 } 20315 20316 20317 /* 20318 * Function: sd_start_stop_unit_task 20319 * 20320 * Description: Recovery procedure when a drive is spun down. 20321 * 20322 * Arguments: arg - pointer to associated softstate struct. 
 *
 * Context: Executes in a taskq() thread context
 */

static void
sd_start_stop_unit_task(void *arg)
{
	struct sd_lun *un = arg;
	sd_ssc_t *ssc;
	int power_level;
	int rval;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n");

	/*
	 * Some unformatted drives report not ready error, no need to
	 * restart if format has been initiated.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_format_in_progress == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return;
	}
	mutex_exit(SD_MUTEX(un));

	ssc = sd_ssc_init(un);
	/*
	 * When a START STOP command is issued from here, it is part of a
	 * failure recovery operation and must be issued before any other
	 * commands, including any pending retries. Thus it must be sent
	 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up
	 * succeeds or not, we will start I/O after the attempt.
	 * If power condition is supported and the current power level
	 * is capable of performing I/O, we should set the power condition
	 * to that level. Otherwise, set the power condition to ACTIVE.
	 */
	if (un->un_f_power_condition_supported) {
		mutex_enter(SD_MUTEX(un));
		ASSERT(SD_PM_IS_LEVEL_VALID(un, un->un_power_level));
		/*
		 * Keep the current level only if its random performance
		 * figure is non-zero (i.e. it can actually do I/O);
		 * otherwise fall back to SD_SPINDLE_ACTIVE.
		 */
		power_level = sd_pwr_pc.ran_perf[un->un_power_level]
		    > 0 ? un->un_power_level : SD_SPINDLE_ACTIVE;
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
		    sd_pl2pc[power_level], SD_PATH_DIRECT_PRIORITY);
	} else {
		rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
		    SD_TARGET_START, SD_PATH_DIRECT_PRIORITY);
	}

	/* Spin-up failure is deliberately ignored; I/O is restarted anyway. */
	if (rval != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	sd_ssc_fini(ssc);
	/*
	 * The above call blocks until the START_STOP_UNIT command completes.
	 * Now that it has completed, we must re-try the original IO that
	 * received the NOT READY condition in the first place. There are
	 * three possible conditions here:
	 *
	 *  (1) The original IO is on un_retry_bp.
	 *  (2) The original IO is on the regular wait queue, and un_retry_bp
	 *      is NULL.
	 *  (3) The original IO is on the regular wait queue, and un_retry_bp
	 *      points to some other, unrelated bp.
	 *
	 * For each case, we must call sd_start_cmds() with un_retry_bp
	 * as the argument. If un_retry_bp is NULL, this will initiate
	 * processing of the regular wait queue. If un_retry_bp is not NULL,
	 * then this will process the bp on un_retry_bp. That may or may not
	 * be the original IO, but that does not matter: the important thing
	 * is to keep the IO processing going at this point.
	 *
	 * Note: This is a very specific error recovery sequence associated
	 * with a drive that is not spun up. We attempt a START_STOP_UNIT and
	 * serialize the I/O with completion of the spin-up.
	 */
	mutex_enter(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
	    un, un->un_retry_bp);
	un->un_startstop_timeid = NULL;	/* Timeout is no longer pending */
	sd_start_cmds(un, un->un_retry_bp);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
}


/*
 * Function: sd_send_scsi_INQUIRY
 *
 * Description: Issue the scsi INQUIRY command.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *                  structure for this target.
 *            bufaddr - buffer to receive the INQUIRY data (zeroed here)
 *            buflen - size of bufaddr in bytes (also the CDB allocation
 *                  length)
 *            evpd - value for CDB byte 1 (EVPD: request a VPD page)
 *            page_code - value for CDB byte 2 (which page to return)
 *            residp - optional out parameter: on success, receives the
 *                  residual (untransferred) byte count; may be NULL
 *
 * Return Code: 0 - Success
 *              errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen,
    uchar_t evpd, uchar_t page_code, size_t *residp)
{
	union scsi_cdb cdb;
	struct uscsi_cmd ucmd_buf;
	int status;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(bufaddr, buflen);

	cdb.scc_cmd = SCMD_INQUIRY;
	cdb.cdb_opaque[1] = evpd;
	cdb.cdb_opaque[2] = page_code;
	FORMG0COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	/* No request-sense buffer: sense data is not collected for INQUIRY. */
	ucmd_buf.uscsi_rqbuf = NULL;
	ucmd_buf.uscsi_rqlen = 0;
	ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 200;	/*
 Excessive legacy value */

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_DIRECT);

	/*
	 * Only handle status == 0, the upper-level caller
	 * will put different assessment based on the context.
	 */
	if (status == 0)
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);

	/* Report the residual count to the caller if requested. */
	if ((status == 0) && (residp != NULL)) {
		*residp = ucmd_buf.uscsi_resid;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_TEST_UNIT_READY
 *
 * Description: Issue the scsi TEST UNIT READY command.
 *              This routine can be told to set the flag USCSI_DIAGNOSE to
 *              prevent retrying failed commands. Use this when the intent
 *              is either to check for device readiness, to clear a Unit
 *              Attention, or to clear any outstanding sense data.
 *              However under specific conditions the expected behavior
 *              is for retries to bring a device ready, so use the flag
 *              with caution.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *                  structure for this target.
 *            flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
 *                  SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
 *                  0: don't check for media present, do retries on cmd.
 *
 * Return Code: 0 - Success
 *              EIO - IO error
 *              EACCES - Reservation conflict detected
 *              ENXIO - Not Ready, medium not present
 *              errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag)
{
	struct scsi_extended_sense sense_buf;
	union scsi_cdb cdb;
	struct uscsi_cmd ucmd_buf;
	int status;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);

	/*
	 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
	 * timeouts when they receive a TUR and the queue is not empty. Check
	 * the configuration flag set during attach (indicating the drive has
	 * this firmware bug) and un_ncmds_in_transport before issuing the
	 * TUR. If there are
	 * pending commands return success, this is a bit arbitrary but is ok
	 * for non-removables (i.e. the eliteI disks) and non-clustering
	 * configurations.
	 */
	if (un->un_f_cfg_tur_check == TRUE) {
		mutex_enter(SD_MUTEX(un));
		if (un->un_ncmds_in_transport != 0) {
			mutex_exit(SD_MUTEX(un));
			return (0);
		}
		mutex_exit(SD_MUTEX(un));
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_TEST_UNIT_READY;

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr = NULL;	/* TUR transfers no data */
	ucmd_buf.uscsi_buflen = 0;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;

	/* Use flag USCSI_DIAGNOSE to prevent retries if it fails.
	 */
	if ((flag & SD_DONT_RETRY_TUR) != 0) {
		ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
	}
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
	    SD_PATH_STANDARD));

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
				break;
			}
			/* NOT READY + ASC 0x3A: medium not present. */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_NOT_READY) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) {
				status = ENXIO;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");

	return (status);
}

/*
 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
 *
 * Description: Issue the scsi PERSISTENT RESERVE IN command.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *                  structure for this target.
 *            usr_cmd - service action: SD_READ_KEYS or SD_READ_RESV
 *            data_len - length of data_bufp; must be 0 when data_bufp
 *                  is NULL
 *            data_bufp - optional caller-supplied buffer for the returned
 *                  parameter data; when NULL a default buffer of
 *                  MHIOC_RESV_KEY_SIZE bytes is allocated internally
 *
 * Return Code: 0 - Success
 *              EACCES
 *              ENOTSUP
 *              errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd,
    uint16_t data_len, uchar_t *data_bufp)
{
	struct scsi_extended_sense sense_buf;
	union scsi_cdb cdb;
	struct uscsi_cmd ucmd_buf;
	int status;
	int no_caller_buf = FALSE;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	if (data_bufp == NULL) {
		/* Allocate a default buf if the caller did not give one */
		ASSERT(data_len == 0);
		data_len = MHIOC_RESV_KEY_SIZE;
		data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
		no_caller_buf = TRUE;	/* remember to free it before return */
	}

	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
	cdb.cdb_opaque[1] = usr_cmd;	/* service action */
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp;
	ucmd_buf.uscsi_buflen = data_len;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);

		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/*
			 * ILLEGAL REQUEST => the target does not support
			 * persistent reservations.
			 */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");

	if (no_caller_buf == TRUE) {
		kmem_free(data_bufp, data_len);
	}

	return (status);
}


/*
 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *              multi-host persistent reservation requests (MHIOCGRP_INKEYS,
 *              MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the
 *              device.
 *
 * Arguments: ssc - ssc contains un - pointer to soft state struct
 *                  for the target.
 *            usr_cmd SCSI-3 reservation facility command (one of
 *                  SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
 *                  SD_SCSI3_PREEMPTANDABORT,
 *                  SD_SCSI3_REGISTERANDIGNOREKEY)
 *            usr_bufp - user provided pointer register, reserve descriptor or
 *                  preempt and abort structure (mhioc_register_t,
 *                  mhioc_resv_desc_t, mhioc_preemptandabort_t,
 *                  mhioc_registerandignorekey_t)
 *
 * Return Code: 0 - Success
 *              EACCES
 *              ENOTSUP
 *              errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd,
    uchar_t *usr_bufp)
{
	struct scsi_extended_sense sense_buf;
	union scsi_cdb cdb;
	struct uscsi_cmd ucmd_buf;
	int status;
	uchar_t data_len = sizeof (sd_prout_t);
	sd_prout_t *prp;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(data_len == 24);	/* required by scsi spec */

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un);

	if (usr_bufp == NULL) {
		return (EINVAL);
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	prp = kmem_zalloc(data_len, KM_SLEEP);

	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT;
	cdb.cdb_opaque[1] = usr_cmd;	/* service action */
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)prp;
	ucmd_buf.uscsi_buflen = data_len;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	/* Build the PROUT parameter list from the caller's request struct. */
	switch (usr_cmd) {
	case SD_SCSI3_REGISTER: {
		mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp;

		bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		bcopy(ptr->newkey.key, prp->service_key,
		    MHIOC_RESV_KEY_SIZE);
		prp->aptpl = ptr->aptpl;
		break;
	}
	case SD_SCSI3_RESERVE:
	case SD_SCSI3_RELEASE: {
		mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;

		bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		prp->scope_address = BE_32(ptr->scope_specific_addr);
		cdb.cdb_opaque[2] = ptr->type;	/* scope/type byte */
		break;
	}
	case SD_SCSI3_PREEMPTANDABORT: {
		mhioc_preemptandabort_t *ptr =
		    (mhioc_preemptandabort_t *)usr_bufp;

		bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		bcopy(ptr->victim_key.key, prp->service_key,
		    MHIOC_RESV_KEY_SIZE);
		prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr);
		cdb.cdb_opaque[2] = ptr->resvdesc.type;
		/* Preempt-and-abort goes to the head of the queue. */
		ucmd_buf.uscsi_flags |= USCSI_HEAD;
		break;
	}
	case SD_SCSI3_REGISTERANDIGNOREKEY:
	{
		mhioc_registerandignorekey_t *ptr;
		ptr = (mhioc_registerandignorekey_t *)usr_bufp;
		/* Existing key is ignored; only the new key is sent. */
		bcopy(ptr->newkey.key,
		    prp->service_key, MHIOC_RESV_KEY_SIZE);
		prp->aptpl = ptr->aptpl;
		break;
	}
	default:
		ASSERT(FALSE);
		break;
	}

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/* ILLEGAL REQUEST => PROUT not supported. */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	kmem_free(prp, data_len);
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
	return (status);
}


/*
 * Function: sd_send_scsi_SYNCHRONIZE_CACHE
 *
 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target
 *
 * Arguments: un - pointer to the target's soft state struct
 *            dkc - pointer to the callback structure
 *
 * Return Code: 0 - success
 *              errno-type error code
 *
 * Context: kernel thread context only.
 *
 *  _______________________________________________________________
 * | dkc_flag &   | dkc_callback | DKIOCFLUSHWRITECACHE            |
 * |FLUSH_VOLATILE|              | operation                       |
 * |______________|______________|_________________________________|
 * | 0            | NULL         | Synchronous flush on both       |
 * |              |              | volatile and non-volatile cache |
 * |______________|______________|_________________________________|
 * | 1            | NULL         | Synchronous flush on volatile   |
 * |              |              | cache; disk drivers may suppress|
 * |              |              | flush if disk table indicates   |
 * |              |              | non-volatile cache              |
 * |______________|______________|_________________________________|
 * | 0            | !NULL        | Asynchronous flush on both      |
 * |              |              | volatile and non-volatile cache;|
 * |______________|______________|_________________________________|
 * | 1            | !NULL        | Asynchronous flush on volatile  |
 * |              |              | cache; disk drivers may suppress|
 * |              |              | flush if disk table indicates   |
 * |              |              | non-volatile
cache              |
 * |______________|______________|_________________________________|
 *
 */

static int
sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc)
{
	struct sd_uscsi_info *uip;
	struct uscsi_cmd *uscmd;
	union scsi_cdb *cdb;
	struct buf *bp;
	int rval = 0;
	int is_async;

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/* Asynchronous iff the caller supplied a completion callback. */
	if (dkc == NULL || dkc->dkc_callback == NULL) {
		is_async = FALSE;
	} else {
		is_async = TRUE;
	}

	mutex_enter(SD_MUTEX(un));
	/* check whether cache flush should be suppressed */
	if (un->un_f_suppress_cache_flush == TRUE) {
		mutex_exit(SD_MUTEX(un));
		/*
		 * suppress the cache flush if the device is told to do
		 * so by sd.conf or disk table
		 */
		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \
		    skip the cache flush since suppress_cache_flush is %d!\n",
		    un->un_f_suppress_cache_flush);

		if (is_async == TRUE) {
			/* invoke callback for asynchronous flush */
			(*dkc->dkc_callback)(dkc->dkc_cookie, 0);
		}
		return (rval);
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be
	 * set properly
	 */
	cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP);
	cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE;

	mutex_enter(SD_MUTEX(un));
	if (dkc != NULL && un->un_f_sync_nv_supported &&
	    (dkc->dkc_flag & FLUSH_VOLATILE)) {
		/*
		 * if the device supports SYNC_NV bit, turn on
		 * the SYNC_NV bit to only flush volatile cache
		 */
		cdb->cdb_un.tag |= SD_SYNC_NV_BIT;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * First get some memory for the uscsi_cmd struct and cdb
	 * and initialize for SYNCHRONIZE_CACHE cmd.
	 */
	uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
	uscmd->uscsi_cdblen = CDB_GROUP1;
	uscmd->uscsi_cdb = (caddr_t)cdb;
	uscmd->uscsi_bufaddr = NULL;	/* no data transfer for this command */
	uscmd->uscsi_buflen = 0;
	uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
	uscmd->uscsi_rqlen = SENSE_LENGTH;
	uscmd->uscsi_rqresid = SENSE_LENGTH;
	uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
	uscmd->uscsi_timeout = sd_io_time;

	/*
	 * Allocate an sd_uscsi_info struct and fill it with the info
	 * needed by sd_initpkt_for_uscsi().  Then put the pointer into
	 * b_private in the buf for sd_initpkt_for_uscsi().  Note that
	 * since we allocate the buf here in this function, we do not
	 * need to preserve the prior contents of b_private.
	 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
	 */
	uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
	uip->ui_flags = SD_PATH_DIRECT;
	uip->ui_cmdp = uscmd;

	bp = getrbuf(KM_SLEEP);
	bp->b_private = uip;

	/*
	 * Setup buffer to carry uscsi request.
	 */
	bp->b_flags = B_BUSY;
	bp->b_bcount = 0;
	bp->b_blkno = 0;

	if (is_async == TRUE) {
		/* Completion (and cleanup) happens in the biodone routine. */
		bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
		uip->ui_dkc = *dkc;
	}

	bp->b_edev = SD_GET_DEV(un);
	bp->b_dev = cmpdev(bp->b_edev);	/* maybe unnecessary? */

	/*
	 * Unset un_f_sync_cache_required flag
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_f_sync_cache_required = FALSE;
	mutex_exit(SD_MUTEX(un));

	(void) sd_uscsi_strategy(bp);

	/*
	 * If synchronous request, wait for completion
	 * If async just return and let b_iodone callback
	 * cleanup.
	 * NOTE: On return, u_ncmds_in_driver will be decremented,
	 * but it was also incremented in sd_uscsi_strategy(), so
	 * we should be ok.
	 */
	if (is_async == FALSE) {
		(void) biowait(bp);
		rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
	}

	return (rval);
}


/*
 * Function: sd_send_scsi_SYNCHRONIZE_CACHE_biodone
 *
 * Description: Completion routine for SYNCHRONIZE CACHE.  Maps the uscsi
 *		result to an errno, updates the un_f_sync_* support flags
 *		based on the sense data, invokes the caller's dk_callback
 *		(if any), and frees the buf/uscsi resources allocated by
 *		sd_send_scsi_SYNCHRONIZE_CACHE().
 *
 * Arguments: bp - the buf carrying the uscsi request; b_private holds
 *		the sd_uscsi_info struct.
 *
 * Return Code: 0, ENOTSUP, or the errno from the transport.
 */

static int
sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
{
	struct sd_uscsi_info *uip;
	struct uscsi_cmd *uscmd;
	uint8_t *sense_buf;
	struct sd_lun *un;
	int status;
	union scsi_cdb *cdb;

	uip = (struct sd_uscsi_info *)(bp->b_private);
	ASSERT(uip != NULL);

	uscmd = uip->ui_cmdp;
	ASSERT(uscmd != NULL);

	sense_buf = (uint8_t *)uscmd->uscsi_rqbuf;
	ASSERT(sense_buf != NULL);

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	ASSERT(un != NULL);

	cdb = (union scsi_cdb *)uscmd->uscsi_cdb;

	status = geterror(bp);
	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (uscmd->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			/* Ignore reservation conflict */
			status = 0;
			goto done;

		case STATUS_CHECK:
			if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key(sense_buf) ==
			    KEY_ILLEGAL_REQUEST)) {
				/* Ignore Illegal Request error */
				if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) {
					/*
					 * SYNC_NV was rejected: remember the
					 * device does not support it.
					 */
					mutex_enter(SD_MUTEX(un));
					un->un_f_sync_nv_supported = FALSE;
					mutex_exit(SD_MUTEX(un));
					status = 0;
					SD_TRACE(SD_LOG_IO, un,
					    "un_f_sync_nv_supported \
					    is set to false.\n");
					goto done;
				}

				/* Plain SYNC CACHE rejected: unsupported. */
				mutex_enter(SD_MUTEX(un));
				un->un_f_sync_cache_supported = FALSE;
				mutex_exit(SD_MUTEX(un));
				SD_TRACE(SD_LOG_IO, un,
				    "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \
				    un_f_sync_cache_supported set to false \
				    with asc = %x, ascq = %x\n",
				    scsi_sense_asc(sense_buf),
				    scsi_sense_ascq(sense_buf));
				status = ENOTSUP;
				goto done;
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		/*
		 * Turn on the un_f_sync_cache_required flag
		 * since the SYNC CACHE command failed
		 */
		mutex_enter(SD_MUTEX(un));
		un->un_f_sync_cache_required = TRUE;
		mutex_exit(SD_MUTEX(un));

		/*
		 * Don't log an error message if this device
		 * has removable media.
		 */
		if (!un->un_f_has_removable_media) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "SYNCHRONIZE CACHE command failed (%d)\n", status);
		}
		break;
	}

done:
	/* Invoke the caller's completion callback, if one was registered. */
	if (uip->ui_dkc.dkc_callback != NULL) {
		(*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
	}

	/* Release everything allocated by sd_send_scsi_SYNCHRONIZE_CACHE(). */
	ASSERT((bp->b_flags & B_REMAPPED) == 0);
	freerbuf(bp);
	kmem_free(uip, sizeof (struct sd_uscsi_info));
	kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
	kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
	kmem_free(uscmd, sizeof (struct uscsi_cmd));

	return (status);
}


/*
 * Function: sd_send_scsi_GET_CONFIGURATION
 *
 * Description: Issues the get configuration command to the device.
 *              Called from sd_check_for_writable_cd & sd_get_media_info
 *              caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *                  structure for this target.
 *            ucmdbuf - caller-supplied uscsi command struct, filled in here
 *            rqbuf - caller-supplied request sense buffer
 *            rqbuflen - size of rqbuf
 *            bufaddr - buffer for the returned configuration header
 *            buflen - size of bufaddr (expected: SD_PROFILE_HEADER_LEN)
 *            path_flag - SD_PATH_* selection passed to sd_ssc_send()
 *
 * Return Code: 0 - Success
 *              errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 *
 */

static int
sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf,
    uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
    int path_flag)
{
	char cdb[CDB_GROUP1];
	int status;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT(ucmdbuf != NULL);
	ASSERT(rqbuf != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);

	bzero(cdb, sizeof (cdb));
	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
	bzero(rqbuf, rqbuflen);
	bzero(bufaddr, buflen);

	/*
	 * Set up cdb field for the get configuration command.
	 */
	cdb[0] = SCMD_GET_CONFIGURATION;
	cdb[1] = 0x02;	/* Requested Type */
	cdb[8] = SD_PROFILE_HEADER_LEN;	/* allocation length (low byte) */
	ucmdbuf->uscsi_cdb = cdb;
	ucmdbuf->uscsi_cdblen = CDB_GROUP1;
	ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
	ucmdbuf->uscsi_buflen = buflen;
	ucmdbuf->uscsi_timeout = sd_io_time;
	ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
	ucmdbuf->uscsi_rqlen = rqbuflen;
	ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;

	status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmdbuf->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO,
		    "sd_send_scsi_GET_CONFIGURATION: data",
		    (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: exit\n");

	return (status);
}

/*
 * Function: sd_send_scsi_feature_GET_CONFIGURATION
 *
 * Description: Issues the get configuration command to the device to
 *              retrieve a specific feature. Called from
 *              sd_check_for_writable_cd & sd_set_mmc_caps.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *                  structure for this target.
 *            ucmdbuf - caller-supplied uscsi command struct, filled in here
 *            rqbuf - caller-supplied request sense buffer
 *            rqbuflen - size of rqbuf
 *            bufaddr - buffer for the returned feature data
 *            buflen - size of bufaddr
 *            feature - starting feature number (CDB byte 3)
 *            path_flag - SD_PATH_* selection passed to sd_ssc_send()
 *
 * Return Code: 0 - Success
 *              errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 *
 */
static int
sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag)
{
	char cdb[CDB_GROUP1];
	int status;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT(ucmdbuf != NULL);
	ASSERT(rqbuf != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un);

	bzero(cdb, sizeof (cdb));
	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
	bzero(rqbuf, rqbuflen);
	bzero(bufaddr, buflen);

	/*
	 * Set up cdb field for the get configuration command.
	 */
	cdb[0] = SCMD_GET_CONFIGURATION;
	cdb[1] = 0x02;	/* Requested Type */
	cdb[3] = feature;	/* starting feature number, low byte (cdb[2]=0) */
	cdb[8] = buflen;	/* allocation length, low byte only */
	ucmdbuf->uscsi_cdb = cdb;
	ucmdbuf->uscsi_cdblen = CDB_GROUP1;
	ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
	ucmdbuf->uscsi_buflen = buflen;
	ucmdbuf->uscsi_timeout = sd_io_time;
	ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
	ucmdbuf->uscsi_rqlen = rqbuflen;
	ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;

	status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/*
		 * NOTE(review): unlike sd_send_scsi_GET_CONFIGURATION, no
		 * sd_ssc_assessment() is made here on success; presumably
		 * the callers perform the assessment — confirm before
		 * changing.
		 */
		break;	/* Success! */
	case EIO:
		switch (ucmdbuf->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO,
		    "sd_send_scsi_feature_GET_CONFIGURATION: data",
		    (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_MODE_SENSE
 *
 * Description: Utility function for issuing a scsi MODE SENSE command.
 *              Note: This routine uses a consistent implementation for Group0,
 *              Group1, and Group2 commands across all platforms. ATAPI devices
 *              use Group 1 Read/Write commands and Group 2 Mode Sense/Select
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *                  structure for this target.
 *            cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
 *                  CDB_GROUP[1|2] (10 byte).
 *            bufaddr - buffer for page data retrieved from the target.
 *            buflen - size of page to be retrieved.
 *            page_code - page code of data to be retrieved from the target.
 *            path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *                  the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *                  to use the USCSI "direct" chain and bypass the normal
 *                  command waitq.
 *
 * Return Code: 0 - Success
 *              errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
    size_t buflen, uchar_t page_code, int path_flag)
{
	struct scsi_extended_sense sense_buf;
	union scsi_cdb cdb;
	struct uscsi_cmd ucmd_buf;
	int status;
	int headlen;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	bzero(bufaddr, buflen);

	/* Group 0 (6-byte) vs Group 1/2 (10-byte) use different headers. */
	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SENSE;
		cdb.cdb_opaque[2] = page_code;
		FORMG0COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH;
	} else {
		cdb.scc_cmd = SCMD_MODE_SENSE_G1;
		cdb.cdb_opaque[2] = page_code;
		FORMG1COUNT(&cdb, buflen);
		headlen = MODE_HEADER_LENGTH_GRP2;
	}

	ASSERT(headlen <= buflen);
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/*
		 * sr_check_wp() uses 0x3f page code and check the header of
		 * mode page to determine if target device is write-protected.
		 * But some USB devices return 0 bytes for 0x3f page code. For
		 * this case, make sure that mode page header is returned at
		 * least.
		 */
		if (buflen - ucmd_buf.uscsi_resid < headlen) {
			status = EIO;
			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
			    "mode page header is not returned");
		}
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_MODE_SELECT
 *
 * Description: Utility function for issuing a scsi MODE SELECT command.
 *              Note: This routine uses a consistent implementation for Group0,
 *              Group1, and Group2 commands across all platforms. ATAPI devices
 *              use Group 1 Read/Write commands and Group 2 Mode Sense/Select
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *                  structure for this target.
 *            cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
 *                  CDB_GROUP[1|2] (10 byte).
 *            bufaddr - buffer for page data retrieved from the target.
 *            buflen - size of page to be retrieved.
 *            save_page - boolean to determine if SP bit should be set.
21442 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21443 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21444 * to use the USCSI "direct" chain and bypass the normal 21445 * command waitq. 21446 * 21447 * Return Code: 0 - Success 21448 * errno return code from sd_ssc_send() 21449 * 21450 * Context: Can sleep. Does not return until command is completed. 21451 */ 21452 21453 static int 21454 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 21455 size_t buflen, uchar_t save_page, int path_flag) 21456 { 21457 struct scsi_extended_sense sense_buf; 21458 union scsi_cdb cdb; 21459 struct uscsi_cmd ucmd_buf; 21460 int status; 21461 struct sd_lun *un; 21462 21463 ASSERT(ssc != NULL); 21464 un = ssc->ssc_un; 21465 ASSERT(un != NULL); 21466 ASSERT(!mutex_owned(SD_MUTEX(un))); 21467 ASSERT(bufaddr != NULL); 21468 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 21469 (cdbsize == CDB_GROUP2)); 21470 21471 SD_TRACE(SD_LOG_IO, un, 21472 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 21473 21474 bzero(&cdb, sizeof (cdb)); 21475 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21476 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21477 21478 /* Set the PF bit for many third party drives */ 21479 cdb.cdb_opaque[1] = 0x10; 21480 21481 /* Set the savepage(SP) bit if given */ 21482 if (save_page == SD_SAVE_PAGE) { 21483 cdb.cdb_opaque[1] |= 0x01; 21484 } 21485 21486 if (cdbsize == CDB_GROUP0) { 21487 cdb.scc_cmd = SCMD_MODE_SELECT; 21488 FORMG0COUNT(&cdb, buflen); 21489 } else { 21490 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 21491 FORMG1COUNT(&cdb, buflen); 21492 } 21493 21494 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21495 21496 ucmd_buf.uscsi_cdb = (char *)&cdb; 21497 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21498 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21499 ucmd_buf.uscsi_buflen = buflen; 21500 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21501 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21502 ucmd_buf.uscsi_flags = 
USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 21503 ucmd_buf.uscsi_timeout = 60; 21504 21505 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21506 UIO_SYSSPACE, path_flag); 21507 21508 switch (status) { 21509 case 0: 21510 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21511 break; /* Success! */ 21512 case EIO: 21513 switch (ucmd_buf.uscsi_status) { 21514 case STATUS_RESERVATION_CONFLICT: 21515 status = EACCES; 21516 break; 21517 default: 21518 break; 21519 } 21520 break; 21521 default: 21522 break; 21523 } 21524 21525 if (status == 0) { 21526 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 21527 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21528 } 21529 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 21530 21531 return (status); 21532 } 21533 21534 21535 /* 21536 * Function: sd_send_scsi_RDWR 21537 * 21538 * Description: Issue a scsi READ or WRITE command with the given parameters. 21539 * 21540 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21541 * structure for this target. 21542 * cmd: SCMD_READ or SCMD_WRITE 21543 * bufaddr: Address of caller's buffer to receive the RDWR data 21544 * buflen: Length of caller's buffer receive the RDWR data. 21545 * start_block: Block number for the start of the RDWR operation. 21546 * (Assumes target-native block size.) 21547 * residp: Pointer to variable to receive the redisual of the 21548 * RDWR operation (may be NULL of no residual requested). 21549 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21550 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21551 * to use the USCSI "direct" chain and bypass the normal 21552 * command waitq. 21553 * 21554 * Return Code: 0 - Success 21555 * errno return code from sd_ssc_send() 21556 * 21557 * Context: Can sleep. Does not return until command is completed. 
 */

static int
sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
	size_t buflen, daddr_t start_block, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	uint32_t		block_count;
	int			status;
	int			cdbsize;
	uchar_t			flag;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);

	/* Cannot convert bytes to blocks without a valid target block size. */
	if (un->un_f_tgt_blocksize_is_valid != TRUE) {
		return (EINVAL);
	}

	/* Convert the byte count to target-native blocks under SD_MUTEX. */
	mutex_enter(SD_MUTEX(un));
	block_count = SD_BYTES2TGTBLOCKS(un, buflen);
	mutex_exit(SD_MUTEX(un));

	/* Data direction for the uscsi transfer. */
	flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE;

	SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: "
	    "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n",
	    bufaddr, buflen, start_block, block_count);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/*
	 * Compute CDB size to use.
	 *
	 * A Group 0 (6-byte) CDB carries only a 21-bit LBA, so any start
	 * block with bits set in 0xFFE00000 needs at least a Group 1
	 * (10-byte, 32-bit LBA) CDB; anything beyond 32 bits requires a
	 * Group 4 (16-byte) CDB.  ATAPI devices always use Group 1
	 * Read/Write commands.
	 */
	if (start_block > 0xffffffff)
		cdbsize = CDB_GROUP4;
	else if ((start_block & 0xFFE00000) ||
	    (un->un_f_cfg_is_atapi == TRUE))
		cdbsize = CDB_GROUP1;
	else
		cdbsize = CDB_GROUP0;

	switch (cdbsize) {
	case CDB_GROUP0:	/* 6-byte CDBs */
		cdb.scc_cmd = cmd;
		FORMG0ADDR(&cdb, start_block);
		FORMG0COUNT(&cdb, block_count);
		break;
	case CDB_GROUP1:	/* 10-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP1;
		FORMG1ADDR(&cdb, start_block);
		FORMG1COUNT(&cdb, block_count);
		break;
	case CDB_GROUP4:	/* 16-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP4;
		FORMG4LONGADDR(&cdb, (uint64_t)start_block);
		FORMG4COUNT(&cdb, block_count);
		break;
	case CDB_GROUP5:	/* 12-byte CDBs (currently unsupported) */
	default:
		/* All others reserved */
		return (EINVAL);
	}

	/* Set LUN bit(s) in CDB if this is a SCSI-1 device */
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;
	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			/* Reservation conflicts are reported as EACCES. */
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_LOG_SENSE
 *
 * Description: Issue a scsi LOG_SENSE command with the given parameters.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen,
	uchar_t page_code, uchar_t page_control, uint16_t param_ptr,
	int path_flag)

{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/*
	 * Build the 10-byte LOG SENSE CDB: byte 2 carries the page control
	 * field in bits 7-6 and the page code in bits 5-0; bytes 5-6 carry
	 * the parameter pointer in big-endian order.
	 */
	cdb.scc_cmd = SCMD_LOG_SENSE_G1;
	cdb.cdb_opaque[2] = (page_control << 6) | page_code;
	cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
	cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
	FORMG1COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			/* Reservation conflicts are reported as EACCES. */
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
				/*
				 * ASC 0x24: INVALID FIELD IN CDB
				 */
				switch (page_code) {
				case START_STOP_CYCLE_PAGE:
					/*
					 * The start stop cycle counter is
					 * implemented as page 0x31 in earlier
					 * generation disks. In new generation
					 * disks the start stop cycle counter is
					 * implemented as page 0xE. To properly
					 * handle this case if an attempt for
					 * log page 0xE is made and fails we
					 * will try again using page 0x31.
					 *
					 * Network storage BU committed to
					 * maintain the page 0x31 for this
					 * purpose and will not have any other
					 * page implemented with page code 0x31
					 * until all disks transition to the
					 * standard page.
					 */
					mutex_enter(SD_MUTEX(un));
					un->un_start_stop_cycle_page =
					    START_STOP_CYCLE_VU_PAGE;
					/*
					 * Rewrite byte 2 of the CDB with the
					 * vendor-unique page code and reissue
					 * the same uscsi request.
					 */
					cdb.cdb_opaque[2] =
					    (char)(page_control << 6) |
					    un->un_start_stop_cycle_page;
					mutex_exit(SD_MUTEX(un));
					/* First failure was expected: ignore */
					sd_ssc_assessment(ssc, SD_FMT_IGNORE);
					status = sd_ssc_send(
					    ssc, &ucmd_buf, FKIOCTL,
					    UIO_SYSSPACE, path_flag);

					break;
				case TEMPERATURE_PAGE:
					/* No temperature page: not supported */
					status = ENOTTY;
					break;
				default:
					break;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");

	return (status);
}


/*
 * Function: sdioctl
 *
 * Description: Driver's ioctl(9e) entry point function.
21808 * 21809 * Arguments: dev - device number 21810 * cmd - ioctl operation to be performed 21811 * arg - user argument, contains data to be set or reference 21812 * parameter for get 21813 * flag - bit flag, indicating open settings, 32/64 bit type 21814 * cred_p - user credential pointer 21815 * rval_p - calling process return value (OPT) 21816 * 21817 * Return Code: EINVAL 21818 * ENOTTY 21819 * ENXIO 21820 * EIO 21821 * EFAULT 21822 * ENOTSUP 21823 * EPERM 21824 * 21825 * Context: Called from the device switch at normal priority. 21826 */ 21827 21828 static int 21829 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 21830 { 21831 struct sd_lun *un = NULL; 21832 int err = 0; 21833 int i = 0; 21834 cred_t *cr; 21835 int tmprval = EINVAL; 21836 boolean_t is_valid; 21837 sd_ssc_t *ssc; 21838 21839 /* 21840 * All device accesses go thru sdstrategy where we check on suspend 21841 * status 21842 */ 21843 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21844 return (ENXIO); 21845 } 21846 21847 ASSERT(!mutex_owned(SD_MUTEX(un))); 21848 21849 /* Initialize sd_ssc_t for internal uscsi commands */ 21850 ssc = sd_ssc_init(un); 21851 21852 is_valid = SD_IS_VALID_LABEL(un); 21853 21854 /* 21855 * Moved this wait from sd_uscsi_strategy to here for 21856 * reasons of deadlock prevention. Internal driver commands, 21857 * specifically those to change a devices power level, result 21858 * in a call to sd_uscsi_strategy. 21859 */ 21860 mutex_enter(SD_MUTEX(un)); 21861 while ((un->un_state == SD_STATE_SUSPENDED) || 21862 (un->un_state == SD_STATE_PM_CHANGING)) { 21863 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 21864 } 21865 /* 21866 * Twiddling the counter here protects commands from now 21867 * through to the top of sd_uscsi_strategy. Without the 21868 * counter inc. a power down, for example, could get in 21869 * after the above check for state is made and before 21870 * execution gets to the top of sd_uscsi_strategy. 
21871 * That would cause problems. 21872 */ 21873 un->un_ncmds_in_driver++; 21874 21875 if (!is_valid && 21876 (flag & (FNDELAY | FNONBLOCK))) { 21877 switch (cmd) { 21878 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 21879 case DKIOCGVTOC: 21880 case DKIOCGEXTVTOC: 21881 case DKIOCGAPART: 21882 case DKIOCPARTINFO: 21883 case DKIOCEXTPARTINFO: 21884 case DKIOCSGEOM: 21885 case DKIOCSAPART: 21886 case DKIOCGETEFI: 21887 case DKIOCPARTITION: 21888 case DKIOCSVTOC: 21889 case DKIOCSEXTVTOC: 21890 case DKIOCSETEFI: 21891 case DKIOCGMBOOT: 21892 case DKIOCSMBOOT: 21893 case DKIOCG_PHYGEOM: 21894 case DKIOCG_VIRTGEOM: 21895 #if defined(__i386) || defined(__amd64) 21896 case DKIOCSETEXTPART: 21897 #endif 21898 /* let cmlb handle it */ 21899 goto skip_ready_valid; 21900 21901 case CDROMPAUSE: 21902 case CDROMRESUME: 21903 case CDROMPLAYMSF: 21904 case CDROMPLAYTRKIND: 21905 case CDROMREADTOCHDR: 21906 case CDROMREADTOCENTRY: 21907 case CDROMSTOP: 21908 case CDROMSTART: 21909 case CDROMVOLCTRL: 21910 case CDROMSUBCHNL: 21911 case CDROMREADMODE2: 21912 case CDROMREADMODE1: 21913 case CDROMREADOFFSET: 21914 case CDROMSBLKMODE: 21915 case CDROMGBLKMODE: 21916 case CDROMGDRVSPEED: 21917 case CDROMSDRVSPEED: 21918 case CDROMCDDA: 21919 case CDROMCDXA: 21920 case CDROMSUBCODE: 21921 if (!ISCD(un)) { 21922 un->un_ncmds_in_driver--; 21923 ASSERT(un->un_ncmds_in_driver >= 0); 21924 mutex_exit(SD_MUTEX(un)); 21925 err = ENOTTY; 21926 goto done_without_assess; 21927 } 21928 break; 21929 case FDEJECT: 21930 case DKIOCEJECT: 21931 case CDROMEJECT: 21932 if (!un->un_f_eject_media_supported) { 21933 un->un_ncmds_in_driver--; 21934 ASSERT(un->un_ncmds_in_driver >= 0); 21935 mutex_exit(SD_MUTEX(un)); 21936 err = ENOTTY; 21937 goto done_without_assess; 21938 } 21939 break; 21940 case DKIOCFLUSHWRITECACHE: 21941 mutex_exit(SD_MUTEX(un)); 21942 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21943 if (err != 0) { 21944 mutex_enter(SD_MUTEX(un)); 21945 un->un_ncmds_in_driver--; 21946 
ASSERT(un->un_ncmds_in_driver >= 0); 21947 mutex_exit(SD_MUTEX(un)); 21948 err = EIO; 21949 goto done_quick_assess; 21950 } 21951 mutex_enter(SD_MUTEX(un)); 21952 /* FALLTHROUGH */ 21953 case DKIOCREMOVABLE: 21954 case DKIOCHOTPLUGGABLE: 21955 case DKIOCINFO: 21956 case DKIOCGMEDIAINFO: 21957 case DKIOCGMEDIAINFOEXT: 21958 case MHIOCENFAILFAST: 21959 case MHIOCSTATUS: 21960 case MHIOCTKOWN: 21961 case MHIOCRELEASE: 21962 case MHIOCGRP_INKEYS: 21963 case MHIOCGRP_INRESV: 21964 case MHIOCGRP_REGISTER: 21965 case MHIOCGRP_RESERVE: 21966 case MHIOCGRP_PREEMPTANDABORT: 21967 case MHIOCGRP_REGISTERANDIGNOREKEY: 21968 case CDROMCLOSETRAY: 21969 case USCSICMD: 21970 goto skip_ready_valid; 21971 default: 21972 break; 21973 } 21974 21975 mutex_exit(SD_MUTEX(un)); 21976 err = sd_ready_and_valid(ssc, SDPART(dev)); 21977 mutex_enter(SD_MUTEX(un)); 21978 21979 if (err != SD_READY_VALID) { 21980 switch (cmd) { 21981 case DKIOCSTATE: 21982 case CDROMGDRVSPEED: 21983 case CDROMSDRVSPEED: 21984 case FDEJECT: /* for eject command */ 21985 case DKIOCEJECT: 21986 case CDROMEJECT: 21987 case DKIOCREMOVABLE: 21988 case DKIOCHOTPLUGGABLE: 21989 break; 21990 default: 21991 if (un->un_f_has_removable_media) { 21992 err = ENXIO; 21993 } else { 21994 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 21995 if (err == SD_RESERVED_BY_OTHERS) { 21996 err = EACCES; 21997 } else { 21998 err = EIO; 21999 } 22000 } 22001 un->un_ncmds_in_driver--; 22002 ASSERT(un->un_ncmds_in_driver >= 0); 22003 mutex_exit(SD_MUTEX(un)); 22004 22005 goto done_without_assess; 22006 } 22007 } 22008 } 22009 22010 skip_ready_valid: 22011 mutex_exit(SD_MUTEX(un)); 22012 22013 switch (cmd) { 22014 case DKIOCINFO: 22015 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 22016 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 22017 break; 22018 22019 case DKIOCGMEDIAINFO: 22020 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 22021 err = sd_get_media_info(dev, (caddr_t)arg, flag); 22022 break; 22023 22024 case DKIOCGMEDIAINFOEXT: 22025 
SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n"); 22026 err = sd_get_media_info_ext(dev, (caddr_t)arg, flag); 22027 break; 22028 22029 case DKIOCGGEOM: 22030 case DKIOCGVTOC: 22031 case DKIOCGEXTVTOC: 22032 case DKIOCGAPART: 22033 case DKIOCPARTINFO: 22034 case DKIOCEXTPARTINFO: 22035 case DKIOCSGEOM: 22036 case DKIOCSAPART: 22037 case DKIOCGETEFI: 22038 case DKIOCPARTITION: 22039 case DKIOCSVTOC: 22040 case DKIOCSEXTVTOC: 22041 case DKIOCSETEFI: 22042 case DKIOCGMBOOT: 22043 case DKIOCSMBOOT: 22044 case DKIOCG_PHYGEOM: 22045 case DKIOCG_VIRTGEOM: 22046 #if defined(__i386) || defined(__amd64) 22047 case DKIOCSETEXTPART: 22048 #endif 22049 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 22050 22051 /* TUR should spin up */ 22052 22053 if (un->un_f_has_removable_media) 22054 err = sd_send_scsi_TEST_UNIT_READY(ssc, 22055 SD_CHECK_FOR_MEDIA); 22056 22057 else 22058 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 22059 22060 if (err != 0) 22061 goto done_with_assess; 22062 22063 err = cmlb_ioctl(un->un_cmlbhandle, dev, 22064 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 22065 22066 if ((err == 0) && 22067 ((cmd == DKIOCSETEFI) || 22068 (un->un_f_pkstats_enabled) && 22069 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC || 22070 cmd == DKIOCSEXTVTOC))) { 22071 22072 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 22073 (void *)SD_PATH_DIRECT); 22074 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 22075 sd_set_pstats(un); 22076 SD_TRACE(SD_LOG_IO_PARTITION, un, 22077 "sd_ioctl: un:0x%p pstats created and " 22078 "set\n", un); 22079 } 22080 } 22081 22082 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) || 22083 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 22084 22085 mutex_enter(SD_MUTEX(un)); 22086 if (un->un_f_devid_supported && 22087 (un->un_f_opt_fab_devid == TRUE)) { 22088 if (un->un_devid == NULL) { 22089 sd_register_devid(ssc, SD_DEVINFO(un), 22090 SD_TARGET_IS_UNRESERVED); 22091 } else { 22092 /* 22093 * The device id for this disk 22094 * has been 
fabricated. The 22095 * device id must be preserved 22096 * by writing it back out to 22097 * disk. 22098 */ 22099 if (sd_write_deviceid(ssc) != 0) { 22100 ddi_devid_free(un->un_devid); 22101 un->un_devid = NULL; 22102 } 22103 } 22104 } 22105 mutex_exit(SD_MUTEX(un)); 22106 } 22107 22108 break; 22109 22110 case DKIOCLOCK: 22111 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 22112 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 22113 SD_PATH_STANDARD); 22114 goto done_with_assess; 22115 22116 case DKIOCUNLOCK: 22117 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 22118 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 22119 SD_PATH_STANDARD); 22120 goto done_with_assess; 22121 22122 case DKIOCSTATE: { 22123 enum dkio_state state; 22124 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 22125 22126 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 22127 err = EFAULT; 22128 } else { 22129 err = sd_check_media(dev, state); 22130 if (err == 0) { 22131 if (ddi_copyout(&un->un_mediastate, (void *)arg, 22132 sizeof (int), flag) != 0) 22133 err = EFAULT; 22134 } 22135 } 22136 break; 22137 } 22138 22139 case DKIOCREMOVABLE: 22140 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 22141 i = un->un_f_has_removable_media ? 1 : 0; 22142 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22143 err = EFAULT; 22144 } else { 22145 err = 0; 22146 } 22147 break; 22148 22149 case DKIOCHOTPLUGGABLE: 22150 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 22151 i = un->un_f_is_hotpluggable ? 
1 : 0; 22152 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22153 err = EFAULT; 22154 } else { 22155 err = 0; 22156 } 22157 break; 22158 22159 case DKIOCGTEMPERATURE: 22160 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 22161 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 22162 break; 22163 22164 case MHIOCENFAILFAST: 22165 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 22166 if ((err = drv_priv(cred_p)) == 0) { 22167 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 22168 } 22169 break; 22170 22171 case MHIOCTKOWN: 22172 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 22173 if ((err = drv_priv(cred_p)) == 0) { 22174 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 22175 } 22176 break; 22177 22178 case MHIOCRELEASE: 22179 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 22180 if ((err = drv_priv(cred_p)) == 0) { 22181 err = sd_mhdioc_release(dev); 22182 } 22183 break; 22184 22185 case MHIOCSTATUS: 22186 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 22187 if ((err = drv_priv(cred_p)) == 0) { 22188 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) { 22189 case 0: 22190 err = 0; 22191 break; 22192 case EACCES: 22193 *rval_p = 1; 22194 err = 0; 22195 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22196 break; 22197 default: 22198 err = EIO; 22199 goto done_with_assess; 22200 } 22201 } 22202 break; 22203 22204 case MHIOCQRESERVE: 22205 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 22206 if ((err = drv_priv(cred_p)) == 0) { 22207 err = sd_reserve_release(dev, SD_RESERVE); 22208 } 22209 break; 22210 22211 case MHIOCREREGISTERDEVID: 22212 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 22213 if (drv_priv(cred_p) == EPERM) { 22214 err = EPERM; 22215 } else if (!un->un_f_devid_supported) { 22216 err = ENOTTY; 22217 } else { 22218 err = sd_mhdioc_register_devid(dev); 22219 } 22220 break; 22221 22222 case MHIOCGRP_INKEYS: 22223 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 22224 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 22225 if 
(un->un_reservation_type == SD_SCSI2_RESERVATION) { 22226 err = ENOTSUP; 22227 } else { 22228 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 22229 flag); 22230 } 22231 } 22232 break; 22233 22234 case MHIOCGRP_INRESV: 22235 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 22236 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 22237 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22238 err = ENOTSUP; 22239 } else { 22240 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 22241 } 22242 } 22243 break; 22244 22245 case MHIOCGRP_REGISTER: 22246 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 22247 if ((err = drv_priv(cred_p)) != EPERM) { 22248 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22249 err = ENOTSUP; 22250 } else if (arg != NULL) { 22251 mhioc_register_t reg; 22252 if (ddi_copyin((void *)arg, ®, 22253 sizeof (mhioc_register_t), flag) != 0) { 22254 err = EFAULT; 22255 } else { 22256 err = 22257 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22258 ssc, SD_SCSI3_REGISTER, 22259 (uchar_t *)®); 22260 if (err != 0) 22261 goto done_with_assess; 22262 } 22263 } 22264 } 22265 break; 22266 22267 case MHIOCGRP_RESERVE: 22268 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 22269 if ((err = drv_priv(cred_p)) != EPERM) { 22270 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22271 err = ENOTSUP; 22272 } else if (arg != NULL) { 22273 mhioc_resv_desc_t resv_desc; 22274 if (ddi_copyin((void *)arg, &resv_desc, 22275 sizeof (mhioc_resv_desc_t), flag) != 0) { 22276 err = EFAULT; 22277 } else { 22278 err = 22279 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22280 ssc, SD_SCSI3_RESERVE, 22281 (uchar_t *)&resv_desc); 22282 if (err != 0) 22283 goto done_with_assess; 22284 } 22285 } 22286 } 22287 break; 22288 22289 case MHIOCGRP_PREEMPTANDABORT: 22290 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 22291 if ((err = drv_priv(cred_p)) != EPERM) { 22292 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22293 err = ENOTSUP; 22294 } else if (arg != NULL) { 22295 
mhioc_preemptandabort_t preempt_abort; 22296 if (ddi_copyin((void *)arg, &preempt_abort, 22297 sizeof (mhioc_preemptandabort_t), 22298 flag) != 0) { 22299 err = EFAULT; 22300 } else { 22301 err = 22302 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22303 ssc, SD_SCSI3_PREEMPTANDABORT, 22304 (uchar_t *)&preempt_abort); 22305 if (err != 0) 22306 goto done_with_assess; 22307 } 22308 } 22309 } 22310 break; 22311 22312 case MHIOCGRP_REGISTERANDIGNOREKEY: 22313 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 22314 if ((err = drv_priv(cred_p)) != EPERM) { 22315 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22316 err = ENOTSUP; 22317 } else if (arg != NULL) { 22318 mhioc_registerandignorekey_t r_and_i; 22319 if (ddi_copyin((void *)arg, (void *)&r_and_i, 22320 sizeof (mhioc_registerandignorekey_t), 22321 flag) != 0) { 22322 err = EFAULT; 22323 } else { 22324 err = 22325 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22326 ssc, SD_SCSI3_REGISTERANDIGNOREKEY, 22327 (uchar_t *)&r_and_i); 22328 if (err != 0) 22329 goto done_with_assess; 22330 } 22331 } 22332 } 22333 break; 22334 22335 case USCSICMD: 22336 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 22337 cr = ddi_get_cred(); 22338 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 22339 err = EPERM; 22340 } else { 22341 enum uio_seg uioseg; 22342 22343 uioseg = (flag & FKIOCTL) ? 
UIO_SYSSPACE : 22344 UIO_USERSPACE; 22345 if (un->un_f_format_in_progress == TRUE) { 22346 err = EAGAIN; 22347 break; 22348 } 22349 22350 err = sd_ssc_send(ssc, 22351 (struct uscsi_cmd *)arg, 22352 flag, uioseg, SD_PATH_STANDARD); 22353 if (err != 0) 22354 goto done_with_assess; 22355 else 22356 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 22357 } 22358 break; 22359 22360 case CDROMPAUSE: 22361 case CDROMRESUME: 22362 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 22363 if (!ISCD(un)) { 22364 err = ENOTTY; 22365 } else { 22366 err = sr_pause_resume(dev, cmd); 22367 } 22368 break; 22369 22370 case CDROMPLAYMSF: 22371 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 22372 if (!ISCD(un)) { 22373 err = ENOTTY; 22374 } else { 22375 err = sr_play_msf(dev, (caddr_t)arg, flag); 22376 } 22377 break; 22378 22379 case CDROMPLAYTRKIND: 22380 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 22381 #if defined(__i386) || defined(__amd64) 22382 /* 22383 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 22384 */ 22385 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 22386 #else 22387 if (!ISCD(un)) { 22388 #endif 22389 err = ENOTTY; 22390 } else { 22391 err = sr_play_trkind(dev, (caddr_t)arg, flag); 22392 } 22393 break; 22394 22395 case CDROMREADTOCHDR: 22396 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 22397 if (!ISCD(un)) { 22398 err = ENOTTY; 22399 } else { 22400 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 22401 } 22402 break; 22403 22404 case CDROMREADTOCENTRY: 22405 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 22406 if (!ISCD(un)) { 22407 err = ENOTTY; 22408 } else { 22409 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 22410 } 22411 break; 22412 22413 case CDROMSTOP: 22414 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 22415 if (!ISCD(un)) { 22416 err = ENOTTY; 22417 } else { 22418 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 22419 SD_TARGET_STOP, SD_PATH_STANDARD); 22420 goto done_with_assess; 22421 } 22422 break; 22423 22424 case CDROMSTART: 22425 
SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 22426 if (!ISCD(un)) { 22427 err = ENOTTY; 22428 } else { 22429 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 22430 SD_TARGET_START, SD_PATH_STANDARD); 22431 goto done_with_assess; 22432 } 22433 break; 22434 22435 case CDROMCLOSETRAY: 22436 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 22437 if (!ISCD(un)) { 22438 err = ENOTTY; 22439 } else { 22440 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 22441 SD_TARGET_CLOSE, SD_PATH_STANDARD); 22442 goto done_with_assess; 22443 } 22444 break; 22445 22446 case FDEJECT: /* for eject command */ 22447 case DKIOCEJECT: 22448 case CDROMEJECT: 22449 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 22450 if (!un->un_f_eject_media_supported) { 22451 err = ENOTTY; 22452 } else { 22453 err = sr_eject(dev); 22454 } 22455 break; 22456 22457 case CDROMVOLCTRL: 22458 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 22459 if (!ISCD(un)) { 22460 err = ENOTTY; 22461 } else { 22462 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 22463 } 22464 break; 22465 22466 case CDROMSUBCHNL: 22467 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 22468 if (!ISCD(un)) { 22469 err = ENOTTY; 22470 } else { 22471 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 22472 } 22473 break; 22474 22475 case CDROMREADMODE2: 22476 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 22477 if (!ISCD(un)) { 22478 err = ENOTTY; 22479 } else if (un->un_f_cfg_is_atapi == TRUE) { 22480 /* 22481 * If the drive supports READ CD, use that instead of 22482 * switching the LBA size via a MODE SELECT 22483 * Block Descriptor 22484 */ 22485 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 22486 } else { 22487 err = sr_read_mode2(dev, (caddr_t)arg, flag); 22488 } 22489 break; 22490 22491 case CDROMREADMODE1: 22492 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 22493 if (!ISCD(un)) { 22494 err = ENOTTY; 22495 } else { 22496 err = sr_read_mode1(dev, (caddr_t)arg, flag); 22497 } 22498 break; 22499 22500 case CDROMREADOFFSET: 22501 
SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 22502 if (!ISCD(un)) { 22503 err = ENOTTY; 22504 } else { 22505 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 22506 flag); 22507 } 22508 break; 22509 22510 case CDROMSBLKMODE: 22511 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 22512 /* 22513 * There is no means of changing block size in case of atapi 22514 * drives, thus return ENOTTY if drive type is atapi 22515 */ 22516 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 22517 err = ENOTTY; 22518 } else if (un->un_f_mmc_cap == TRUE) { 22519 22520 /* 22521 * MMC Devices do not support changing the 22522 * logical block size 22523 * 22524 * Note: EINVAL is being returned instead of ENOTTY to 22525 * maintain consistancy with the original mmc 22526 * driver update. 22527 */ 22528 err = EINVAL; 22529 } else { 22530 mutex_enter(SD_MUTEX(un)); 22531 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 22532 (un->un_ncmds_in_transport > 0)) { 22533 mutex_exit(SD_MUTEX(un)); 22534 err = EINVAL; 22535 } else { 22536 mutex_exit(SD_MUTEX(un)); 22537 err = sr_change_blkmode(dev, cmd, arg, flag); 22538 } 22539 } 22540 break; 22541 22542 case CDROMGBLKMODE: 22543 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 22544 if (!ISCD(un)) { 22545 err = ENOTTY; 22546 } else if ((un->un_f_cfg_is_atapi != FALSE) && 22547 (un->un_f_blockcount_is_valid != FALSE)) { 22548 /* 22549 * Drive is an ATAPI drive so return target block 22550 * size for ATAPI drives since we cannot change the 22551 * blocksize on ATAPI drives. Used primarily to detect 22552 * if an ATAPI cdrom is present. 22553 */ 22554 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 22555 sizeof (int), flag) != 0) { 22556 err = EFAULT; 22557 } else { 22558 err = 0; 22559 } 22560 22561 } else { 22562 /* 22563 * Drive supports changing block sizes via a Mode 22564 * Select. 
22565 */ 22566 err = sr_change_blkmode(dev, cmd, arg, flag); 22567 } 22568 break; 22569 22570 case CDROMGDRVSPEED: 22571 case CDROMSDRVSPEED: 22572 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 22573 if (!ISCD(un)) { 22574 err = ENOTTY; 22575 } else if (un->un_f_mmc_cap == TRUE) { 22576 /* 22577 * Note: In the future the driver implementation 22578 * for getting and 22579 * setting cd speed should entail: 22580 * 1) If non-mmc try the Toshiba mode page 22581 * (sr_change_speed) 22582 * 2) If mmc but no support for Real Time Streaming try 22583 * the SET CD SPEED (0xBB) command 22584 * (sr_atapi_change_speed) 22585 * 3) If mmc and support for Real Time Streaming 22586 * try the GET PERFORMANCE and SET STREAMING 22587 * commands (not yet implemented, 4380808) 22588 */ 22589 /* 22590 * As per recent MMC spec, CD-ROM speed is variable 22591 * and changes with LBA. Since there is no such 22592 * things as drive speed now, fail this ioctl. 22593 * 22594 * Note: EINVAL is returned for consistancy of original 22595 * implementation which included support for getting 22596 * the drive speed of mmc devices but not setting 22597 * the drive speed. Thus EINVAL would be returned 22598 * if a set request was made for an mmc device. 22599 * We no longer support get or set speed for 22600 * mmc but need to remain consistent with regard 22601 * to the error code returned. 
22602 */ 22603 err = EINVAL; 22604 } else if (un->un_f_cfg_is_atapi == TRUE) { 22605 err = sr_atapi_change_speed(dev, cmd, arg, flag); 22606 } else { 22607 err = sr_change_speed(dev, cmd, arg, flag); 22608 } 22609 break; 22610 22611 case CDROMCDDA: 22612 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 22613 if (!ISCD(un)) { 22614 err = ENOTTY; 22615 } else { 22616 err = sr_read_cdda(dev, (void *)arg, flag); 22617 } 22618 break; 22619 22620 case CDROMCDXA: 22621 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 22622 if (!ISCD(un)) { 22623 err = ENOTTY; 22624 } else { 22625 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 22626 } 22627 break; 22628 22629 case CDROMSUBCODE: 22630 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 22631 if (!ISCD(un)) { 22632 err = ENOTTY; 22633 } else { 22634 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 22635 } 22636 break; 22637 22638 22639 #ifdef SDDEBUG 22640 /* RESET/ABORTS testing ioctls */ 22641 case DKIOCRESET: { 22642 int reset_level; 22643 22644 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 22645 err = EFAULT; 22646 } else { 22647 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 22648 "reset_level = 0x%lx\n", reset_level); 22649 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 22650 err = 0; 22651 } else { 22652 err = EIO; 22653 } 22654 } 22655 break; 22656 } 22657 22658 case DKIOCABORT: 22659 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 22660 if (scsi_abort(SD_ADDRESS(un), NULL)) { 22661 err = 0; 22662 } else { 22663 err = EIO; 22664 } 22665 break; 22666 #endif 22667 22668 #ifdef SD_FAULT_INJECTION 22669 /* SDIOC FaultInjection testing ioctls */ 22670 case SDIOCSTART: 22671 case SDIOCSTOP: 22672 case SDIOCINSERTPKT: 22673 case SDIOCINSERTXB: 22674 case SDIOCINSERTUN: 22675 case SDIOCINSERTARQ: 22676 case SDIOCPUSH: 22677 case SDIOCRETRIEVE: 22678 case SDIOCRUN: 22679 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 22680 "SDIOC detected cmd:0x%X:\n", cmd); 22681 /* call error generator */ 22682 
sd_faultinjection_ioctl(cmd, arg, un); 22683 err = 0; 22684 break; 22685 22686 #endif /* SD_FAULT_INJECTION */ 22687 22688 case DKIOCFLUSHWRITECACHE: 22689 { 22690 struct dk_callback *dkc = (struct dk_callback *)arg; 22691 22692 mutex_enter(SD_MUTEX(un)); 22693 if (!un->un_f_sync_cache_supported || 22694 !un->un_f_write_cache_enabled) { 22695 err = un->un_f_sync_cache_supported ? 22696 0 : ENOTSUP; 22697 mutex_exit(SD_MUTEX(un)); 22698 if ((flag & FKIOCTL) && dkc != NULL && 22699 dkc->dkc_callback != NULL) { 22700 (*dkc->dkc_callback)(dkc->dkc_cookie, 22701 err); 22702 /* 22703 * Did callback and reported error. 22704 * Since we did a callback, ioctl 22705 * should return 0. 22706 */ 22707 err = 0; 22708 } 22709 break; 22710 } 22711 mutex_exit(SD_MUTEX(un)); 22712 22713 if ((flag & FKIOCTL) && dkc != NULL && 22714 dkc->dkc_callback != NULL) { 22715 /* async SYNC CACHE request */ 22716 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 22717 } else { 22718 /* synchronous SYNC CACHE request */ 22719 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 22720 } 22721 } 22722 break; 22723 22724 case DKIOCGETWCE: { 22725 22726 int wce; 22727 22728 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 22729 break; 22730 } 22731 22732 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 22733 err = EFAULT; 22734 } 22735 break; 22736 } 22737 22738 case DKIOCSETWCE: { 22739 22740 int wce, sync_supported; 22741 22742 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 22743 err = EFAULT; 22744 break; 22745 } 22746 22747 /* 22748 * Synchronize multiple threads trying to enable 22749 * or disable the cache via the un_f_wcc_cv 22750 * condition variable. 22751 */ 22752 mutex_enter(SD_MUTEX(un)); 22753 22754 /* 22755 * Don't allow the cache to be enabled if the 22756 * config file has it disabled. 
22757 */ 22758 if (un->un_f_opt_disable_cache && wce) { 22759 mutex_exit(SD_MUTEX(un)); 22760 err = EINVAL; 22761 break; 22762 } 22763 22764 /* 22765 * Wait for write cache change in progress 22766 * bit to be clear before proceeding. 22767 */ 22768 while (un->un_f_wcc_inprog) 22769 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 22770 22771 un->un_f_wcc_inprog = 1; 22772 22773 if (un->un_f_write_cache_enabled && wce == 0) { 22774 /* 22775 * Disable the write cache. Don't clear 22776 * un_f_write_cache_enabled until after 22777 * the mode select and flush are complete. 22778 */ 22779 sync_supported = un->un_f_sync_cache_supported; 22780 22781 /* 22782 * If cache flush is suppressed, we assume that the 22783 * controller firmware will take care of managing the 22784 * write cache for us: no need to explicitly 22785 * disable it. 22786 */ 22787 if (!un->un_f_suppress_cache_flush) { 22788 mutex_exit(SD_MUTEX(un)); 22789 if ((err = sd_cache_control(ssc, 22790 SD_CACHE_NOCHANGE, 22791 SD_CACHE_DISABLE)) == 0 && 22792 sync_supported) { 22793 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 22794 NULL); 22795 } 22796 } else { 22797 mutex_exit(SD_MUTEX(un)); 22798 } 22799 22800 mutex_enter(SD_MUTEX(un)); 22801 if (err == 0) { 22802 un->un_f_write_cache_enabled = 0; 22803 } 22804 22805 } else if (!un->un_f_write_cache_enabled && wce != 0) { 22806 /* 22807 * Set un_f_write_cache_enabled first, so there is 22808 * no window where the cache is enabled, but the 22809 * bit says it isn't. 22810 */ 22811 un->un_f_write_cache_enabled = 1; 22812 22813 /* 22814 * If cache flush is suppressed, we assume that the 22815 * controller firmware will take care of managing the 22816 * write cache for us: no need to explicitly 22817 * enable it. 
22818 */ 22819 if (!un->un_f_suppress_cache_flush) { 22820 mutex_exit(SD_MUTEX(un)); 22821 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 22822 SD_CACHE_ENABLE); 22823 } else { 22824 mutex_exit(SD_MUTEX(un)); 22825 } 22826 22827 mutex_enter(SD_MUTEX(un)); 22828 22829 if (err) { 22830 un->un_f_write_cache_enabled = 0; 22831 } 22832 } 22833 22834 un->un_f_wcc_inprog = 0; 22835 cv_broadcast(&un->un_wcc_cv); 22836 mutex_exit(SD_MUTEX(un)); 22837 break; 22838 } 22839 22840 default: 22841 err = ENOTTY; 22842 break; 22843 } 22844 mutex_enter(SD_MUTEX(un)); 22845 un->un_ncmds_in_driver--; 22846 ASSERT(un->un_ncmds_in_driver >= 0); 22847 mutex_exit(SD_MUTEX(un)); 22848 22849 22850 done_without_assess: 22851 sd_ssc_fini(ssc); 22852 22853 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22854 return (err); 22855 22856 done_with_assess: 22857 mutex_enter(SD_MUTEX(un)); 22858 un->un_ncmds_in_driver--; 22859 ASSERT(un->un_ncmds_in_driver >= 0); 22860 mutex_exit(SD_MUTEX(un)); 22861 22862 done_quick_assess: 22863 if (err != 0) 22864 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22865 /* Uninitialize sd_ssc_t pointer */ 22866 sd_ssc_fini(ssc); 22867 22868 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22869 return (err); 22870 } 22871 22872 22873 /* 22874 * Function: sd_dkio_ctrl_info 22875 * 22876 * Description: This routine is the driver entry point for handling controller 22877 * information ioctl requests (DKIOCINFO). 22878 * 22879 * Arguments: dev - the device number 22880 * arg - pointer to user provided dk_cinfo structure 22881 * specifying the controller type and attributes. 22882 * flag - this argument is a pass through to ddi_copyxxx() 22883 * directly from the mode argument of ioctl(). 
22884 * 22885 * Return Code: 0 22886 * EFAULT 22887 * ENXIO 22888 */ 22889 22890 static int 22891 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 22892 { 22893 struct sd_lun *un = NULL; 22894 struct dk_cinfo *info; 22895 dev_info_t *pdip; 22896 int lun, tgt; 22897 22898 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22899 return (ENXIO); 22900 } 22901 22902 info = (struct dk_cinfo *) 22903 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 22904 22905 switch (un->un_ctype) { 22906 case CTYPE_CDROM: 22907 info->dki_ctype = DKC_CDROM; 22908 break; 22909 default: 22910 info->dki_ctype = DKC_SCSI_CCS; 22911 break; 22912 } 22913 pdip = ddi_get_parent(SD_DEVINFO(un)); 22914 info->dki_cnum = ddi_get_instance(pdip); 22915 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 22916 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 22917 } else { 22918 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 22919 DK_DEVLEN - 1); 22920 } 22921 22922 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22923 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 22924 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22925 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 22926 22927 /* Unit Information */ 22928 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 22929 info->dki_slave = ((tgt << 3) | lun); 22930 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 22931 DK_DEVLEN - 1); 22932 info->dki_flags = DKI_FMTVOL; 22933 info->dki_partition = SDPART(dev); 22934 22935 /* Max Transfer size of this device in blocks */ 22936 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 22937 info->dki_addr = 0; 22938 info->dki_space = 0; 22939 info->dki_prio = 0; 22940 info->dki_vec = 0; 22941 22942 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 22943 kmem_free(info, sizeof (struct dk_cinfo)); 22944 return (EFAULT); 22945 } else { 22946 kmem_free(info, sizeof (struct dk_cinfo)); 22947 return (0); 22948 } 22949 } 22950 22951 22952 /* 22953 
 * Function: sd_get_media_info
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFO).
 *
 * Arguments: dev	- the device number
 *		arg	- pointer to user provided dk_minfo structure
 *			  specifying the media type, logical block size and
 *			  drive capacity.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EACCESS
 *		EFAULT
 *		ENXIO
 *		EIO
 */

static int
sd_get_media_info(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	com;
	struct scsi_inquiry	*sinq;
	struct dk_minfo		media_info;
	u_longlong_t		media_capacity;
	uint64_t		capacity;
	uint_t			lbasize;
	uchar_t			*out_data;
	uchar_t			*rqbuf;
	int			rval = 0;
	int			rtn;
	sd_ssc_t		*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n");

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	/* Issue a TUR to determine if the drive is ready with media present */
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
	if (rval == ENXIO) {
		goto done;
	} else if (rval != 0) {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	/* Now get configuration data */
	if (ISCD(un)) {
		media_info.dki_media_type = DK_CDROM;

		/* Allow SCMD_GET_CONFIGURATION to MMC devices only */
		if (un->un_f_mmc_cap == TRUE) {
			rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
			    SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
			    SD_PATH_STANDARD);

			if (rtn) {
				/*
				 * We ignore all failures for CD and need to
				 * put the assessment before processing code
				 * to avoid missing assessment for FMA.
				 */
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				/*
				 * Failed for other than an illegal request
				 * or command not supported.
				 * NOTE(review): rqbuf[2]/rqbuf[12] index
				 * fixed-format sense data (byte 2 = sense
				 * key, byte 12 = ASC; 0x20 = invalid
				 * opcode) -- confirm the sense buffer is
				 * always fixed-format here.
				 */
				if ((com.uscsi_status == STATUS_CHECK) &&
				    (com.uscsi_rqstatus == STATUS_GOOD)) {
					if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) ||
					    (rqbuf[12] != 0x20)) {
						rval = EIO;
						goto no_assessment;
					}
				}
			} else {
				/*
				 * The GET CONFIGURATION command succeeded
				 * so set the media type according to the
				 * returned data (current profile, bytes
				 * 6-7 of the feature header).
				 */
				media_info.dki_media_type = out_data[6];
				media_info.dki_media_type <<= 8;
				media_info.dki_media_type |= out_data[7];
			}
		}
	} else {
		/*
		 * The profile list is not available, so we attempt to identify
		 * the media type based on the inquiry data
		 */
		sinq = un->un_sd->sd_inq;
		if ((sinq->inq_dtype == DTYPE_DIRECT) ||
		    (sinq->inq_dtype == DTYPE_OPTICAL)) {
			/* This is a direct access device or optical disk */
			media_info.dki_media_type = DK_FIXED_DISK;

			/* Special-case Iomega ZIP/JAZ removable drives. */
			if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) ||
			    (bcmp(sinq->inq_vid, "iomega", 6) == 0)) {
				if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) {
					media_info.dki_media_type = DK_ZIP;
				} else if (
				    (bcmp(sinq->inq_pid, "jaz", 3) == 0)) {
					media_info.dki_media_type = DK_JAZ;
				}
			}
		} else {
			/*
			 * Not a CD, direct access or optical disk so return
			 * unknown media
			 */
			media_info.dki_media_type = DK_UNKNOWN;
		}
	}

	/* Now read the capacity so we can provide the lbasize and capacity */
	rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
	    SD_PATH_DIRECT);
	switch (rval) {
	case 0:
		break;
	case EACCES:
		rval = EACCES;
		goto done;
	default:
		rval = EIO;
		goto done;
	}

	/*
	 * If lun is expanded dynamically, update the un structure.
	 */
	mutex_enter(SD_MUTEX(un));
	if ((un->un_f_blockcount_is_valid == TRUE) &&
	    (un->un_f_tgt_blocksize_is_valid == TRUE) &&
	    (capacity > un->un_blockcount)) {
		sd_update_block_info(un, lbasize, capacity);
	}
	mutex_exit(SD_MUTEX(un));

	media_info.dki_lbsize = lbasize;
	media_capacity = capacity;

	/*
	 * sd_send_scsi_READ_CAPACITY() reports capacity in
	 * un->un_sys_blocksize chunks. So we need to convert it into
	 * cap.lbasize chunks.
	 */
	media_capacity *= un->un_sys_blocksize;
	media_capacity /= lbasize;
	media_info.dki_capacity = media_capacity;

	if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) {
		rval = EFAULT;
		/* Put goto. Anybody might add some code below in future */
		goto no_assessment;
	}
done:
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
no_assessment:
	sd_ssc_fini(ssc);
	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);
	return (rval);
}

/*
 * Function: sd_get_media_info_ext
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFOEXT). The
 *		difference this ioctl and DKIOCGMEDIAINFO is the return value
 *		of this ioctl contains both logical block size and physical
 *		block size.
23141 * 23142 * 23143 * Arguments: dev - the device number 23144 * arg - pointer to user provided dk_minfo_ext structure 23145 * specifying the media type, logical block size, 23146 * physical block size and disk capacity. 23147 * flag - this argument is a pass through to ddi_copyxxx() 23148 * directly from the mode argument of ioctl(). 23149 * 23150 * Return Code: 0 23151 * EACCESS 23152 * EFAULT 23153 * ENXIO 23154 * EIO 23155 */ 23156 23157 static int 23158 sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag) 23159 { 23160 struct sd_lun *un = NULL; 23161 struct uscsi_cmd com; 23162 struct scsi_inquiry *sinq; 23163 struct dk_minfo_ext media_info_ext; 23164 u_longlong_t media_capacity; 23165 uint64_t capacity; 23166 uint_t lbasize; 23167 uint_t pbsize; 23168 uchar_t *out_data; 23169 uchar_t *rqbuf; 23170 int rval = 0; 23171 int rtn; 23172 sd_ssc_t *ssc; 23173 23174 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 23175 (un->un_state == SD_STATE_OFFLINE)) { 23176 return (ENXIO); 23177 } 23178 23179 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_ext: entry\n"); 23180 23181 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 23182 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 23183 ssc = sd_ssc_init(un); 23184 23185 /* Issue a TUR to determine if the drive is ready with media present */ 23186 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA); 23187 if (rval == ENXIO) { 23188 goto done; 23189 } else if (rval != 0) { 23190 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23191 } 23192 23193 /* Now get configuration data */ 23194 if (ISCD(un)) { 23195 media_info_ext.dki_media_type = DK_CDROM; 23196 23197 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 23198 if (un->un_f_mmc_cap == TRUE) { 23199 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, 23200 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 23201 SD_PATH_STANDARD); 23202 23203 if (rtn) { 23204 /* 23205 * We ignore all failures for CD and need to 23206 * put the assessment 
before processing code 23207 * to avoid missing assessment for FMA. 23208 */ 23209 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23210 /* 23211 * Failed for other than an illegal request 23212 * or command not supported 23213 */ 23214 if ((com.uscsi_status == STATUS_CHECK) && 23215 (com.uscsi_rqstatus == STATUS_GOOD)) { 23216 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 23217 (rqbuf[12] != 0x20)) { 23218 rval = EIO; 23219 goto no_assessment; 23220 } 23221 } 23222 } else { 23223 /* 23224 * The GET CONFIGURATION command succeeded 23225 * so set the media type according to the 23226 * returned data 23227 */ 23228 media_info_ext.dki_media_type = out_data[6]; 23229 media_info_ext.dki_media_type <<= 8; 23230 media_info_ext.dki_media_type |= out_data[7]; 23231 } 23232 } 23233 } else { 23234 /* 23235 * The profile list is not available, so we attempt to identify 23236 * the media type based on the inquiry data 23237 */ 23238 sinq = un->un_sd->sd_inq; 23239 if ((sinq->inq_dtype == DTYPE_DIRECT) || 23240 (sinq->inq_dtype == DTYPE_OPTICAL)) { 23241 /* This is a direct access device or optical disk */ 23242 media_info_ext.dki_media_type = DK_FIXED_DISK; 23243 23244 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 23245 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 23246 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 23247 media_info_ext.dki_media_type = DK_ZIP; 23248 } else if ( 23249 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 23250 media_info_ext.dki_media_type = DK_JAZ; 23251 } 23252 } 23253 } else { 23254 /* 23255 * Not a CD, direct access or optical disk so return 23256 * unknown media 23257 */ 23258 media_info_ext.dki_media_type = DK_UNKNOWN; 23259 } 23260 } 23261 23262 /* 23263 * Now read the capacity so we can provide the lbasize, 23264 * pbsize and capacity. 
23265 */ 23266 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize, &pbsize, 23267 SD_PATH_DIRECT); 23268 23269 if (rval != 0) { 23270 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 23271 SD_PATH_DIRECT); 23272 23273 switch (rval) { 23274 case 0: 23275 pbsize = lbasize; 23276 media_capacity = capacity; 23277 /* 23278 * sd_send_scsi_READ_CAPACITY() reports capacity in 23279 * un->un_sys_blocksize chunks. So we need to convert 23280 * it into cap.lbsize chunks. 23281 */ 23282 if (un->un_f_has_removable_media) { 23283 media_capacity *= un->un_sys_blocksize; 23284 media_capacity /= lbasize; 23285 } 23286 break; 23287 case EACCES: 23288 rval = EACCES; 23289 goto done; 23290 default: 23291 rval = EIO; 23292 goto done; 23293 } 23294 } else { 23295 media_capacity = capacity; 23296 } 23297 23298 /* 23299 * If lun is expanded dynamically, update the un structure. 23300 */ 23301 mutex_enter(SD_MUTEX(un)); 23302 if ((un->un_f_blockcount_is_valid == TRUE) && 23303 (un->un_f_tgt_blocksize_is_valid == TRUE) && 23304 (capacity > un->un_blockcount)) { 23305 sd_update_block_info(un, lbasize, capacity); 23306 } 23307 mutex_exit(SD_MUTEX(un)); 23308 23309 media_info_ext.dki_lbsize = lbasize; 23310 media_info_ext.dki_capacity = media_capacity; 23311 media_info_ext.dki_pbsize = pbsize; 23312 23313 if (ddi_copyout(&media_info_ext, arg, sizeof (struct dk_minfo_ext), 23314 flag)) { 23315 rval = EFAULT; 23316 goto no_assessment; 23317 } 23318 done: 23319 if (rval != 0) { 23320 if (rval == EIO) 23321 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23322 else 23323 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23324 } 23325 no_assessment: 23326 sd_ssc_fini(ssc); 23327 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 23328 kmem_free(rqbuf, SENSE_LENGTH); 23329 return (rval); 23330 } 23331 23332 /* 23333 * Function: sd_check_media 23334 * 23335 * Description: This utility routine implements the functionality for the 23336 * DKIOCSTATE ioctl. 
 *		This ioctl blocks the user thread until the
 *		driver state changes from that specified by the user
 *		(inserted or ejected). For example, if the user specifies
 *		DKIO_EJECTED and the current media state is inserted this
 *		routine will immediately return DKIO_INSERTED. However, if the
 *		current media state is not inserted the user thread will be
 *		blocked until the drive state changes. If DKIO_NONE is specified
 *		the user thread will block until a drive state change occurs.
 *
 * Arguments: dev	- the device number
 *		state	- user pointer to a dkio_state, updated with the
 *			  current drive state at return.
 *
 * Return Code: ENXIO
 *		EIO
 *		EAGAIN
 *		EINTR
 */

static int
sd_check_media(dev_t dev, enum dkio_state state)
{
	struct sd_lun		*un = NULL;
	enum dkio_state		prev_state;
	opaque_t		token = NULL;
	int			rval = 0;
	sd_ssc_t		*ssc;
	dev_t			sub_dev;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * sub_dev is used when submitting request to scsi watch.
	 * All submissions are unified to use same device number.
	 */
	sub_dev = sd_make_device(SD_DEVINFO(un));

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");

	ssc = sd_ssc_init(un);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
	    "state=%x, mediastate=%x\n", state, un->un_mediastate);

	prev_state = un->un_mediastate;

	/* is there anything to do? */
	if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
		/*
		 * submit the request to the scsi_watch service;
		 * scsi_media_watch_cb() does the real work
		 */
		mutex_exit(SD_MUTEX(un));

		/*
		 * This change handles the case where a scsi watch request is
		 * added to a device that is powered down. To accomplish this
		 * we power up the device before adding the scsi watch request,
		 * since the scsi watch sends a TUR directly to the device
		 * which the device cannot handle if it is powered down.
		 */
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			mutex_enter(SD_MUTEX(un));
			goto done;
		}

		token = scsi_watch_request_submit(SD_SCSI_DEVP(un),
		    sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
		    (caddr_t)sub_dev);

		sd_pm_exit(un);

		mutex_enter(SD_MUTEX(un));
		if (token == NULL) {
			rval = EAGAIN;
			goto done;
		}

		/*
		 * This is a special case IOCTL that doesn't return
		 * until the media state changes. Routine sdpower
		 * knows about and handles this so don't count it
		 * as an active cmd in the driver, which would
		 * keep the device busy to the pm framework.
		 * If the count isn't decremented the device can't
		 * be powered down.
		 */
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);

		/*
		 * if a prior request had been made, this will be the same
		 * token, as scsi_watch was designed that way.
		 */
		un->un_swr_token = token;
		un->un_specified_mediastate = state;

		/*
		 * now wait for media change
		 * we will not be signalled unless mediastate == state but it is
		 * still better to test for this condition, since there is a
		 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
		 */
		SD_TRACE(SD_LOG_COMMON, un,
		    "sd_check_media: waiting for media state change\n");
		while (un->un_mediastate == state) {
			if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
				SD_TRACE(SD_LOG_COMMON, un,
				    "sd_check_media: waiting for media state "
				    "was interrupted\n");
				/* restore the command count before leaving */
				un->un_ncmds_in_driver++;
				rval = EINTR;
				goto done;
			}
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_check_media: received signal, state=%x\n",
			    un->un_mediastate);
		}
		/*
		 * Inc the counter to indicate the device once again
		 * has an active outstanding cmd.
		 */
		un->un_ncmds_in_driver++;
	}

	/* invalidate geometry */
	if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
		sr_ejected(un);
	}

	if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
		uint64_t	capacity;
		uint_t		lbasize;

		SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
		mutex_exit(SD_MUTEX(un));
		/*
		 * Since the following routines use SD_PATH_DIRECT, we must
		 * call PM directly before the upcoming disk accesses. This
		 * may cause the disk to be power/spin up.
		 */

		if (sd_pm_entry(un) == DDI_SUCCESS) {
			rval = sd_send_scsi_READ_CAPACITY(ssc,
			    &capacity, &lbasize, SD_PATH_DIRECT);
			if (rval != 0) {
				sd_pm_exit(un);
				if (rval == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				mutex_enter(SD_MUTEX(un));
				goto done;
			}
		} else {
			rval = EIO;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		mutex_enter(SD_MUTEX(un));

		sd_update_block_info(un, lbasize, capacity);

		/*
		 * Check if the media in the device is writable or not
		 */
		if (ISCD(un)) {
			sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
		}

		mutex_exit(SD_MUTEX(un));
		cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
		if ((cmlb_validate(un->un_cmlbhandle, 0,
		    (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_check_media: un:0x%p pstats created and "
			    "set\n", un);
		}

		rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT);

		sd_pm_exit(un);

		if (rval != 0) {
			if (rval == EIO)
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			else
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		mutex_enter(SD_MUTEX(un));
	}
done:
	sd_ssc_fini(ssc);
	un->un_f_watcht_stopped = FALSE;
	if (token != NULL && un->un_swr_token != NULL) {
		/*
		 * Use of this local token and the mutex ensures that we avoid
		 * some race conditions associated with terminating the
		 * scsi watch.
		 */
		token = un->un_swr_token;
		mutex_exit(SD_MUTEX(un));
		(void) scsi_watch_request_terminate(token,
		    SCSI_WATCH_TERMINATE_WAIT);
		if (scsi_watch_get_ref_count(token) == 0) {
			mutex_enter(SD_MUTEX(un));
			un->un_swr_token = (opaque_t)NULL;
		} else {
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * Update the capacity kstat value, if no media previously
	 * (capacity kstat is 0) and a media has been inserted
	 * (un_f_blockcount_is_valid == TRUE)
	 */
	if (un->un_errstats) {
		struct sd_errstats	*stp = NULL;

		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		if ((stp->sd_capacity.value.ui64 == 0) &&
		    (un->un_f_blockcount_is_valid == TRUE)) {
			stp->sd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_blockcount *
			    un->un_sys_blocksize);
		}
	}
	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n");
	return (rval);
}


/*
 * Function: sd_delayed_cv_broadcast
 *
 * Description: Delayed cv_broadcast to allow for target to recover from media
 *		insertion.
 *
 * Arguments: arg - driver soft state (unit) structure
 */

static void
sd_delayed_cv_broadcast(void *arg)
{
	struct sd_lun *un = arg;

	SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n");

	mutex_enter(SD_MUTEX(un));
	/* Clear the timeout id before waking the DKIOCSTATE waiters. */
	un->un_dcvb_timeid = NULL;
	cv_broadcast(&un->un_state_cv);
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_media_watch_cb
 *
 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This
 *		routine processes the TUR sense data and updates the driver
 *		state if a transition has occurred. The user thread
 *		(sd_check_media) is then signalled.
23608 * 23609 * Arguments: arg - the device 'dev_t' is used for context to discriminate 23610 * among multiple watches that share this callback function 23611 * resultp - scsi watch facility result packet containing scsi 23612 * packet, status byte and sense data 23613 * 23614 * Return Code: 0 for success, -1 for failure 23615 */ 23616 23617 static int 23618 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 23619 { 23620 struct sd_lun *un; 23621 struct scsi_status *statusp = resultp->statusp; 23622 uint8_t *sensep = (uint8_t *)resultp->sensep; 23623 enum dkio_state state = DKIO_NONE; 23624 dev_t dev = (dev_t)arg; 23625 uchar_t actual_sense_length; 23626 uint8_t skey, asc, ascq; 23627 23628 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23629 return (-1); 23630 } 23631 actual_sense_length = resultp->actual_sense_length; 23632 23633 mutex_enter(SD_MUTEX(un)); 23634 SD_TRACE(SD_LOG_COMMON, un, 23635 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 23636 *((char *)statusp), (void *)sensep, actual_sense_length); 23637 23638 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 23639 un->un_mediastate = DKIO_DEV_GONE; 23640 cv_broadcast(&un->un_state_cv); 23641 mutex_exit(SD_MUTEX(un)); 23642 23643 return (0); 23644 } 23645 23646 /* 23647 * If there was a check condition then sensep points to valid sense data 23648 * If status was not a check condition but a reservation or busy status 23649 * then the new state is DKIO_NONE 23650 */ 23651 if (sensep != NULL) { 23652 skey = scsi_sense_key(sensep); 23653 asc = scsi_sense_asc(sensep); 23654 ascq = scsi_sense_ascq(sensep); 23655 23656 SD_INFO(SD_LOG_COMMON, un, 23657 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 23658 skey, asc, ascq); 23659 /* This routine only uses up to 13 bytes of sense data. 
*/ 23660 if (actual_sense_length >= 13) { 23661 if (skey == KEY_UNIT_ATTENTION) { 23662 if (asc == 0x28) { 23663 state = DKIO_INSERTED; 23664 } 23665 } else if (skey == KEY_NOT_READY) { 23666 /* 23667 * Sense data of 02/06/00 means that the 23668 * drive could not read the media (No 23669 * reference position found). In this case 23670 * to prevent a hang on the DKIOCSTATE IOCTL 23671 * we set the media state to DKIO_INSERTED. 23672 */ 23673 if (asc == 0x06 && ascq == 0x00) 23674 state = DKIO_INSERTED; 23675 23676 /* 23677 * if 02/04/02 means that the host 23678 * should send start command. Explicitly 23679 * leave the media state as is 23680 * (inserted) as the media is inserted 23681 * and host has stopped device for PM 23682 * reasons. Upon next true read/write 23683 * to this media will bring the 23684 * device to the right state good for 23685 * media access. 23686 */ 23687 if (asc == 0x3a) { 23688 state = DKIO_EJECTED; 23689 } else { 23690 /* 23691 * If the drive is busy with an 23692 * operation or long write, keep the 23693 * media in an inserted state. 23694 */ 23695 23696 if ((asc == 0x04) && 23697 ((ascq == 0x02) || 23698 (ascq == 0x07) || 23699 (ascq == 0x08))) { 23700 state = DKIO_INSERTED; 23701 } 23702 } 23703 } else if (skey == KEY_NO_SENSE) { 23704 if ((asc == 0x00) && (ascq == 0x00)) { 23705 /* 23706 * Sense Data 00/00/00 does not provide 23707 * any information about the state of 23708 * the media. Ignore it. 
23709 */ 23710 mutex_exit(SD_MUTEX(un)); 23711 return (0); 23712 } 23713 } 23714 } 23715 } else if ((*((char *)statusp) == STATUS_GOOD) && 23716 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 23717 state = DKIO_INSERTED; 23718 } 23719 23720 SD_TRACE(SD_LOG_COMMON, un, 23721 "sd_media_watch_cb: state=%x, specified=%x\n", 23722 state, un->un_specified_mediastate); 23723 23724 /* 23725 * now signal the waiting thread if this is *not* the specified state; 23726 * delay the signal if the state is DKIO_INSERTED to allow the target 23727 * to recover 23728 */ 23729 if (state != un->un_specified_mediastate) { 23730 un->un_mediastate = state; 23731 if (state == DKIO_INSERTED) { 23732 /* 23733 * delay the signal to give the drive a chance 23734 * to do what it apparently needs to do 23735 */ 23736 SD_TRACE(SD_LOG_COMMON, un, 23737 "sd_media_watch_cb: delayed cv_broadcast\n"); 23738 if (un->un_dcvb_timeid == NULL) { 23739 un->un_dcvb_timeid = 23740 timeout(sd_delayed_cv_broadcast, un, 23741 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 23742 } 23743 } else { 23744 SD_TRACE(SD_LOG_COMMON, un, 23745 "sd_media_watch_cb: immediate cv_broadcast\n"); 23746 cv_broadcast(&un->un_state_cv); 23747 } 23748 } 23749 mutex_exit(SD_MUTEX(un)); 23750 return (0); 23751 } 23752 23753 23754 /* 23755 * Function: sd_dkio_get_temp 23756 * 23757 * Description: This routine is the driver entry point for handling ioctl 23758 * requests to get the disk temperature. 23759 * 23760 * Arguments: dev - the device number 23761 * arg - pointer to user provided dk_temperature structure. 23762 * flag - this argument is a pass through to ddi_copyxxx() 23763 * directly from the mode argument of ioctl(). 
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 *		EAGAIN
 */

static int
sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct dk_temperature	*dktemp = NULL;
	uchar_t			*temperature_page;
	int			rval = 0;
	int			path_flag = SD_PATH_STANDARD;
	sd_ssc_t		*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ssc = sd_ssc_init(un);
	dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);

	/* copyin the disk temp argument to get the user flags */
	if (ddi_copyin((void *)arg, dktemp,
	    sizeof (struct dk_temperature), flag) != 0) {
		rval = EFAULT;
		goto done;
	}

	/* Initialize the temperature to invalid. */
	dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
	dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;

	/*
	 * Note: Investigate removing the "bypass pm" semantic.
	 * Can we just bypass PM always?
	 */
	if (dktemp->dkt_flags & DKT_BYPASS_PM) {
		path_flag = SD_PATH_DIRECT;
		ASSERT(!mutex_owned(&un->un_pm_mutex));
		mutex_enter(&un->un_pm_mutex);
		if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
			/*
			 * If DKT_BYPASS_PM is set, and the drive happens to be
			 * in low power mode, we can not wake it up. Need to
			 * return EAGAIN.
			 */
			mutex_exit(&un->un_pm_mutex);
			rval = EAGAIN;
			goto done;
		} else {
			/*
			 * Indicate to PM the device is busy. This is required
			 * to avoid a race - i.e. the ioctl is issuing a
			 * command and the pm framework brings down the device
			 * to low power mode (possible power cut-off on some
			 * platforms).
			 */
			mutex_exit(&un->un_pm_mutex);
			if (sd_pm_entry(un) != DDI_SUCCESS) {
				rval = EAGAIN;
				goto done;
			}
		}
	}

	temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);

	rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page,
	    TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag);
	if (rval != 0)
		goto done2;

	/*
	 * For the current temperature verify that the parameter length is 0x02
	 * and the parameter code is 0x00
	 */
	if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
	    (temperature_page[5] == 0x00)) {
		if (temperature_page[9] == 0xFF) {
			dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
		} else {
			dktemp->dkt_cur_temp = (short)(temperature_page[9]);
		}
	}

	/*
	 * For the reference temperature verify that the parameter
	 * length is 0x02 and the parameter code is 0x01
	 */
	if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
	    (temperature_page[11] == 0x01)) {
		if (temperature_page[15] == 0xFF) {
			dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
		} else {
			dktemp->dkt_ref_temp = (short)(temperature_page[15]);
		}
	}

	/* Do the copyout regardless of the temperature commands status. */
	if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
	    flag) != 0) {
		rval = EFAULT;
		goto done1;
	}

done2:
	/* Record the FMA assessment for the LOG SENSE command's outcome. */
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
done1:
	/* path_flag == SD_PATH_DIRECT implies sd_pm_entry() succeeded above */
	if (path_flag == SD_PATH_DIRECT) {
		sd_pm_exit(un);
	}

	kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
done:
	sd_ssc_fini(ssc);
	if (dktemp != NULL) {
		kmem_free(dktemp, sizeof (struct dk_temperature));
	}

	return (rval);
}


/*
 * Function: sd_log_page_supported
 *
 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
 *		supported log pages.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *		structure for this target.
 *		log_page - the log page code to look for in the supported list
 *
 * Return Code: -1 - on error (log sense is optional and may not be supported).
 *		0  - log page not found.
 *		1  - log page found.
 */

static int
sd_log_page_supported(sd_ssc_t *ssc, int log_page)
{
	uchar_t *log_page_data;
	int	i;
	int	match = 0;
	int	log_size;
	int	status = 0;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	log_page_data = kmem_zalloc(0xFF, KM_SLEEP);

	/* Page 0 with PC 0x01 returns the list of supported log pages. */
	status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0,
	    SD_PATH_DIRECT);

	if (status != 0) {
		if (status == EIO) {
			/*
			 * Some disks do not support log sense, we
			 * should ignore this kind of error (sense key is
			 * 0x5 - illegal request).
			 */
			uint8_t *sensep;
			int senlen;

			sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
			senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
			    ssc->ssc_uscsi_cmd->uscsi_rqresid);

			/*
			 * ILLEGAL REQUEST means the command itself is
			 * unsupported: downgrade the FMA assessment.
			 */
			if (senlen > 0 &&
			    scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
				sd_ssc_assessment(ssc,
				    SD_FMT_IGNORE_COMPROMISE);
			} else {
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			}
		} else {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		SD_ERROR(SD_LOG_COMMON, un,
		    "sd_log_page_supported: failed log page retrieval\n");
		kmem_free(log_page_data, 0xFF);
		return (-1);
	}

	log_size = log_page_data[3];

	/*
	 * The list of supported log pages start from the fourth byte. Check
	 * until we run out of log pages or a match is found.
	 */
	for (i = 4; (i < (log_size + 4)) && !match; i++) {
		if (log_page_data[i] == log_page) {
			match++;
		}
	}
	kmem_free(log_page_data, 0xFF);
	return (match);
}


/*
 * Function: sd_mhdioc_failfast
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to enable/disable the multihost failfast option.
 *		(MHIOCENFAILFAST)
 *
 * Arguments: dev	- the device number
 *		arg	- user specified probing interval.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	int		mh_time;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag))
		return (EFAULT);

	if (mh_time) {
		/* Enable failfast and (usually) start the probe watch. */
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status |= SD_FAILFAST;
		mutex_exit(SD_MUTEX(un));
		/*
		 * If mh_time is INT_MAX, then this ioctl is being used for
		 * SCSI-3 PGR purposes, and we don't need to spawn watch thread.
		 */
		if (mh_time != INT_MAX) {
			rval = sd_check_mhd(dev, mh_time);
		}
	} else {
		/* Disable failfast: terminate the watch, then clear flag. */
		(void) sd_check_mhd(dev, 0);
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status &= ~SD_FAILFAST;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function: sd_mhdioc_takeown
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to forcefully acquire exclusive access rights to the
 *		multihost disk (MHIOCTKOWN).
 *
 * Arguments: dev	- the device number
 *		arg	- user provided structure specifying the delay
 *			  parameters in milliseconds
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct mhioctkown	*tkown = NULL;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* arg is optional; NULL means use the default reinstate delay. */
	if (arg != NULL) {
		tkown = (struct mhioctkown *)
		    kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP);
		rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag);
		if (rval != 0) {
			rval = EFAULT;
			goto error;
		}
	}

	rval = sd_take_ownership(dev, tkown);
	mutex_enter(SD_MUTEX(un));
	if (rval == 0) {
		un->un_resvd_status |= SD_RESERVE;
		/* caller-specified delay is in msecs; stored in usecs */
		if (tkown != NULL && tkown->reinstate_resv_delay != 0) {
			sd_reinstate_resv_delay =
			    tkown->reinstate_resv_delay * 1000;
		} else {
			sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
		}
		/*
		 * Give the scsi_watch routine interval set by
		 * the MHIOCENFAILFAST ioctl precedence here.
		 */
		if ((un->un_resvd_status & SD_FAILFAST) == 0) {
			mutex_exit(SD_MUTEX(un));
			(void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000);
			SD_TRACE(SD_LOG_IOCTL_MHD, un,
			    "sd_mhdioc_takeown : %d\n",
			    sd_reinstate_resv_delay);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
		    sd_mhd_reset_notify_cb, (caddr_t)un);
	} else {
		un->un_resvd_status &= ~SD_RESERVE;
		mutex_exit(SD_MUTEX(un));
	}

error:
	if (tkown != NULL) {
		kmem_free(tkown, sizeof (struct mhioctkown));
	}
	return (rval);
}


/*
 * Function: sd_mhdioc_release
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to release exclusive access rights to the multihost
 *		disk (MHIOCRELEASE).
 *
 * Arguments: dev	- the device number
 *
 * Return Code: 0
 *		ENXIO
 */

static int
sd_mhdioc_release(dev_t dev)
{
	struct sd_lun		*un = NULL;
	timeout_id_t		resvd_timeid_save;
	int			resvd_status_save;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Save the current reservation status so it can be restored if the
	 * release fails, then clear the reservation flags and cancel any
	 * pending reclaim timeout.
	 */
	mutex_enter(SD_MUTEX(un));
	resvd_status_save = un->un_resvd_status;
	un->un_resvd_status &=
	    ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE);
	if (un->un_resvd_timeid) {
		resvd_timeid_save = un->un_resvd_timeid;
		un->un_resvd_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		/* untimeout() may block; must be called without the mutex */
		(void) untimeout(resvd_timeid_save);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * destroy any pending timeout thread that may be attempting to
	 * reinstate reservation on this device.
	 */
	sd_rmv_resv_reclaim_req(dev);

	if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) {
		mutex_enter(SD_MUTEX(un));
		/* Stop the probe watch unless failfast still needs it. */
		if ((un->un_mhd_token) &&
		    ((un->un_resvd_status & SD_FAILFAST) == 0)) {
			mutex_exit(SD_MUTEX(un));
			(void) sd_check_mhd(dev, 0);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
		    sd_mhd_reset_notify_cb, (caddr_t)un);
	} else {
		/*
		 * sd_mhd_watch_cb will restart the resvd recover timeout thread
		 */
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status = resvd_status_save;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function: sd_mhdioc_register_devid
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to register the device id (MHIOCREREGISTERDEVID).
 *
 *		Note: The implementation for this ioctl has been updated to
 *		be consistent with the original PSARC case (1999/357)
 *		(4375899, 4241671, 4220005)
 *
 * Arguments: dev	- the device number
 *
 * Return Code: 0
 *		ENXIO
 */

static int
sd_mhdioc_register_devid(dev_t dev)
{
	struct sd_lun	*un = NULL;
	int		rval = 0;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	/* If a devid already exists, de-register it */
	if (un->un_devid != NULL) {
		ddi_devid_unregister(SD_DEVINFO(un));
		/*
		 * After unregister devid, needs to free devid memory
		 */
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	/* Check for reservation conflict */
	mutex_exit(SD_MUTEX(un));
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
	mutex_enter(SD_MUTEX(un));

	/* Only register a new devid if the unit is not reserved elsewhere. */
	switch (rval) {
	case 0:
		sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
		break;
	case EACCES:
		/* reservation conflict: leave devid unregistered */
		break;
	default:
		rval = EIO;
	}

	mutex_exit(SD_MUTEX(un));
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
	sd_ssc_fini(ssc);
	return (rval);
}


/*
 * Function: sd_mhdioc_inkeys
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent In Read Keys command
 *		to the device (MHIOCGRP_INKEYS).
 *
 * Arguments: dev	- the device number
 *		arg	- user provided in_keys structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
 *		ENXIO
 *		EFAULT
 */

static int
sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un;
	mhioc_inkeys_t	inkeys;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		/* 32-bit caller: convert the 32-bit layout to native. */
		struct mhioc_inkeys32	inkeys32;

		if (ddi_copyin(arg, &inkeys32,
		    sizeof (struct mhioc_inkeys32), flag) != 0) {
			return (EFAULT);
		}
		inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		inkeys32.generation = inkeys.generation;
		if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}
	rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
	if (rval != 0) {
		return (rval);
	}
	if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}

#endif /* _MULTI_DATAMODEL */

	return (rval);
}


/*
 * Function: sd_mhdioc_inresv
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent In Read Reservations
 *		command to the device (MHIOCGRP_INRESV).
 *
 * Arguments: dev	- the device number
 *		arg	- user provided in_resv structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: code returned by sd_persistent_reservation_in_read_resv()
 *		ENXIO
 *		EFAULT
 */

static int
sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un;
	mhioc_inresvs_t		inresvs;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		/* 32-bit caller: convert the 32-bit layout to native. */
		struct mhioc_inresvs32	inresvs32;

		if (ddi_copyin(arg, &inresvs32,
		    sizeof (struct mhioc_inresvs32), flag) != 0) {
			return (EFAULT);
		}
		inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li;
		if ((rval = sd_persistent_reservation_in_read_resv(un,
		    &inresvs, flag)) != 0) {
			return (rval);
		}
		inresvs32.generation = inresvs.generation;
		if (ddi_copyout(&inresvs32, arg,
		    sizeof (struct mhioc_inresvs32), flag) != 0) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin(arg, &inresvs,
		    sizeof (mhioc_inresvs_t), flag) != 0) {
			return (EFAULT);
		}
		if ((rval = sd_persistent_reservation_in_read_resv(un,
		    &inresvs, flag)) != 0) {
			return (rval);
		}
		if (ddi_copyout(&inresvs, arg,
		    sizeof (mhioc_inresvs_t), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) {
		return (EFAULT);
	}
	rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag);
	if (rval != 0) {
		return (rval);
	}
	if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) {
		return (EFAULT);
	}

#endif /* !
 _MULTI_DATAMODEL */

	return (rval);
}


/*
 * The following routines support the clustering functionality described below
 * and implement lost reservation reclaim functionality.
 *
 * Clustering
 * ----------
 * The clustering code uses two different, independent forms of SCSI
 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3
 * Persistent Group Reservations. For any particular disk, it will use either
 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk.
 *
 * SCSI-2
 * The cluster software takes ownership of a multi-hosted disk by issuing the
 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the
 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a
 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl
 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the
 * driver. The meaning of failfast is that if the driver (on this host) ever
 * encounters the scsi error return code RESERVATION_CONFLICT from the device,
 * it should immediately panic the host. The motivation for this ioctl is that
 * if this host does encounter reservation conflict, the underlying cause is
 * that some other host of the cluster has decided that this host is no longer
 * in the cluster and has seized control of the disks for itself. Since this
 * host is no longer in the cluster, it ought to panic itself. The
 * MHIOCENFAILFAST ioctl does two things:
 *	(a) it sets a flag that will cause any returned RESERVATION_CONFLICT
 *	    error to panic the host
 *	(b) it sets up a periodic timer to test whether this host still has
 *	    "access" (in that no other host has reserved the device): if the
 *	    periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
 *	    purpose of that periodic timer is to handle scenarios where the
 *	    host is otherwise temporarily quiescent, temporarily doing no real
 *	    i/o.
 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another
 * host, by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve
 * for the device itself.
 *
 * SCSI-3 PGR
 * A direct semantic implementation of the SCSI-3 Persistent Reservation
 * facility is supported through the shared multihost disk ioctls
 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
 * MHIOCGRP_PREEMPTANDABORT)
 *
 * Reservation Reclaim:
 * --------------------
 * To support the lost reservation reclaim operations this driver creates a
 * single thread to handle reinstating reservations on all devices that have
 * lost reservations. sd_resv_reclaim_requests are logged for all devices that
 * have LOST RESERVATIONS when the scsi watch facility calls back
 * sd_mhd_watch_cb and the reservation reclaim thread loops through the
 * requests to regain the lost reservations.
 */

/*
 * Function: sd_check_mhd()
 *
 * Description: This function sets up and submits a scsi watch request or
 *		terminates an existing watch request. This routine is used in
 *		support of reservation reclaim.
 *
 * Arguments: dev - the device 'dev_t' is used for context to discriminate
 *			among multiple watches that share the callback function
 *		interval - the number of milliseconds specifying the watch
 *			interval for issuing TEST UNIT READY commands. If
 *			set to 0 the watch should be terminated. If the
 *			interval is set to 0 and if the device is required
 *			to hold reservation while disabling failfast, the
 *			watch is restarted with an interval of
 *			reinstate_resv_delay.
 *
 * Return Code: 0	- Successful submit/terminate of scsi watch request
 *		ENXIO	- Indicates an invalid device was specified
 *		EAGAIN	- Unable to submit the scsi watch request
 */

static int
sd_check_mhd(dev_t dev, int interval)
{
	struct sd_lun	*un;
	opaque_t	token;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* is this a watch termination request? */
	if (interval == 0) {
		mutex_enter(SD_MUTEX(un));
		/* if there is an existing watch task then terminate it */
		if (un->un_mhd_token) {
			token = un->un_mhd_token;
			un->un_mhd_token = NULL;
			mutex_exit(SD_MUTEX(un));
			/* terminate may block; called without the mutex */
			(void) scsi_watch_request_terminate(token,
			    SCSI_WATCH_TERMINATE_ALL_WAIT);
			mutex_enter(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Note: If we return here we don't check for the
			 * failfast case. This is the original legacy
			 * implementation but perhaps we should be checking
			 * the failfast case.
			 */
			return (0);
		}
		/*
		 * If the device is required to hold reservation while
		 * disabling failfast, we need to restart the scsi_watch
		 * routine with an interval of reinstate_resv_delay.
		 */
		if (un->un_resvd_status & SD_RESERVE) {
			/* sd_reinstate_resv_delay is in usecs; watch in ms */
			interval = sd_reinstate_resv_delay/1000;
		} else {
			/* no failfast so bail */
			mutex_exit(SD_MUTEX(un));
			return (0);
		}
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * adjust minimum time interval to 1 second,
	 * and convert from msecs to usecs
	 */
	if (interval > 0 && interval < 1000) {
		interval = 1000;
	}
	interval *= 1000;

	/*
	 * submit the request to the scsi_watch service
	 */
	token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval,
	    SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev);
	if (token == NULL) {
		return (EAGAIN);
	}

	/*
	 * save token for termination later on
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_mhd_token = token;
	mutex_exit(SD_MUTEX(un));
	return (0);
}


/*
 * Function: sd_mhd_watch_cb()
 *
 * Description: This function is the call back function used by the scsi watch
 *		facility. The scsi watch facility sends the "Test Unit Ready"
 *		and processes the status. If applicable (i.e. a "Unit Attention"
 *		status and automatic "Request Sense" not used) the scsi watch
 *		facility will send a "Request Sense" and retrieve the sense data
 *		to be passed to this callback function. In either case the
 *		automatic "Request Sense" or the facility submitting one, this
 *		callback is passed the status and sense data.
 *
 * Arguments: arg -	the device 'dev_t' is used for context to discriminate
 *			among multiple watches that share this callback function
 *		resultp - scsi watch facility result packet containing scsi
 *			packet, status byte and sense data
 *
 * Return Code: 0 - continue the watch task
 *		non-zero - terminate the watch task
 */

static int
sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct sd_lun			*un;
	struct scsi_status		*statusp;
	uint8_t				*sensep;
	struct scsi_pkt			*pkt;
	uchar_t				actual_sense_length;
	dev_t				dev = (dev_t)arg;

	ASSERT(resultp != NULL);
	statusp			= resultp->statusp;
	sensep			= (uint8_t *)resultp->sensep;
	pkt			= resultp->pkt;
	actual_sense_length	= resultp->actual_sense_length;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_MHD, un,
	    "sd_mhd_watch_cb: reason '%s', status '%s'\n",
	    scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp)));

	/* Begin processing of the status and/or sense data */
	if (pkt->pkt_reason != CMD_CMPLT) {
		/* Handle the incomplete packet */
		sd_mhd_watch_incomplete(un, pkt);
		return (0);
	} else if (*((unsigned char *)statusp) != STATUS_GOOD) {
		if (*((unsigned char *)statusp)
		    == STATUS_RESERVATION_CONFLICT) {
			/*
			 * Handle a reservation conflict by panicking if
			 * configured for failfast or by logging the conflict
			 * and updating the reservation status
			 */
			mutex_enter(SD_MUTEX(un));
			if ((un->un_resvd_status & SD_FAILFAST) &&
			    (sd_failfast_enable)) {
				sd_panic_for_res_conflict(un);
				/*NOTREACHED*/
			}
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_cb: Reservation Conflict\n");
			un->un_resvd_status |= SD_RESERVATION_CONFLICT;
			mutex_exit(SD_MUTEX(un));
		}
	}

	if (sensep != NULL) {
		if (actual_sense_length >= (SENSE_LENGTH - 2)) {
			mutex_enter(SD_MUTEX(un));
			if ((scsi_sense_asc(sensep) ==
			    SD_SCSI_RESET_SENSE_CODE) &&
			    (un->un_resvd_status & SD_RESERVE)) {
				/*
				 * The additional sense code indicates a power
				 * on or bus device reset has occurred; update
				 * the reservation status.
				 */
				un->un_resvd_status |=
				    (SD_LOST_RESERVE | SD_WANT_RESERVE);
				SD_INFO(SD_LOG_IOCTL_MHD, un,
				    "sd_mhd_watch_cb: Lost Reservation\n");
			}
		} else {
			/* sense data too short to interpret; keep watching */
			return (0);
		}
	} else {
		mutex_enter(SD_MUTEX(un));
	}

	/* SD_MUTEX is held on all paths reaching here */
	if ((un->un_resvd_status & SD_RESERVE) &&
	    (un->un_resvd_status & SD_LOST_RESERVE)) {
		if (un->un_resvd_status & SD_WANT_RESERVE) {
			/*
			 * A reset occurred in between the last probe and this
			 * one so if a timeout is pending cancel it.
			 */
			if (un->un_resvd_timeid) {
				timeout_id_t temp_id = un->un_resvd_timeid;
				un->un_resvd_timeid = NULL;
				mutex_exit(SD_MUTEX(un));
				(void) untimeout(temp_id);
				mutex_enter(SD_MUTEX(un));
			}
			un->un_resvd_status &= ~SD_WANT_RESERVE;
		}
		if (un->un_resvd_timeid == 0) {
			/* Schedule a timeout to handle the lost reservation */
			un->un_resvd_timeid = timeout(sd_mhd_resvd_recover,
			    (void *)dev,
			    drv_usectohz(sd_reinstate_resv_delay));
		}
	}
	mutex_exit(SD_MUTEX(un));
	return (0);
}


/*
 * Function: sd_mhd_watch_incomplete()
 *
 * Description: This function is used to find out why a scsi pkt sent by the
 *		scsi watch facility was not completed. Under some scenarios this
 *		routine will return. Otherwise it will send a bus reset to see
 *		if the drive is still online.
 *
 * Arguments: un  - driver soft state (unit) structure
 *		pkt - incomplete scsi pkt
 */

static void
sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt)
{
	int	be_chatty;
	int	perr;

	ASSERT(pkt != NULL);
	ASSERT(un != NULL);
	be_chatty	= (!(pkt->pkt_flags & FLAG_SILENT));
	perr		= (pkt->pkt_statistics & STAT_PERR);

	mutex_enter(SD_MUTEX(un));
	/* While dumping, do nothing that could disturb the device. */
	if (un->un_state == SD_STATE_DUMPING) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	switch (pkt->pkt_reason) {
	case CMD_UNX_BUS_FREE:
		/*
		 * If we had a parity error that caused the target to drop BSY*,
		 * don't be chatty about it.
		 */
		if (perr && be_chatty) {
			be_chatty = 0;
		}
		break;
	case CMD_TAG_REJECT:
		/*
		 * The SCSI-2 spec states that a tag reject will be sent by the
		 * target if tagged queuing is not supported. A tag reject may
		 * also be sent during certain initialization periods or to
		 * control internal resources. For the latter case the target
		 * may also return Queue Full.
		 *
		 * If this driver receives a tag reject from a target that is
		 * going through an init period or controlling internal
		 * resources tagged queuing will be disabled. This is a less
		 * than optimal behavior but the driver is unable to determine
		 * the target state and assumes tagged queueing is not supported
		 */
		pkt->pkt_flags = 0;
		un->un_tagflags = 0;

		if (un->un_f_opt_queueing == TRUE) {
			un->un_throttle = min(un->un_throttle, 3);
		} else {
			un->un_throttle = 1;
		}
		mutex_exit(SD_MUTEX(un));
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
		mutex_enter(SD_MUTEX(un));
		break;
	case CMD_INCOMPLETE:
		/*
		 * The transport stopped with an abnormal state, fallthrough and
		 * reset the target and/or bus unless selection did not complete
		 * (indicated by STATE_GOT_BUS) in which case we don't want to
		 * go through a target/bus reset
		 */
		if (pkt->pkt_state == STATE_GOT_BUS) {
			break;
		}
		/*FALLTHROUGH*/

	case CMD_TIMEOUT:
	default:
		/*
		 * The lun may still be running the command, so a lun reset
		 * should be attempted. If the lun reset fails or cannot be
		 * issued, then try a target reset. Lastly try a bus reset.
		 */
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
			int reset_retval = 0;
			/* scsi_reset() may block; drop the mutex around it */
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_allow_bus_device_reset == TRUE) {
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			}
			mutex_enter(SD_MUTEX(un));
		}
		break;
	}

	/* A device/bus reset has occurred; update the reservation status.
*/ 24789 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 24790 (STAT_BUS_RESET | STAT_DEV_RESET))) { 24791 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24792 un->un_resvd_status |= 24793 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24794 SD_INFO(SD_LOG_IOCTL_MHD, un, 24795 "sd_mhd_watch_incomplete: Lost Reservation\n"); 24796 } 24797 } 24798 24799 /* 24800 * The disk has been turned off; Update the device state. 24801 * 24802 * Note: Should we be offlining the disk here? 24803 */ 24804 if (pkt->pkt_state == STATE_GOT_BUS) { 24805 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 24806 "Disk not responding to selection\n"); 24807 if (un->un_state != SD_STATE_OFFLINE) { 24808 New_state(un, SD_STATE_OFFLINE); 24809 } 24810 } else if (be_chatty) { 24811 /* 24812 * suppress messages if they are all the same pkt reason; 24813 * with TQ, many (up to 256) are returned with the same 24814 * pkt_reason 24815 */ 24816 if (pkt->pkt_reason != un->un_last_pkt_reason) { 24817 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24818 "sd_mhd_watch_incomplete: " 24819 "SCSI transport failed: reason '%s'\n", 24820 scsi_rname(pkt->pkt_reason)); 24821 } 24822 } 24823 un->un_last_pkt_reason = pkt->pkt_reason; 24824 mutex_exit(SD_MUTEX(un)); 24825 } 24826 24827 24828 /* 24829 * Function: sd_sname() 24830 * 24831 * Description: This is a simple little routine to return a string containing 24832 * a printable description of command status byte for use in 24833 * logging. 24834 * 24835 * Arguments: status - pointer to a status byte 24836 * 24837 * Return Code: char * - string containing status description. 
24838 */ 24839 24840 static char * 24841 sd_sname(uchar_t status) 24842 { 24843 switch (status & STATUS_MASK) { 24844 case STATUS_GOOD: 24845 return ("good status"); 24846 case STATUS_CHECK: 24847 return ("check condition"); 24848 case STATUS_MET: 24849 return ("condition met"); 24850 case STATUS_BUSY: 24851 return ("busy"); 24852 case STATUS_INTERMEDIATE: 24853 return ("intermediate"); 24854 case STATUS_INTERMEDIATE_MET: 24855 return ("intermediate - condition met"); 24856 case STATUS_RESERVATION_CONFLICT: 24857 return ("reservation_conflict"); 24858 case STATUS_TERMINATED: 24859 return ("command terminated"); 24860 case STATUS_QFULL: 24861 return ("queue full"); 24862 default: 24863 return ("<unknown status>"); 24864 } 24865 } 24866 24867 24868 /* 24869 * Function: sd_mhd_resvd_recover() 24870 * 24871 * Description: This function adds a reservation entry to the 24872 * sd_resv_reclaim_request list and signals the reservation 24873 * reclaim thread that there is work pending. If the reservation 24874 * reclaim thread has not been previously created this function 24875 * will kick it off. 24876 * 24877 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24878 * among multiple watches that share this callback function 24879 * 24880 * Context: This routine is called by timeout() and is run in interrupt 24881 * context. It must not sleep or call other functions which may 24882 * sleep. 
24883 */ 24884 24885 static void 24886 sd_mhd_resvd_recover(void *arg) 24887 { 24888 dev_t dev = (dev_t)arg; 24889 struct sd_lun *un; 24890 struct sd_thr_request *sd_treq = NULL; 24891 struct sd_thr_request *sd_cur = NULL; 24892 struct sd_thr_request *sd_prev = NULL; 24893 int already_there = 0; 24894 24895 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24896 return; 24897 } 24898 24899 mutex_enter(SD_MUTEX(un)); 24900 un->un_resvd_timeid = NULL; 24901 if (un->un_resvd_status & SD_WANT_RESERVE) { 24902 /* 24903 * There was a reset so don't issue the reserve, allow the 24904 * sd_mhd_watch_cb callback function to notice this and 24905 * reschedule the timeout for reservation. 24906 */ 24907 mutex_exit(SD_MUTEX(un)); 24908 return; 24909 } 24910 mutex_exit(SD_MUTEX(un)); 24911 24912 /* 24913 * Add this device to the sd_resv_reclaim_request list and the 24914 * sd_resv_reclaim_thread should take care of the rest. 24915 * 24916 * Note: We can't sleep in this context so if the memory allocation 24917 * fails allow the sd_mhd_watch_cb callback function to notice this and 24918 * reschedule the timeout for reservation. 
(4378460) 24919 */ 24920 sd_treq = (struct sd_thr_request *) 24921 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 24922 if (sd_treq == NULL) { 24923 return; 24924 } 24925 24926 sd_treq->sd_thr_req_next = NULL; 24927 sd_treq->dev = dev; 24928 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24929 if (sd_tr.srq_thr_req_head == NULL) { 24930 sd_tr.srq_thr_req_head = sd_treq; 24931 } else { 24932 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 24933 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 24934 if (sd_cur->dev == dev) { 24935 /* 24936 * already in Queue so don't log 24937 * another request for the device 24938 */ 24939 already_there = 1; 24940 break; 24941 } 24942 sd_prev = sd_cur; 24943 } 24944 if (!already_there) { 24945 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 24946 "logging request for %lx\n", dev); 24947 sd_prev->sd_thr_req_next = sd_treq; 24948 } else { 24949 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 24950 } 24951 } 24952 24953 /* 24954 * Create a kernel thread to do the reservation reclaim and free up this 24955 * thread. 
We cannot block this thread while we go away to do the 24956 * reservation reclaim 24957 */ 24958 if (sd_tr.srq_resv_reclaim_thread == NULL) 24959 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 24960 sd_resv_reclaim_thread, NULL, 24961 0, &p0, TS_RUN, v.v_maxsyspri - 2); 24962 24963 /* Tell the reservation reclaim thread that it has work to do */ 24964 cv_signal(&sd_tr.srq_resv_reclaim_cv); 24965 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24966 } 24967 24968 /* 24969 * Function: sd_resv_reclaim_thread() 24970 * 24971 * Description: This function implements the reservation reclaim operations 24972 * 24973 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24974 * among multiple watches that share this callback function 24975 */ 24976 24977 static void 24978 sd_resv_reclaim_thread() 24979 { 24980 struct sd_lun *un; 24981 struct sd_thr_request *sd_mhreq; 24982 24983 /* Wait for work */ 24984 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24985 if (sd_tr.srq_thr_req_head == NULL) { 24986 cv_wait(&sd_tr.srq_resv_reclaim_cv, 24987 &sd_tr.srq_resv_reclaim_mutex); 24988 } 24989 24990 /* Loop while we have work */ 24991 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 24992 un = ddi_get_soft_state(sd_state, 24993 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 24994 if (un == NULL) { 24995 /* 24996 * softstate structure is NULL so just 24997 * dequeue the request and continue 24998 */ 24999 sd_tr.srq_thr_req_head = 25000 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25001 kmem_free(sd_tr.srq_thr_cur_req, 25002 sizeof (struct sd_thr_request)); 25003 continue; 25004 } 25005 25006 /* dequeue the request */ 25007 sd_mhreq = sd_tr.srq_thr_cur_req; 25008 sd_tr.srq_thr_req_head = 25009 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25010 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25011 25012 /* 25013 * Reclaim reservation only if SD_RESERVE is still set. There 25014 * may have been a call to MHIOCRELEASE before we got here. 
25015 */ 25016 mutex_enter(SD_MUTEX(un)); 25017 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25018 /* 25019 * Note: The SD_LOST_RESERVE flag is cleared before 25020 * reclaiming the reservation. If this is done after the 25021 * call to sd_reserve_release a reservation loss in the 25022 * window between pkt completion of reserve cmd and 25023 * mutex_enter below may not be recognized 25024 */ 25025 un->un_resvd_status &= ~SD_LOST_RESERVE; 25026 mutex_exit(SD_MUTEX(un)); 25027 25028 if (sd_reserve_release(sd_mhreq->dev, 25029 SD_RESERVE) == 0) { 25030 mutex_enter(SD_MUTEX(un)); 25031 un->un_resvd_status |= SD_RESERVE; 25032 mutex_exit(SD_MUTEX(un)); 25033 SD_INFO(SD_LOG_IOCTL_MHD, un, 25034 "sd_resv_reclaim_thread: " 25035 "Reservation Recovered\n"); 25036 } else { 25037 mutex_enter(SD_MUTEX(un)); 25038 un->un_resvd_status |= SD_LOST_RESERVE; 25039 mutex_exit(SD_MUTEX(un)); 25040 SD_INFO(SD_LOG_IOCTL_MHD, un, 25041 "sd_resv_reclaim_thread: Failed " 25042 "Reservation Recovery\n"); 25043 } 25044 } else { 25045 mutex_exit(SD_MUTEX(un)); 25046 } 25047 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25048 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 25049 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25050 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 25051 /* 25052 * wakeup the destroy thread if anyone is waiting on 25053 * us to complete. 
25054 */ 25055 cv_signal(&sd_tr.srq_inprocess_cv); 25056 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25057 "sd_resv_reclaim_thread: cv_signalling current request \n"); 25058 } 25059 25060 /* 25061 * cleanup the sd_tr structure now that this thread will not exist 25062 */ 25063 ASSERT(sd_tr.srq_thr_req_head == NULL); 25064 ASSERT(sd_tr.srq_thr_cur_req == NULL); 25065 sd_tr.srq_resv_reclaim_thread = NULL; 25066 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25067 thread_exit(); 25068 } 25069 25070 25071 /* 25072 * Function: sd_rmv_resv_reclaim_req() 25073 * 25074 * Description: This function removes any pending reservation reclaim requests 25075 * for the specified device. 25076 * 25077 * Arguments: dev - the device 'dev_t' 25078 */ 25079 25080 static void 25081 sd_rmv_resv_reclaim_req(dev_t dev) 25082 { 25083 struct sd_thr_request *sd_mhreq; 25084 struct sd_thr_request *sd_prev; 25085 25086 /* Remove a reservation reclaim request from the list */ 25087 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25088 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 25089 /* 25090 * We are attempting to reinstate reservation for 25091 * this device. We wait for sd_reserve_release() 25092 * to return before we return. 
25093 */ 25094 cv_wait(&sd_tr.srq_inprocess_cv, 25095 &sd_tr.srq_resv_reclaim_mutex); 25096 } else { 25097 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 25098 if (sd_mhreq && sd_mhreq->dev == dev) { 25099 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 25100 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25101 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25102 return; 25103 } 25104 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 25105 if (sd_mhreq && sd_mhreq->dev == dev) { 25106 break; 25107 } 25108 sd_prev = sd_mhreq; 25109 } 25110 if (sd_mhreq != NULL) { 25111 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 25112 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25113 } 25114 } 25115 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25116 } 25117 25118 25119 /* 25120 * Function: sd_mhd_reset_notify_cb() 25121 * 25122 * Description: This is a call back function for scsi_reset_notify. This 25123 * function updates the softstate reserved status and logs the 25124 * reset. The driver scsi watch facility callback function 25125 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 25126 * will reclaim the reservation. 25127 * 25128 * Arguments: arg - driver soft state (unit) structure 25129 */ 25130 25131 static void 25132 sd_mhd_reset_notify_cb(caddr_t arg) 25133 { 25134 struct sd_lun *un = (struct sd_lun *)arg; 25135 25136 mutex_enter(SD_MUTEX(un)); 25137 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25138 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 25139 SD_INFO(SD_LOG_IOCTL_MHD, un, 25140 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 25141 } 25142 mutex_exit(SD_MUTEX(un)); 25143 } 25144 25145 25146 /* 25147 * Function: sd_take_ownership() 25148 * 25149 * Description: This routine implements an algorithm to achieve a stable 25150 * reservation on disks which don't implement priority reserve, 25151 * and makes sure that other host lose re-reservation attempts. 
25152 * This algorithm contains of a loop that keeps issuing the RESERVE 25153 * for some period of time (min_ownership_delay, default 6 seconds) 25154 * During that loop, it looks to see if there has been a bus device 25155 * reset or bus reset (both of which cause an existing reservation 25156 * to be lost). If the reservation is lost issue RESERVE until a 25157 * period of min_ownership_delay with no resets has gone by, or 25158 * until max_ownership_delay has expired. This loop ensures that 25159 * the host really did manage to reserve the device, in spite of 25160 * resets. The looping for min_ownership_delay (default six 25161 * seconds) is important to early generation clustering products, 25162 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 25163 * MHIOCENFAILFAST periodic timer of two seconds. By having 25164 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 25165 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 25166 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 25167 * have already noticed, via the MHIOCENFAILFAST polling, that it 25168 * no longer "owns" the disk and will have panicked itself. Thus, 25169 * the host issuing the MHIOCTKOWN is assured (with timing 25170 * dependencies) that by the time it actually starts to use the 25171 * disk for real work, the old owner is no longer accessing it. 25172 * 25173 * min_ownership_delay is the minimum amount of time for which the 25174 * disk must be reserved continuously devoid of resets before the 25175 * MHIOCTKOWN ioctl will return success. 25176 * 25177 * max_ownership_delay indicates the amount of time by which the 25178 * take ownership should succeed or timeout with an error. 25179 * 25180 * Arguments: dev - the device 'dev_t' 25181 * *p - struct containing timing info. 
25182 * 25183 * Return Code: 0 for success or error code 25184 */ 25185 25186 static int 25187 sd_take_ownership(dev_t dev, struct mhioctkown *p) 25188 { 25189 struct sd_lun *un; 25190 int rval; 25191 int err; 25192 int reservation_count = 0; 25193 int min_ownership_delay = 6000000; /* in usec */ 25194 int max_ownership_delay = 30000000; /* in usec */ 25195 clock_t start_time; /* starting time of this algorithm */ 25196 clock_t end_time; /* time limit for giving up */ 25197 clock_t ownership_time; /* time limit for stable ownership */ 25198 clock_t current_time; 25199 clock_t previous_current_time; 25200 25201 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25202 return (ENXIO); 25203 } 25204 25205 /* 25206 * Attempt a device reservation. A priority reservation is requested. 25207 */ 25208 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 25209 != SD_SUCCESS) { 25210 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25211 "sd_take_ownership: return(1)=%d\n", rval); 25212 return (rval); 25213 } 25214 25215 /* Update the softstate reserved status to indicate the reservation */ 25216 mutex_enter(SD_MUTEX(un)); 25217 un->un_resvd_status |= SD_RESERVE; 25218 un->un_resvd_status &= 25219 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 25220 mutex_exit(SD_MUTEX(un)); 25221 25222 if (p != NULL) { 25223 if (p->min_ownership_delay != 0) { 25224 min_ownership_delay = p->min_ownership_delay * 1000; 25225 } 25226 if (p->max_ownership_delay != 0) { 25227 max_ownership_delay = p->max_ownership_delay * 1000; 25228 } 25229 } 25230 SD_INFO(SD_LOG_IOCTL_MHD, un, 25231 "sd_take_ownership: min, max delays: %d, %d\n", 25232 min_ownership_delay, max_ownership_delay); 25233 25234 start_time = ddi_get_lbolt(); 25235 current_time = start_time; 25236 ownership_time = current_time + drv_usectohz(min_ownership_delay); 25237 end_time = start_time + drv_usectohz(max_ownership_delay); 25238 25239 while (current_time - end_time < 0) { 25240 delay(drv_usectohz(500000)); 25241 
25242 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 25243 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 25244 mutex_enter(SD_MUTEX(un)); 25245 rval = (un->un_resvd_status & 25246 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 25247 mutex_exit(SD_MUTEX(un)); 25248 break; 25249 } 25250 } 25251 previous_current_time = current_time; 25252 current_time = ddi_get_lbolt(); 25253 mutex_enter(SD_MUTEX(un)); 25254 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 25255 ownership_time = ddi_get_lbolt() + 25256 drv_usectohz(min_ownership_delay); 25257 reservation_count = 0; 25258 } else { 25259 reservation_count++; 25260 } 25261 un->un_resvd_status |= SD_RESERVE; 25262 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 25263 mutex_exit(SD_MUTEX(un)); 25264 25265 SD_INFO(SD_LOG_IOCTL_MHD, un, 25266 "sd_take_ownership: ticks for loop iteration=%ld, " 25267 "reservation=%s\n", (current_time - previous_current_time), 25268 reservation_count ? "ok" : "reclaimed"); 25269 25270 if (current_time - ownership_time >= 0 && 25271 reservation_count >= 4) { 25272 rval = 0; /* Achieved a stable ownership */ 25273 break; 25274 } 25275 if (current_time - end_time >= 0) { 25276 rval = EACCES; /* No ownership in max possible time */ 25277 break; 25278 } 25279 } 25280 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25281 "sd_take_ownership: return(2)=%d\n", rval); 25282 return (rval); 25283 } 25284 25285 25286 /* 25287 * Function: sd_reserve_release() 25288 * 25289 * Description: This function builds and sends scsi RESERVE, RELEASE, and 25290 * PRIORITY RESERVE commands based on a user specified command type 25291 * 25292 * Arguments: dev - the device 'dev_t' 25293 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 25294 * SD_RESERVE, SD_RELEASE 25295 * 25296 * Return Code: 0 or Error Code 25297 */ 25298 25299 static int 25300 sd_reserve_release(dev_t dev, int cmd) 25301 { 25302 struct uscsi_cmd *com = NULL; 25303 struct sd_lun *un = NULL; 25304 char cdb[CDB_GROUP0]; 25305 int 
rval; 25306 25307 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 25308 (cmd == SD_PRIORITY_RESERVE)); 25309 25310 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25311 return (ENXIO); 25312 } 25313 25314 /* instantiate and initialize the command and cdb */ 25315 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25316 bzero(cdb, CDB_GROUP0); 25317 com->uscsi_flags = USCSI_SILENT; 25318 com->uscsi_timeout = un->un_reserve_release_time; 25319 com->uscsi_cdblen = CDB_GROUP0; 25320 com->uscsi_cdb = cdb; 25321 if (cmd == SD_RELEASE) { 25322 cdb[0] = SCMD_RELEASE; 25323 } else { 25324 cdb[0] = SCMD_RESERVE; 25325 } 25326 25327 /* Send the command. */ 25328 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25329 SD_PATH_STANDARD); 25330 25331 /* 25332 * "break" a reservation that is held by another host, by issuing a 25333 * reset if priority reserve is desired, and we could not get the 25334 * device. 25335 */ 25336 if ((cmd == SD_PRIORITY_RESERVE) && 25337 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25338 /* 25339 * First try to reset the LUN. If we cannot, then try a target 25340 * reset, followed by a bus reset if the target reset fails. 25341 */ 25342 int reset_retval = 0; 25343 if (un->un_f_lun_reset_enabled == TRUE) { 25344 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 25345 } 25346 if (reset_retval == 0) { 25347 /* The LUN reset either failed or was not issued */ 25348 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25349 } 25350 if ((reset_retval == 0) && 25351 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 25352 rval = EIO; 25353 kmem_free(com, sizeof (*com)); 25354 return (rval); 25355 } 25356 25357 bzero(com, sizeof (struct uscsi_cmd)); 25358 com->uscsi_flags = USCSI_SILENT; 25359 com->uscsi_cdb = cdb; 25360 com->uscsi_cdblen = CDB_GROUP0; 25361 com->uscsi_timeout = 5; 25362 25363 /* 25364 * Reissue the last reserve command, this time without request 25365 * sense. 
 Assume that it is just a regular reserve command.
         */
        rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
            SD_PATH_STANDARD);
    }

    /* Return an error if still getting a reservation conflict. */
    if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
        rval = EACCES;
    }

    kmem_free(com, sizeof (*com));
    return (rval);
}


#define SD_NDUMP_RETRIES    12
/*
 * System Crash Dump routine
 *
 * Arguments: dev - device to dump to
 *	addr - kernel virtual address of the data to write
 *	blkno - partition-relative starting block
 *	nblk - number of blocks to write
 *
 * Return Code: 0 on success, otherwise an errno value (ENXIO, EINVAL, EIO).
 */

static int
sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
    int             instance;
    int             partition;
    int             i;
    int             err;
    struct sd_lun   *un;
    struct scsi_pkt *wr_pktp;
    struct buf      *wr_bp;
    struct buf      wr_buf;
    daddr_t         tgt_byte_offset; /* rmw - byte offset for target */
    daddr_t         tgt_blkno;      /* rmw - blkno for target */
    size_t          tgt_byte_count; /* rmw - # of bytes to xfer */
    size_t          tgt_nblk;       /* rmw - # of tgt blks to xfer */
    size_t          io_start_offset;
    int             doing_rmw = FALSE;
    int             rval;
    ssize_t         dma_resid;
    daddr_t         oblkno;
    diskaddr_t      nblks = 0;
    diskaddr_t      start_block;

    instance = SDUNIT(dev);
    if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
        !SD_IS_VALID_LABEL(un) || ISCD(un)) {
        return (ENXIO);
    }

    _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))

    SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");

    partition = SDPART(dev);
    SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);

    /*
     * When the target block size differs from DEV_BSIZE, the dump range
     * must be aligned to whole target blocks.
     */
    if (!(NOT_DEVBSIZE(un))) {
        int secmask = 0;
        int blknomask = 0;

        blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
        secmask = un->un_tgt_blocksize - 1;

        if (blkno & blknomask) {
            SD_TRACE(SD_LOG_DUMP, un,
                "sddump: dump start block not modulo %d\n",
                un->un_tgt_blocksize);
            return (EINVAL);
        }

        if ((nblk * DEV_BSIZE) & secmask) {
            SD_TRACE(SD_LOG_DUMP, un,
                "sddump: dump length not modulo %d\n",
                un->un_tgt_blocksize);
            return (EINVAL);
        }

    }

    /* Validate blocks to dump at against partition size. */

    (void) cmlb_partinfo(un->un_cmlbhandle, partition,
        &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);

    if (NOT_DEVBSIZE(un)) {
        if ((blkno + nblk) > nblks) {
            SD_TRACE(SD_LOG_DUMP, un,
                "sddump: dump range larger than partition: "
                "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
                blkno, nblk, nblks);
            return (EINVAL);
        }
    } else {
        if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) +
            (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) {
            SD_TRACE(SD_LOG_DUMP, un,
                "sddump: dump range larger than partition: "
                "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
                blkno, nblk, nblks);
            return (EINVAL);
        }
    }

    mutex_enter(&un->un_pm_mutex);
    if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
        struct scsi_pkt *start_pktp;

        mutex_exit(&un->un_pm_mutex);

        /*
         * use pm framework to power on HBA 1st
         */
        (void) pm_raise_power(SD_DEVINFO(un), 0,
            SD_PM_STATE_ACTIVE(un));

        /*
         * Dump no long uses sdpower to power on a device, it's
         * in-line here so it can be done in polled mode.
         */

        SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");

        start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
            CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);

        if (start_pktp == NULL) {
            /* We were not given a SCSI packet, fail. */
            return (EIO);
        }
        bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
        start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
        start_pktp->pkt_cdbp[4] = SD_TARGET_START;
        /* Polled mode: interrupts may not be available during a dump. */
        start_pktp->pkt_flags = FLAG_NOINTR;

        mutex_enter(SD_MUTEX(un));
        SD_FILL_SCSI1_LUN(un, start_pktp);
        mutex_exit(SD_MUTEX(un));
        /*
         * Scsi_poll returns 0 (success) if the command completes and
         * the status block is STATUS_GOOD.
         */
        if (sd_scsi_poll(un, start_pktp) != 0) {
            scsi_destroy_pkt(start_pktp);
            return (EIO);
        }
        scsi_destroy_pkt(start_pktp);
        (void) sd_pm_state_change(un, SD_PM_STATE_ACTIVE(un),
            SD_PM_STATE_CHANGE);
    } else {
        mutex_exit(&un->un_pm_mutex);
    }

    mutex_enter(SD_MUTEX(un));
    un->un_throttle = 0;

    /*
     * The first time through, reset the specific target device.
     * However, when cpr calls sddump we know that sd is in a
     * a good state so no bus reset is required.
     * Clear sense data via Request Sense cmd.
     * In sddump we don't care about allow_bus_device_reset anymore
     */

    if ((un->un_state != SD_STATE_SUSPENDED) &&
        (un->un_state != SD_STATE_DUMPING)) {

        New_state(un, SD_STATE_DUMPING);

        if (un->un_f_is_fibre == FALSE) {
            mutex_exit(SD_MUTEX(un));
            /*
             * Attempt a bus reset for parallel scsi.
             *
             * Note: A bus reset is required because on some host
             * systems (i.e. E420R) a bus device reset is
             * insufficient to reset the state of the target.
             *
             * Note: Don't issue the reset for fibre-channel,
             * because this tends to hang the bus (loop) for
             * too long while everyone is logging out and in
             * and the deadman timer for dumping will fire
             * before the dump is complete.
             */
            if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
                mutex_enter(SD_MUTEX(un));
                Restore_state(un);
                mutex_exit(SD_MUTEX(un));
                return (EIO);
            }

            /* Delay to give the device some recovery time. */
            drv_usecwait(10000);

            if (sd_send_polled_RQS(un) == SD_FAILURE) {
                SD_INFO(SD_LOG_DUMP, un,
                    "sddump: sd_send_polled_RQS failed\n");
            }
            mutex_enter(SD_MUTEX(un));
        }
    }

    /*
     * Convert the partition-relative block number to a
     * disk physical block number.
     */
    if (NOT_DEVBSIZE(un)) {
        blkno += start_block;
    } else {
        blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE);
        blkno += start_block;
    }

    SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);


    /*
     * Check if the device has a non-512 block size.
     */
    wr_bp = NULL;
    if (NOT_DEVBSIZE(un)) {
        tgt_byte_offset = blkno * un->un_sys_blocksize;
        tgt_byte_count = nblk * un->un_sys_blocksize;
        if ((tgt_byte_offset % un->un_tgt_blocksize) ||
            (tgt_byte_count % un->un_tgt_blocksize)) {
            /* Unaligned: a read-modify-write cycle is required. */
            doing_rmw = TRUE;
            /*
             * Calculate the block number and number of block
             * in terms of the media block size.
             */
            tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
            tgt_nblk =
                ((tgt_byte_offset + tgt_byte_count +
                (un->un_tgt_blocksize - 1)) /
                un->un_tgt_blocksize) - tgt_blkno;

            /*
             * Invoke the routine which is going to do read part
             * of read-modify-write.
             * Note that this routine returns a pointer to
             * a valid bp in wr_bp.
25606 */ 25607 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 25608 &wr_bp); 25609 if (err) { 25610 mutex_exit(SD_MUTEX(un)); 25611 return (err); 25612 } 25613 /* 25614 * Offset is being calculated as - 25615 * (original block # * system block size) - 25616 * (new block # * target block size) 25617 */ 25618 io_start_offset = 25619 ((uint64_t)(blkno * un->un_sys_blocksize)) - 25620 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 25621 25622 ASSERT((io_start_offset >= 0) && 25623 (io_start_offset < un->un_tgt_blocksize)); 25624 /* 25625 * Do the modify portion of read modify write. 25626 */ 25627 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 25628 (size_t)nblk * un->un_sys_blocksize); 25629 } else { 25630 doing_rmw = FALSE; 25631 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25632 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 25633 } 25634 25635 /* Convert blkno and nblk to target blocks */ 25636 blkno = tgt_blkno; 25637 nblk = tgt_nblk; 25638 } else { 25639 wr_bp = &wr_buf; 25640 bzero(wr_bp, sizeof (struct buf)); 25641 wr_bp->b_flags = B_BUSY; 25642 wr_bp->b_un.b_addr = addr; 25643 wr_bp->b_bcount = nblk << DEV_BSHIFT; 25644 wr_bp->b_resid = 0; 25645 } 25646 25647 mutex_exit(SD_MUTEX(un)); 25648 25649 /* 25650 * Obtain a SCSI packet for the write command. 25651 * It should be safe to call the allocator here without 25652 * worrying about being locked for DVMA mapping because 25653 * the address we're passed is already a DVMA mapping 25654 * 25655 * We are also not going to worry about semaphore ownership 25656 * in the dump buffer. Dumping is single threaded at present. 
25657 */ 25658 25659 wr_pktp = NULL; 25660 25661 dma_resid = wr_bp->b_bcount; 25662 oblkno = blkno; 25663 25664 if (!(NOT_DEVBSIZE(un))) { 25665 nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE); 25666 } 25667 25668 while (dma_resid != 0) { 25669 25670 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25671 wr_bp->b_flags &= ~B_ERROR; 25672 25673 if (un->un_partial_dma_supported == 1) { 25674 blkno = oblkno + 25675 ((wr_bp->b_bcount - dma_resid) / 25676 un->un_tgt_blocksize); 25677 nblk = dma_resid / un->un_tgt_blocksize; 25678 25679 if (wr_pktp) { 25680 /* 25681 * Partial DMA transfers after initial transfer 25682 */ 25683 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 25684 blkno, nblk); 25685 } else { 25686 /* Initial transfer */ 25687 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25688 un->un_pkt_flags, NULL_FUNC, NULL, 25689 blkno, nblk); 25690 } 25691 } else { 25692 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25693 0, NULL_FUNC, NULL, blkno, nblk); 25694 } 25695 25696 if (rval == 0) { 25697 /* We were given a SCSI packet, continue. 
*/ 25698 break; 25699 } 25700 25701 if (i == 0) { 25702 if (wr_bp->b_flags & B_ERROR) { 25703 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25704 "no resources for dumping; " 25705 "error code: 0x%x, retrying", 25706 geterror(wr_bp)); 25707 } else { 25708 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25709 "no resources for dumping; retrying"); 25710 } 25711 } else if (i != (SD_NDUMP_RETRIES - 1)) { 25712 if (wr_bp->b_flags & B_ERROR) { 25713 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25714 "no resources for dumping; error code: " 25715 "0x%x, retrying\n", geterror(wr_bp)); 25716 } 25717 } else { 25718 if (wr_bp->b_flags & B_ERROR) { 25719 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25720 "no resources for dumping; " 25721 "error code: 0x%x, retries failed, " 25722 "giving up.\n", geterror(wr_bp)); 25723 } else { 25724 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25725 "no resources for dumping; " 25726 "retries failed, giving up.\n"); 25727 } 25728 mutex_enter(SD_MUTEX(un)); 25729 Restore_state(un); 25730 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 25731 mutex_exit(SD_MUTEX(un)); 25732 scsi_free_consistent_buf(wr_bp); 25733 } else { 25734 mutex_exit(SD_MUTEX(un)); 25735 } 25736 return (EIO); 25737 } 25738 drv_usecwait(10000); 25739 } 25740 25741 if (un->un_partial_dma_supported == 1) { 25742 /* 25743 * save the resid from PARTIAL_DMA 25744 */ 25745 dma_resid = wr_pktp->pkt_resid; 25746 if (dma_resid != 0) 25747 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 25748 wr_pktp->pkt_resid = 0; 25749 } else { 25750 dma_resid = 0; 25751 } 25752 25753 /* SunBug 1222170 */ 25754 wr_pktp->pkt_flags = FLAG_NOINTR; 25755 25756 err = EIO; 25757 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25758 25759 /* 25760 * Scsi_poll returns 0 (success) if the command completes and 25761 * the status block is STATUS_GOOD. We should only check 25762 * errors if this condition is not true. 
Even then we should 25763 * send our own request sense packet only if we have a check 25764 * condition and auto request sense has not been performed by 25765 * the hba. 25766 */ 25767 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 25768 25769 if ((sd_scsi_poll(un, wr_pktp) == 0) && 25770 (wr_pktp->pkt_resid == 0)) { 25771 err = SD_SUCCESS; 25772 break; 25773 } 25774 25775 /* 25776 * Check CMD_DEV_GONE 1st, give up if device is gone. 25777 */ 25778 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 25779 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25780 "Error while dumping state...Device is gone\n"); 25781 break; 25782 } 25783 25784 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 25785 SD_INFO(SD_LOG_DUMP, un, 25786 "sddump: write failed with CHECK, try # %d\n", i); 25787 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 25788 (void) sd_send_polled_RQS(un); 25789 } 25790 25791 continue; 25792 } 25793 25794 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 25795 int reset_retval = 0; 25796 25797 SD_INFO(SD_LOG_DUMP, un, 25798 "sddump: write failed with BUSY, try # %d\n", i); 25799 25800 if (un->un_f_lun_reset_enabled == TRUE) { 25801 reset_retval = scsi_reset(SD_ADDRESS(un), 25802 RESET_LUN); 25803 } 25804 if (reset_retval == 0) { 25805 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25806 } 25807 (void) sd_send_polled_RQS(un); 25808 25809 } else { 25810 SD_INFO(SD_LOG_DUMP, un, 25811 "sddump: write failed with 0x%x, try # %d\n", 25812 SD_GET_PKT_STATUS(wr_pktp), i); 25813 mutex_enter(SD_MUTEX(un)); 25814 sd_reset_target(un, wr_pktp); 25815 mutex_exit(SD_MUTEX(un)); 25816 } 25817 25818 /* 25819 * If we are not getting anywhere with lun/target resets, 25820 * let's reset the bus. 
		 */
		if (i == SD_NDUMP_RETRIES/2) {
			(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			(void) sd_send_polled_RQS(un);
		}
	}
	}

	scsi_destroy_pkt(wr_pktp);
	mutex_enter(SD_MUTEX(un));
	if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) {
		mutex_exit(SD_MUTEX(un));
		/*
		 * The read-modify-write path used a locally allocated
		 * consistent buffer (allocation is earlier in sddump(),
		 * above this view); release it here.
		 */
		scsi_free_consistent_buf(wr_bp);
	} else {
		mutex_exit(SD_MUTEX(un));
	}
	SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err);
	return (err);
}

/*
 * Function: sd_scsi_poll()
 *
 * Description: This is a wrapper for the scsi_poll call.
 *
 * Arguments: un - The unit structure
 *		pktp - The scsi packet being sent to the device.
 *
 * Return Code: 0 - Command completed successfully with good status
 *		-1 - Command failed.  This could indicate a check condition
 *		or other status value requiring recovery action.
 *
 * NOTE: This code is only called off sddump().
 */

static int
sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp)
{
	int status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(pktp != NULL);

	status = SD_SUCCESS;

	/*
	 * If the HBA supports tagged queuing, allow this command to be
	 * tagged and to disconnect (clear FLAG_NODISCON).
	 */
	if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) {
		pktp->pkt_flags |= un->un_tagflags;
		pktp->pkt_flags &= ~FLAG_NODISCON;
	}

	status = sd_ddi_scsi_poll(pktp);
	/*
	 * Scsi_poll returns 0 (success) if the command completes and the
	 * status block is STATUS_GOOD.  We should only check errors if this
	 * condition is not true.  Even then we should send our own request
	 * sense packet only if we have a check condition and auto
	 * request sense has not been performed by the hba.
	 * Don't get RQS data if pkt_reason is CMD_DEV_GONE.
	 */
	if ((status != SD_SUCCESS) &&
	    (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) &&
	    (pktp->pkt_state & STATE_ARQ_DONE) == 0 &&
	    (pktp->pkt_reason != CMD_DEV_GONE))
		(void) sd_send_polled_RQS(un);

	return (status);
}

/*
 * Function: sd_send_polled_RQS()
 *
 * Description: This sends the request sense command to a device.
 *		It uses the unit's single pre-allocated request sense
 *		packet/buffer (un_rqs_pktp/un_rqs_bp), serialized by the
 *		un_sense_isbusy flag.
 *
 * Arguments: un - The unit structure
 *
 * Return Code: 0 - Command completed successfully with good status
 *		-1 - Command failed.
 *
 */

static int
sd_send_polled_RQS(struct sd_lun *un)
{
	int		ret_val;
	struct scsi_pkt	*rqs_pktp;
	struct buf	*rqs_bp;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	ret_val = SD_SUCCESS;

	rqs_pktp = un->un_rqs_pktp;
	rqs_bp	 = un->un_rqs_bp;

	mutex_enter(SD_MUTEX(un));

	/*
	 * The shared sense packet is in use by someone else; fail rather
	 * than wait (callers are on the polled/dump path).
	 */
	if (un->un_sense_isbusy) {
		ret_val = SD_FAILURE;
		mutex_exit(SD_MUTEX(un));
		return (ret_val);
	}

	/*
	 * If the request sense buffer (and packet) is not in use,
	 * let's set the un_sense_isbusy and send our packet
	 */
	un->un_sense_isbusy = 1;
	rqs_pktp->pkt_resid = 0;
	rqs_pktp->pkt_reason = 0;
	rqs_pktp->pkt_flags |= FLAG_NOINTR;
	bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH);

	mutex_exit(SD_MUTEX(un));

	SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at"
	    " 0x%p\n", rqs_bp->b_un.b_addr);

	/*
	 * Can't send this to sd_scsi_poll, we wrap ourselves around the
	 * axle - it has a call into us!
	 */
	if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) {
		SD_INFO(SD_LOG_COMMON, un,
		    "sd_send_polled_RQS: RQS failed\n");
	}

	SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:",
	    (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX);

	mutex_enter(SD_MUTEX(un));
	un->un_sense_isbusy = 0;
	mutex_exit(SD_MUTEX(un));

	return (ret_val);
}

/*
 * Defines needed for localized version of the scsi_poll routine.
 * CSEC is one "centisecond" poll tick in microseconds (10 msec).
 */
#define	CSEC		10000			/* usecs */
#define	SEC_TO_CSEC	(1000000/CSEC)

/*
 * Function: sd_ddi_scsi_poll()
 *
 * Description: Localized version of the scsi_poll routine.  The purpose is to
 *		send a scsi_pkt to a device as a polled command.  This version
 *		is to ensure more robust handling of transport errors.
 *		Specifically this routine cures not ready, coming ready
 *		transition for power up and reset of sonoma's.  This can take
 *		up to 45 seconds for power-on and 20 seconds for reset of a
 *		sonoma lun.
 *
 * Arguments: pkt - The scsi_pkt being sent to a device
 *
 * Return Code: 0 - Command completed successfully with good status
 *		-1 - Command failed.
 *
 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can
 * be fixed (removing this code), we need to determine how to handle the
 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump().
 *
 * NOTE: This code is only called off sddump().
 */
static int
sd_ddi_scsi_poll(struct scsi_pkt *pkt)
{
	int			rval = -1;
	int			savef;
	long			savet;
	void			(*savec)();
	int			timeout;
	int			busy_count;
	int			poll_delay;
	int			rc;
	uint8_t			*sensep;
	struct scsi_arq_status	*arqstat;
	extern int		do_polled_io;

	ASSERT(pkt->pkt_scbp);

	/*
	 * save old flags..
	 */
	savef = pkt->pkt_flags;
	savec = pkt->pkt_comp;
	savet = pkt->pkt_time;

	pkt->pkt_flags |= FLAG_NOINTR;

	/*
	 * XXX there is nothing in the SCSA spec that states that we should not
	 * do a callback for polled cmds; however, removing this will break sd
	 * and probably other target drivers
	 */
	pkt->pkt_comp = NULL;

	/*
	 * we don't like a polled command without timeout.
	 * 60 seconds seems long enough.
	 */
	if (pkt->pkt_time == 0)
		pkt->pkt_time = SCSI_POLL_TIMEOUT;

	/*
	 * Send polled cmd.
	 *
	 * We do some error recovery for various errors.  Tran_busy,
	 * queue full, and non-dispatched commands are retried every 10 msec.
	 * as they are typically transient failures.  Busy status and Not
	 * Ready are retried every second as this status takes a while to
	 * change.
	 */
	/* total poll budget, counted in 10-msec (CSEC) ticks */
	timeout = pkt->pkt_time * SEC_TO_CSEC;

	for (busy_count = 0; busy_count < timeout; busy_count++) {
		/*
		 * Initialize pkt status variables.
		 */
		*pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0;

		if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) {
			if (rc != TRAN_BUSY) {
				/* Transport failed - give up. */
				break;
			} else {
				/* Transport busy - try again. */
				poll_delay = 1 * CSEC;		/* 10 msec. */
			}
		} else {
			/*
			 * Transport accepted - check pkt status.
			 */
			rc = (*pkt->pkt_scbp) & STATUS_MASK;
			if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_CHECK) &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				arqstat =
				    (struct scsi_arq_status *)(pkt->pkt_scbp);
				sensep = (uint8_t *)&arqstat->sts_sensedata;
			} else {
				sensep = NULL;
			}

			if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_GOOD)) {
				/* No error - we're done */
				rval = 0;
				break;

			} else if (pkt->pkt_reason == CMD_DEV_GONE) {
				/* Lost connection - give up */
				break;

			} else if ((pkt->pkt_reason == CMD_INCOMPLETE) &&
			    (pkt->pkt_state == 0)) {
				/* Pkt not dispatched - try again. */
				poll_delay = 1 * CSEC;		/* 10 msec. */

			} else if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_QFULL)) {
				/* Queue full - try again. */
				poll_delay = 1 * CSEC;		/* 10 msec. */

			} else if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_BUSY)) {
				/* Busy - try again. */
				poll_delay = 100 * CSEC;	/* 1 sec. */
				busy_count += (SEC_TO_CSEC - 1);

			} else if ((sensep != NULL) &&
			    (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) {
				/*
				 * Unit Attention - try again.
				 * Pretend it took 1 sec.
				 * NOTE: 'continue' avoids poll_delay
				 */
				busy_count += (SEC_TO_CSEC - 1);
				continue;

			} else if ((sensep != NULL) &&
			    (scsi_sense_key(sensep) == KEY_NOT_READY) &&
			    (scsi_sense_asc(sensep) == 0x04) &&
			    (scsi_sense_ascq(sensep) == 0x01)) {
				/*
				 * Not ready -> ready - try again.
				 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY
				 * ...same as STATUS_BUSY
				 */
				poll_delay = 100 * CSEC;	/* 1 sec. */
				busy_count += (SEC_TO_CSEC - 1);

			} else {
				/* BAD status - give up. */
				break;
			}
		}

		if (((curthread->t_flag & T_INTR_THREAD) == 0) &&
		    !do_polled_io) {
			delay(drv_usectohz(poll_delay));
		} else {
			/* we busy wait during cpr_dump or interrupt threads */
			drv_usecwait(poll_delay);
		}
	}

	/* restore the caller's packet settings */
	pkt->pkt_flags = savef;
	pkt->pkt_comp = savec;
	pkt->pkt_time = savet;

	/* return on error */
	if (rval)
		return (rval);

	/*
	 * This is not a performance critical code path.
	 *
	 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync()
	 * issues associated with looking at DMA memory prior to
	 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return.
	 */
	scsi_sync_pkt(pkt);
	return (0);
}



/*
 * Function: sd_persistent_reservation_in_read_keys
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		multi-host persistent reservation requests (MHIOCGRP_INKEYS)
 *		by sending the SCSI-3 PRIN commands to the device.
 *		Processes the read keys command response by copying the
 *		reservation key information into the user provided buffer.
 *		Support for the 32/64 bit _MULTI_DATAMODEL is implemented.
 *
 * Arguments: un   -  Pointer to soft state struct for the target.
 *		usrp	-  user provided pointer to multihost Persistent In Read
 *			Keys structure (mhioc_inkeys_t)
 *		flag	-  this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0   - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
26173 */ 26174 26175 static int 26176 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 26177 mhioc_inkeys_t *usrp, int flag) 26178 { 26179 #ifdef _MULTI_DATAMODEL 26180 struct mhioc_key_list32 li32; 26181 #endif 26182 sd_prin_readkeys_t *in; 26183 mhioc_inkeys_t *ptr; 26184 mhioc_key_list_t li; 26185 uchar_t *data_bufp; 26186 int data_len; 26187 int rval = 0; 26188 size_t copysz; 26189 sd_ssc_t *ssc; 26190 26191 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 26192 return (EINVAL); 26193 } 26194 bzero(&li, sizeof (mhioc_key_list_t)); 26195 26196 ssc = sd_ssc_init(un); 26197 26198 /* 26199 * Get the listsize from user 26200 */ 26201 #ifdef _MULTI_DATAMODEL 26202 26203 switch (ddi_model_convert_from(flag & FMODELS)) { 26204 case DDI_MODEL_ILP32: 26205 copysz = sizeof (struct mhioc_key_list32); 26206 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 26207 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26208 "sd_persistent_reservation_in_read_keys: " 26209 "failed ddi_copyin: mhioc_key_list32_t\n"); 26210 rval = EFAULT; 26211 goto done; 26212 } 26213 li.listsize = li32.listsize; 26214 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 26215 break; 26216 26217 case DDI_MODEL_NONE: 26218 copysz = sizeof (mhioc_key_list_t); 26219 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26220 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26221 "sd_persistent_reservation_in_read_keys: " 26222 "failed ddi_copyin: mhioc_key_list_t\n"); 26223 rval = EFAULT; 26224 goto done; 26225 } 26226 break; 26227 } 26228 26229 #else /* ! 
_MULTI_DATAMODEL */ 26230 copysz = sizeof (mhioc_key_list_t); 26231 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26232 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26233 "sd_persistent_reservation_in_read_keys: " 26234 "failed ddi_copyin: mhioc_key_list_t\n"); 26235 rval = EFAULT; 26236 goto done; 26237 } 26238 #endif 26239 26240 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 26241 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 26242 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26243 26244 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 26245 data_len, data_bufp); 26246 if (rval != 0) { 26247 if (rval == EIO) 26248 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 26249 else 26250 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 26251 goto done; 26252 } 26253 in = (sd_prin_readkeys_t *)data_bufp; 26254 ptr->generation = BE_32(in->generation); 26255 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 26256 26257 /* 26258 * Return the min(listsize, listlen) keys 26259 */ 26260 #ifdef _MULTI_DATAMODEL 26261 26262 switch (ddi_model_convert_from(flag & FMODELS)) { 26263 case DDI_MODEL_ILP32: 26264 li32.listlen = li.listlen; 26265 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 26266 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26267 "sd_persistent_reservation_in_read_keys: " 26268 "failed ddi_copyout: mhioc_key_list32_t\n"); 26269 rval = EFAULT; 26270 goto done; 26271 } 26272 break; 26273 26274 case DDI_MODEL_NONE: 26275 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26276 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26277 "sd_persistent_reservation_in_read_keys: " 26278 "failed ddi_copyout: mhioc_key_list_t\n"); 26279 rval = EFAULT; 26280 goto done; 26281 } 26282 break; 26283 } 26284 26285 #else /* ! 
_MULTI_DATAMODEL */ 26286 26287 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26288 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26289 "sd_persistent_reservation_in_read_keys: " 26290 "failed ddi_copyout: mhioc_key_list_t\n"); 26291 rval = EFAULT; 26292 goto done; 26293 } 26294 26295 #endif /* _MULTI_DATAMODEL */ 26296 26297 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 26298 li.listsize * MHIOC_RESV_KEY_SIZE); 26299 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 26300 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26301 "sd_persistent_reservation_in_read_keys: " 26302 "failed ddi_copyout: keylist\n"); 26303 rval = EFAULT; 26304 } 26305 done: 26306 sd_ssc_fini(ssc); 26307 kmem_free(data_bufp, data_len); 26308 return (rval); 26309 } 26310 26311 26312 /* 26313 * Function: sd_persistent_reservation_in_read_resv 26314 * 26315 * Description: This routine is the driver entry point for handling CD-ROM 26316 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 26317 * by sending the SCSI-3 PRIN commands to the device. 26318 * Process the read persistent reservations command response by 26319 * copying the reservation information into the user provided 26320 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 26321 * 26322 * Arguments: un - Pointer to soft state struct for the target. 26323 * usrp - user provided pointer to multihost Persistent In Read 26324 * Keys structure (mhioc_inkeys_t) 26325 * flag - this argument is a pass through to ddi_copyxxx() 26326 * directly from the mode argument of ioctl(). 26327 * 26328 * Return Code: 0 - Success 26329 * EACCES 26330 * ENOTSUP 26331 * errno return code from sd_send_scsi_cmd() 26332 * 26333 * Context: Can sleep. Does not return until command is completed. 
 */

static int
sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag)
{
#ifdef _MULTI_DATAMODEL
	struct mhioc_resv_desc_list32 resvlist32;
#endif
	sd_prin_readresv_t	*in;
	mhioc_inresvs_t		*ptr;
	sd_readresv_desc_t	*readresv_ptr;
	mhioc_resv_desc_list_t	resvlist;
	mhioc_resv_desc_t	resvdesc;
	/* NULL so the done: path can tell whether allocation happened */
	uchar_t			*data_bufp = NULL;
	int			data_len;
	int			rval = 0;
	int			i;
	size_t			copysz;
	mhioc_resv_desc_t	*bufp;
	sd_ssc_t		*ssc;

	if ((ptr = usrp) == NULL) {
		return (EINVAL);
	}

	ssc = sd_ssc_init(un);

	/*
	 * Get the listsize from user
	 */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		copysz = sizeof (struct mhioc_resv_desc_list32);
		if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		resvlist.listsize = resvlist32.listsize;
		resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list;
		break;

	case DDI_MODEL_NONE:
		copysz = sizeof (mhioc_resv_desc_list_t);
		if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	copysz = sizeof (mhioc_resv_desc_list_t);
	if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_resv: "
		    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
		rval = EFAULT;
		goto done;
	}
#endif /* ! _MULTI_DATAMODEL */

	/*
	 * Size the PRIN buffer: listsize descriptors plus the PRIN header
	 * (sd_prin_readresv_t minus its variable-length tail).
	 */
	data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN;
	data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t));
	data_bufp = kmem_zalloc(data_len, KM_SLEEP);

	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV,
	    data_len, data_bufp);
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto done;
	}
	in = (sd_prin_readresv_t *)data_bufp;
	ptr->generation = BE_32(in->generation);
	resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN;

	/*
	 * Return the min(listsize, listlen) keys
	 */
#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		resvlist32.listlen = resvlist.listlen;
		/* copysz still holds the model-specific size from the copyin */
		if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;

	case DDI_MODEL_NONE:
		if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_resv: "
		    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
		rval = EFAULT;
		goto done;
	}

#endif /* ! _MULTI_DATAMODEL */

	/*
	 * Copy out the descriptors one at a time, converting each from
	 * the device's big-endian wire format to the native mhioc form.
	 */
	readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc;
	bufp = resvlist.list;
	copysz = sizeof (mhioc_resv_desc_t);
	for (i = 0; i < min(resvlist.listlen, resvlist.listsize);
	    i++, readresv_ptr++, bufp++) {

		bcopy(&readresv_ptr->resvkey, &resvdesc.key,
		    MHIOC_RESV_KEY_SIZE);
		resvdesc.type  = readresv_ptr->type;
		resvdesc.scope = readresv_ptr->scope;
		resvdesc.scope_specific_addr =
		    BE_32(readresv_ptr->scope_specific_addr);

		if (ddi_copyout(&resvdesc, bufp, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: resvlist\n");
			rval = EFAULT;
			goto done;
		}
	}
done:
	sd_ssc_fini(ssc);
	/* only if data_bufp is allocated, we need to free it */
	if (data_bufp) {
		kmem_free(data_bufp, data_len);
	}
	return (rval);
}


/*
 * Function: sr_change_blkmode()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		block mode ioctl requests. Support for returning and changing
 *		the current block size in use by the device is implemented. The
 *		LBA size is changed via a MODE SELECT Block Descriptor.
 *
 *		This routine issues a mode sense with an allocation length of
 *		12 bytes for the mode page header and a single block descriptor.
 *
 * Arguments: dev - the device 'dev_t'
 *		cmd - the request type; one of CDROMGBLKMODE (get) or
 *		CDROMSBLKMODE (set)
 *		data - current block size or requested block size
 *		flag - this argument is a pass through to ddi_copyxxx() directly
 *		from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid arguments are provided
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EIO if invalid mode sense block descriptor length
 *
 */

static int
sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct mode_header	*sense_mhp, *select_mhp;
	struct block_descriptor	*sense_desc, *select_desc;
	int			current_bsize;
	int			rval = EINVAL;
	uchar_t			*sense = NULL;
	uchar_t			*select = NULL;
	sd_ssc_t		*ssc;

	ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * The block length is changed via the Mode Select block descriptor, the
	 * "Read/Write Error Recovery" mode page (0x1) contents are not actually
	 * required as part of this routine. Therefore the mode sense allocation
	 * length is specified to be the length of a mode page header and a
	 * block descriptor.
	 */
	sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
	    BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header *)sense;
	if ((sense_mhp->bdesc_length == 0) ||
	    (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense returned invalid block"
		    " descriptor length\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (EIO);
	}
	sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
	/* Current LBA size: 24-bit big-endian value in the descriptor */
	current_bsize = ((sense_desc->blksize_hi << 16) |
	    (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);

	/* Process command */
	switch (cmd) {
	case CDROMGBLKMODE:
		/* Return the block size obtained during the mode sense */
		if (ddi_copyout(&current_bsize, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSBLKMODE:
		/* Validate the requested block size */
		switch (data) {
		case CDROM_BLK_512:
		case CDROM_BLK_1024:
		case CDROM_BLK_2048:
		case CDROM_BLK_2056:
		case CDROM_BLK_2336:
		case CDROM_BLK_2340:
		case CDROM_BLK_2352:
		case CDROM_BLK_2368:
		case CDROM_BLK_2448:
		case CDROM_BLK_2646:
		case CDROM_BLK_2647:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: "
			    "Block Size '%ld' Not Supported\n", data);
			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
			return (EINVAL);
		}

		/*
		 * The current block size matches the requested block size so
		 * there is no need to send the mode select to change the size
		 */
		if (current_bsize == data) {
			break;
		}

		/* Build the select data for the requested block size */
		select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_desc =
		    (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
		/*
		 * The LBA size is changed via the block descriptor, so the
		 * descriptor is built according to the user data
		 */
		select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
		select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
		select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
		select_desc->blksize_lo = (char)((data) & 0x000000ff);

		/* Send the mode select for the requested block size */
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
		    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: Mode Select Failed\n");
			/*
			 * The mode select failed for the requested block size,
			 * so reset the data for the original block size and
			 * send it to the target. The error is indicated by the
			 * return value for the failed mode select.
			 */
			select_desc->blksize_hi = sense_desc->blksize_hi;
			select_desc->blksize_mid = sense_desc->blksize_mid;
			select_desc->blksize_lo = sense_desc->blksize_lo;
			ssc = sd_ssc_init(un);
			(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
			    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
			    SD_PATH_STANDARD);
			sd_ssc_fini(ssc);
		} else {
			ASSERT(!mutex_owned(SD_MUTEX(un)));
			mutex_enter(SD_MUTEX(un));
			/* update the cached block size under the soft-state
			 * mutex now that the device accepted the change */
			sd_update_block_info(un, (uint32_t)data, 0);
			mutex_exit(SD_MUTEX(un));
		}
		break;
	default:
		/* should not reach here, but check anyway */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		break;
	}

	if (select) {
		kmem_free(select, BUFLEN_CHG_BLK_MODE);
	}
	if (sense) {
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
	}
	return (rval);
}


/*
 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines
 * implement driver support for getting and setting the CD speed. The command
 * set used will be based on the device type. If the device has not been
 * identified as MMC the Toshiba vendor specific mode page will be used. If
 * the device is MMC but does not support the Real Time Streaming feature
 * the SET CD SPEED command will be used to set speed and mode page 0x2A will
 * be used to read the speed.
 */

/*
 * Function: sr_change_speed()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		drive speed ioctl requests for devices supporting the Toshiba
 *		vendor specific drive speed mode page. Support for returning
 *		and changing the current drive speed in use by the device is
 *		implemented.
26687 * 26688 * Arguments: dev - the device 'dev_t' 26689 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26690 * CDROMSDRVSPEED (set) 26691 * data - current drive speed or requested drive speed 26692 * flag - this argument is a pass through to ddi_copyxxx() directly 26693 * from the mode argument of ioctl(). 26694 * 26695 * Return Code: the code returned by sd_send_scsi_cmd() 26696 * EINVAL if invalid arguments are provided 26697 * EFAULT if ddi_copyxxx() fails 26698 * ENXIO if fail ddi_get_soft_state 26699 * EIO if invalid mode sense block descriptor length 26700 */ 26701 26702 static int 26703 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26704 { 26705 struct sd_lun *un = NULL; 26706 struct mode_header *sense_mhp, *select_mhp; 26707 struct mode_speed *sense_page, *select_page; 26708 int current_speed; 26709 int rval = EINVAL; 26710 int bd_len; 26711 uchar_t *sense = NULL; 26712 uchar_t *select = NULL; 26713 sd_ssc_t *ssc; 26714 26715 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26716 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26717 return (ENXIO); 26718 } 26719 26720 /* 26721 * Note: The drive speed is being modified here according to a Toshiba 26722 * vendor specific mode page (0x31). 
26723 */ 26724 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26725 26726 ssc = sd_ssc_init(un); 26727 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 26728 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 26729 SD_PATH_STANDARD); 26730 sd_ssc_fini(ssc); 26731 if (rval != 0) { 26732 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26733 "sr_change_speed: Mode Sense Failed\n"); 26734 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26735 return (rval); 26736 } 26737 sense_mhp = (struct mode_header *)sense; 26738 26739 /* Check the block descriptor len to handle only 1 block descriptor */ 26740 bd_len = sense_mhp->bdesc_length; 26741 if (bd_len > MODE_BLK_DESC_LENGTH) { 26742 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26743 "sr_change_speed: Mode Sense returned invalid block " 26744 "descriptor length\n"); 26745 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26746 return (EIO); 26747 } 26748 26749 sense_page = (struct mode_speed *) 26750 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 26751 current_speed = sense_page->speed; 26752 26753 /* Process command */ 26754 switch (cmd) { 26755 case CDROMGDRVSPEED: 26756 /* Return the drive speed obtained during the mode sense */ 26757 if (current_speed == 0x2) { 26758 current_speed = CDROM_TWELVE_SPEED; 26759 } 26760 if (ddi_copyout(¤t_speed, (void *)data, 26761 sizeof (int), flag) != 0) { 26762 rval = EFAULT; 26763 } 26764 break; 26765 case CDROMSDRVSPEED: 26766 /* Validate the requested drive speed */ 26767 switch ((uchar_t)data) { 26768 case CDROM_TWELVE_SPEED: 26769 data = 0x2; 26770 /*FALLTHROUGH*/ 26771 case CDROM_NORMAL_SPEED: 26772 case CDROM_DOUBLE_SPEED: 26773 case CDROM_QUAD_SPEED: 26774 case CDROM_MAXIMUM_SPEED: 26775 break; 26776 default: 26777 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26778 "sr_change_speed: " 26779 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 26780 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26781 return (EINVAL); 26782 } 26783 26784 /* 26785 * The current drive speed matches the 
requested drive speed so 26786 * there is no need to send the mode select to change the speed 26787 */ 26788 if (current_speed == data) { 26789 break; 26790 } 26791 26792 /* Build the select data for the requested drive speed */ 26793 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26794 select_mhp = (struct mode_header *)select; 26795 select_mhp->bdesc_length = 0; 26796 select_page = 26797 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 26798 select_page = 26799 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 26800 select_page->mode_page.code = CDROM_MODE_SPEED; 26801 select_page->mode_page.length = 2; 26802 select_page->speed = (uchar_t)data; 26803 26804 /* Send the mode select for the requested block size */ 26805 ssc = sd_ssc_init(un); 26806 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 26807 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26808 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26809 sd_ssc_fini(ssc); 26810 if (rval != 0) { 26811 /* 26812 * The mode select failed for the requested drive speed, 26813 * so reset the data for the original drive speed and 26814 * send it to the target. The error is indicated by the 26815 * return value for the failed mode select. 
26816 */ 26817 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26818 "sr_drive_speed: Mode Select Failed\n"); 26819 select_page->speed = sense_page->speed; 26820 ssc = sd_ssc_init(un); 26821 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 26822 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26823 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26824 sd_ssc_fini(ssc); 26825 } 26826 break; 26827 default: 26828 /* should not reach here, but check anyway */ 26829 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26830 "sr_change_speed: Command '%x' Not Supported\n", cmd); 26831 rval = EINVAL; 26832 break; 26833 } 26834 26835 if (select) { 26836 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 26837 } 26838 if (sense) { 26839 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26840 } 26841 26842 return (rval); 26843 } 26844 26845 26846 /* 26847 * Function: sr_atapi_change_speed() 26848 * 26849 * Description: This routine is the driver entry point for handling CD-ROM 26850 * drive speed ioctl requests for MMC devices that do not support 26851 * the Real Time Streaming feature (0x107). 26852 * 26853 * Note: This routine will use the SET SPEED command which may not 26854 * be supported by all devices. 26855 * 26856 * Arguments: dev- the device 'dev_t' 26857 * cmd- the request type; one of CDROMGDRVSPEED (get) or 26858 * CDROMSDRVSPEED (set) 26859 * data- current drive speed or requested drive speed 26860 * flag- this argument is a pass through to ddi_copyxxx() directly 26861 * from the mode argument of ioctl(). 
26862 * 26863 * Return Code: the code returned by sd_send_scsi_cmd() 26864 * EINVAL if invalid arguments are provided 26865 * EFAULT if ddi_copyxxx() fails 26866 * ENXIO if fail ddi_get_soft_state 26867 * EIO if invalid mode sense block descriptor length 26868 */ 26869 26870 static int 26871 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26872 { 26873 struct sd_lun *un; 26874 struct uscsi_cmd *com = NULL; 26875 struct mode_header_grp2 *sense_mhp; 26876 uchar_t *sense_page; 26877 uchar_t *sense = NULL; 26878 char cdb[CDB_GROUP5]; 26879 int bd_len; 26880 int current_speed = 0; 26881 int max_speed = 0; 26882 int rval; 26883 sd_ssc_t *ssc; 26884 26885 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26886 26887 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26888 return (ENXIO); 26889 } 26890 26891 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 26892 26893 ssc = sd_ssc_init(un); 26894 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 26895 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 26896 SD_PATH_STANDARD); 26897 sd_ssc_fini(ssc); 26898 if (rval != 0) { 26899 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26900 "sr_atapi_change_speed: Mode Sense Failed\n"); 26901 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26902 return (rval); 26903 } 26904 26905 /* Check the block descriptor len to handle only 1 block descriptor */ 26906 sense_mhp = (struct mode_header_grp2 *)sense; 26907 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 26908 if (bd_len > MODE_BLK_DESC_LENGTH) { 26909 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26910 "sr_atapi_change_speed: Mode Sense returned invalid " 26911 "block descriptor length\n"); 26912 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26913 return (EIO); 26914 } 26915 26916 /* Calculate the current and maximum drive speeds */ 26917 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26918 current_speed = (sense_page[14] << 8) | sense_page[15]; 26919 max_speed 
= (sense_page[8] << 8) | sense_page[9]; 26920 26921 /* Process the command */ 26922 switch (cmd) { 26923 case CDROMGDRVSPEED: 26924 current_speed /= SD_SPEED_1X; 26925 if (ddi_copyout(¤t_speed, (void *)data, 26926 sizeof (int), flag) != 0) 26927 rval = EFAULT; 26928 break; 26929 case CDROMSDRVSPEED: 26930 /* Convert the speed code to KB/sec */ 26931 switch ((uchar_t)data) { 26932 case CDROM_NORMAL_SPEED: 26933 current_speed = SD_SPEED_1X; 26934 break; 26935 case CDROM_DOUBLE_SPEED: 26936 current_speed = 2 * SD_SPEED_1X; 26937 break; 26938 case CDROM_QUAD_SPEED: 26939 current_speed = 4 * SD_SPEED_1X; 26940 break; 26941 case CDROM_TWELVE_SPEED: 26942 current_speed = 12 * SD_SPEED_1X; 26943 break; 26944 case CDROM_MAXIMUM_SPEED: 26945 current_speed = 0xffff; 26946 break; 26947 default: 26948 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26949 "sr_atapi_change_speed: invalid drive speed %d\n", 26950 (uchar_t)data); 26951 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26952 return (EINVAL); 26953 } 26954 26955 /* Check the request against the drive's max speed. */ 26956 if (current_speed != 0xffff) { 26957 if (current_speed > max_speed) { 26958 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26959 return (EINVAL); 26960 } 26961 } 26962 26963 /* 26964 * Build and send the SET SPEED command 26965 * 26966 * Note: The SET SPEED (0xBB) command used in this routine is 26967 * obsolete per the SCSI MMC spec but still supported in the 26968 * MT FUJI vendor spec. Most equipment is adhereing to MT FUJI 26969 * therefore the command is still implemented in this routine. 
26970 */ 26971 bzero(cdb, sizeof (cdb)); 26972 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 26973 cdb[2] = (uchar_t)(current_speed >> 8); 26974 cdb[3] = (uchar_t)current_speed; 26975 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26976 com->uscsi_cdb = (caddr_t)cdb; 26977 com->uscsi_cdblen = CDB_GROUP5; 26978 com->uscsi_bufaddr = NULL; 26979 com->uscsi_buflen = 0; 26980 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26981 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 26982 break; 26983 default: 26984 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26985 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 26986 rval = EINVAL; 26987 } 26988 26989 if (sense) { 26990 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26991 } 26992 if (com) { 26993 kmem_free(com, sizeof (*com)); 26994 } 26995 return (rval); 26996 } 26997 26998 26999 /* 27000 * Function: sr_pause_resume() 27001 * 27002 * Description: This routine is the driver entry point for handling CD-ROM 27003 * pause/resume ioctl requests. This only affects the audio play 27004 * operation. 27005 * 27006 * Arguments: dev - the device 'dev_t' 27007 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 27008 * for setting the resume bit of the cdb. 
27009 * 27010 * Return Code: the code returned by sd_send_scsi_cmd() 27011 * EINVAL if invalid mode specified 27012 * 27013 */ 27014 27015 static int 27016 sr_pause_resume(dev_t dev, int cmd) 27017 { 27018 struct sd_lun *un; 27019 struct uscsi_cmd *com; 27020 char cdb[CDB_GROUP1]; 27021 int rval; 27022 27023 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27024 return (ENXIO); 27025 } 27026 27027 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27028 bzero(cdb, CDB_GROUP1); 27029 cdb[0] = SCMD_PAUSE_RESUME; 27030 switch (cmd) { 27031 case CDROMRESUME: 27032 cdb[8] = 1; 27033 break; 27034 case CDROMPAUSE: 27035 cdb[8] = 0; 27036 break; 27037 default: 27038 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 27039 " Command '%x' Not Supported\n", cmd); 27040 rval = EINVAL; 27041 goto done; 27042 } 27043 27044 com->uscsi_cdb = cdb; 27045 com->uscsi_cdblen = CDB_GROUP1; 27046 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27047 27048 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27049 SD_PATH_STANDARD); 27050 27051 done: 27052 kmem_free(com, sizeof (*com)); 27053 return (rval); 27054 } 27055 27056 27057 /* 27058 * Function: sr_play_msf() 27059 * 27060 * Description: This routine is the driver entry point for handling CD-ROM 27061 * ioctl requests to output the audio signals at the specified 27062 * starting address and continue the audio play until the specified 27063 * ending address (CDROMPLAYMSF) The address is in Minute Second 27064 * Frame (MSF) format. 27065 * 27066 * Arguments: dev - the device 'dev_t' 27067 * data - pointer to user provided audio msf structure, 27068 * specifying start/end addresses. 27069 * flag - this argument is a pass through to ddi_copyxxx() 27070 * directly from the mode argument of ioctl(). 
27071 * 27072 * Return Code: the code returned by sd_send_scsi_cmd() 27073 * EFAULT if ddi_copyxxx() fails 27074 * ENXIO if fail ddi_get_soft_state 27075 * EINVAL if data pointer is NULL 27076 */ 27077 27078 static int 27079 sr_play_msf(dev_t dev, caddr_t data, int flag) 27080 { 27081 struct sd_lun *un; 27082 struct uscsi_cmd *com; 27083 struct cdrom_msf msf_struct; 27084 struct cdrom_msf *msf = &msf_struct; 27085 char cdb[CDB_GROUP1]; 27086 int rval; 27087 27088 if (data == NULL) { 27089 return (EINVAL); 27090 } 27091 27092 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27093 return (ENXIO); 27094 } 27095 27096 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 27097 return (EFAULT); 27098 } 27099 27100 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27101 bzero(cdb, CDB_GROUP1); 27102 cdb[0] = SCMD_PLAYAUDIO_MSF; 27103 if (un->un_f_cfg_playmsf_bcd == TRUE) { 27104 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 27105 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 27106 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 27107 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 27108 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 27109 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 27110 } else { 27111 cdb[3] = msf->cdmsf_min0; 27112 cdb[4] = msf->cdmsf_sec0; 27113 cdb[5] = msf->cdmsf_frame0; 27114 cdb[6] = msf->cdmsf_min1; 27115 cdb[7] = msf->cdmsf_sec1; 27116 cdb[8] = msf->cdmsf_frame1; 27117 } 27118 com->uscsi_cdb = cdb; 27119 com->uscsi_cdblen = CDB_GROUP1; 27120 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27121 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27122 SD_PATH_STANDARD); 27123 kmem_free(com, sizeof (*com)); 27124 return (rval); 27125 } 27126 27127 27128 /* 27129 * Function: sr_play_trkind() 27130 * 27131 * Description: This routine is the driver entry point for handling CD-ROM 27132 * ioctl requests to output the audio signals at the specified 27133 * starting address and continue the audio play until the specified 27134 * ending address (CDROMPLAYTRKIND). 
The address is in Track Index 27135 * format. 27136 * 27137 * Arguments: dev - the device 'dev_t' 27138 * data - pointer to user provided audio track/index structure, 27139 * specifying start/end addresses. 27140 * flag - this argument is a pass through to ddi_copyxxx() 27141 * directly from the mode argument of ioctl(). 27142 * 27143 * Return Code: the code returned by sd_send_scsi_cmd() 27144 * EFAULT if ddi_copyxxx() fails 27145 * ENXIO if fail ddi_get_soft_state 27146 * EINVAL if data pointer is NULL 27147 */ 27148 27149 static int 27150 sr_play_trkind(dev_t dev, caddr_t data, int flag) 27151 { 27152 struct cdrom_ti ti_struct; 27153 struct cdrom_ti *ti = &ti_struct; 27154 struct uscsi_cmd *com = NULL; 27155 char cdb[CDB_GROUP1]; 27156 int rval; 27157 27158 if (data == NULL) { 27159 return (EINVAL); 27160 } 27161 27162 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 27163 return (EFAULT); 27164 } 27165 27166 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27167 bzero(cdb, CDB_GROUP1); 27168 cdb[0] = SCMD_PLAYAUDIO_TI; 27169 cdb[4] = ti->cdti_trk0; 27170 cdb[5] = ti->cdti_ind0; 27171 cdb[7] = ti->cdti_trk1; 27172 cdb[8] = ti->cdti_ind1; 27173 com->uscsi_cdb = cdb; 27174 com->uscsi_cdblen = CDB_GROUP1; 27175 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27176 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27177 SD_PATH_STANDARD); 27178 kmem_free(com, sizeof (*com)); 27179 return (rval); 27180 } 27181 27182 27183 /* 27184 * Function: sr_read_all_subcodes() 27185 * 27186 * Description: This routine is the driver entry point for handling CD-ROM 27187 * ioctl requests to return raw subcode data while the target is 27188 * playing audio (CDROMSUBCODE). 27189 * 27190 * Arguments: dev - the device 'dev_t' 27191 * data - pointer to user provided cdrom subcode structure, 27192 * specifying the transfer length and address. 27193 * flag - this argument is a pass through to ddi_copyxxx() 27194 * directly from the mode argument of ioctl(). 
27195 * 27196 * Return Code: the code returned by sd_send_scsi_cmd() 27197 * EFAULT if ddi_copyxxx() fails 27198 * ENXIO if fail ddi_get_soft_state 27199 * EINVAL if data pointer is NULL 27200 */ 27201 27202 static int 27203 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 27204 { 27205 struct sd_lun *un = NULL; 27206 struct uscsi_cmd *com = NULL; 27207 struct cdrom_subcode *subcode = NULL; 27208 int rval; 27209 size_t buflen; 27210 char cdb[CDB_GROUP5]; 27211 27212 #ifdef _MULTI_DATAMODEL 27213 /* To support ILP32 applications in an LP64 world */ 27214 struct cdrom_subcode32 cdrom_subcode32; 27215 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 27216 #endif 27217 if (data == NULL) { 27218 return (EINVAL); 27219 } 27220 27221 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27222 return (ENXIO); 27223 } 27224 27225 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 27226 27227 #ifdef _MULTI_DATAMODEL 27228 switch (ddi_model_convert_from(flag & FMODELS)) { 27229 case DDI_MODEL_ILP32: 27230 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 27231 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27232 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27233 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27234 return (EFAULT); 27235 } 27236 /* Convert the ILP32 uscsi data from the application to LP64 */ 27237 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 27238 break; 27239 case DDI_MODEL_NONE: 27240 if (ddi_copyin(data, subcode, 27241 sizeof (struct cdrom_subcode), flag)) { 27242 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27243 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27244 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27245 return (EFAULT); 27246 } 27247 break; 27248 } 27249 #else /* ! 
_MULTI_DATAMODEL */ 27250 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 27251 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27252 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27253 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27254 return (EFAULT); 27255 } 27256 #endif /* _MULTI_DATAMODEL */ 27257 27258 /* 27259 * Since MMC-2 expects max 3 bytes for length, check if the 27260 * length input is greater than 3 bytes 27261 */ 27262 if ((subcode->cdsc_length & 0xFF000000) != 0) { 27263 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27264 "sr_read_all_subcodes: " 27265 "cdrom transfer length too large: %d (limit %d)\n", 27266 subcode->cdsc_length, 0xFFFFFF); 27267 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27268 return (EINVAL); 27269 } 27270 27271 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 27272 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27273 bzero(cdb, CDB_GROUP5); 27274 27275 if (un->un_f_mmc_cap == TRUE) { 27276 cdb[0] = (char)SCMD_READ_CD; 27277 cdb[2] = (char)0xff; 27278 cdb[3] = (char)0xff; 27279 cdb[4] = (char)0xff; 27280 cdb[5] = (char)0xff; 27281 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27282 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27283 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 27284 cdb[10] = 1; 27285 } else { 27286 /* 27287 * Note: A vendor specific command (0xDF) is being used her to 27288 * request a read of all subcodes. 
27289 */ 27290 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 27291 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 27292 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27293 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27294 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 27295 } 27296 com->uscsi_cdb = cdb; 27297 com->uscsi_cdblen = CDB_GROUP5; 27298 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 27299 com->uscsi_buflen = buflen; 27300 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27301 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27302 SD_PATH_STANDARD); 27303 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27304 kmem_free(com, sizeof (*com)); 27305 return (rval); 27306 } 27307 27308 27309 /* 27310 * Function: sr_read_subchannel() 27311 * 27312 * Description: This routine is the driver entry point for handling CD-ROM 27313 * ioctl requests to return the Q sub-channel data of the CD 27314 * current position block. (CDROMSUBCHNL) The data includes the 27315 * track number, index number, absolute CD-ROM address (LBA or MSF 27316 * format per the user) , track relative CD-ROM address (LBA or MSF 27317 * format per the user), control data and audio status. 27318 * 27319 * Arguments: dev - the device 'dev_t' 27320 * data - pointer to user provided cdrom sub-channel structure 27321 * flag - this argument is a pass through to ddi_copyxxx() 27322 * directly from the mode argument of ioctl(). 
27323 * 27324 * Return Code: the code returned by sd_send_scsi_cmd() 27325 * EFAULT if ddi_copyxxx() fails 27326 * ENXIO if fail ddi_get_soft_state 27327 * EINVAL if data pointer is NULL 27328 */ 27329 27330 static int 27331 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 27332 { 27333 struct sd_lun *un; 27334 struct uscsi_cmd *com; 27335 struct cdrom_subchnl subchanel; 27336 struct cdrom_subchnl *subchnl = &subchanel; 27337 char cdb[CDB_GROUP1]; 27338 caddr_t buffer; 27339 int rval; 27340 27341 if (data == NULL) { 27342 return (EINVAL); 27343 } 27344 27345 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27346 (un->un_state == SD_STATE_OFFLINE)) { 27347 return (ENXIO); 27348 } 27349 27350 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 27351 return (EFAULT); 27352 } 27353 27354 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 27355 bzero(cdb, CDB_GROUP1); 27356 cdb[0] = SCMD_READ_SUBCHANNEL; 27357 /* Set the MSF bit based on the user requested address format */ 27358 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 27359 /* 27360 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 27361 * returned 27362 */ 27363 cdb[2] = 0x40; 27364 /* 27365 * Set byte 3 to specify the return data format. A value of 0x01 27366 * indicates that the CD-ROM current position should be returned. 
27367 */ 27368 cdb[3] = 0x01; 27369 cdb[8] = 0x10; 27370 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27371 com->uscsi_cdb = cdb; 27372 com->uscsi_cdblen = CDB_GROUP1; 27373 com->uscsi_bufaddr = buffer; 27374 com->uscsi_buflen = 16; 27375 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27376 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27377 SD_PATH_STANDARD); 27378 if (rval != 0) { 27379 kmem_free(buffer, 16); 27380 kmem_free(com, sizeof (*com)); 27381 return (rval); 27382 } 27383 27384 /* Process the returned Q sub-channel data */ 27385 subchnl->cdsc_audiostatus = buffer[1]; 27386 subchnl->cdsc_adr = (buffer[5] & 0xF0); 27387 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 27388 subchnl->cdsc_trk = buffer[6]; 27389 subchnl->cdsc_ind = buffer[7]; 27390 if (subchnl->cdsc_format & CDROM_LBA) { 27391 subchnl->cdsc_absaddr.lba = 27392 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27393 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27394 subchnl->cdsc_reladdr.lba = 27395 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 27396 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 27397 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 27398 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 27399 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 27400 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 27401 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 27402 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 27403 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 27404 } else { 27405 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 27406 subchnl->cdsc_absaddr.msf.second = buffer[10]; 27407 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 27408 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 27409 subchnl->cdsc_reladdr.msf.second = buffer[14]; 27410 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 27411 } 27412 kmem_free(buffer, 16); 27413 kmem_free(com, sizeof (*com)); 27414 if 
(ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 27415 != 0) { 27416 return (EFAULT); 27417 } 27418 return (rval); 27419 } 27420 27421 27422 /* 27423 * Function: sr_read_tocentry() 27424 * 27425 * Description: This routine is the driver entry point for handling CD-ROM 27426 * ioctl requests to read from the Table of Contents (TOC) 27427 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 27428 * fields, the starting address (LBA or MSF format per the user) 27429 * and the data mode if the user specified track is a data track. 27430 * 27431 * Note: The READ HEADER (0x44) command used in this routine is 27432 * obsolete per the SCSI MMC spec but still supported in the 27433 * MT FUJI vendor spec. Most equipment is adhereing to MT FUJI 27434 * therefore the command is still implemented in this routine. 27435 * 27436 * Arguments: dev - the device 'dev_t' 27437 * data - pointer to user provided toc entry structure, 27438 * specifying the track # and the address format 27439 * (LBA or MSF). 27440 * flag - this argument is a pass through to ddi_copyxxx() 27441 * directly from the mode argument of ioctl(). 
27442 * 27443 * Return Code: the code returned by sd_send_scsi_cmd() 27444 * EFAULT if ddi_copyxxx() fails 27445 * ENXIO if fail ddi_get_soft_state 27446 * EINVAL if data pointer is NULL 27447 */ 27448 27449 static int 27450 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 27451 { 27452 struct sd_lun *un = NULL; 27453 struct uscsi_cmd *com; 27454 struct cdrom_tocentry toc_entry; 27455 struct cdrom_tocentry *entry = &toc_entry; 27456 caddr_t buffer; 27457 int rval; 27458 char cdb[CDB_GROUP1]; 27459 27460 if (data == NULL) { 27461 return (EINVAL); 27462 } 27463 27464 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27465 (un->un_state == SD_STATE_OFFLINE)) { 27466 return (ENXIO); 27467 } 27468 27469 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 27470 return (EFAULT); 27471 } 27472 27473 /* Validate the requested track and address format */ 27474 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 27475 return (EINVAL); 27476 } 27477 27478 if (entry->cdte_track == 0) { 27479 return (EINVAL); 27480 } 27481 27482 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 27483 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27484 bzero(cdb, CDB_GROUP1); 27485 27486 cdb[0] = SCMD_READ_TOC; 27487 /* Set the MSF bit based on the user requested address format */ 27488 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 27489 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27490 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 27491 } else { 27492 cdb[6] = entry->cdte_track; 27493 } 27494 27495 /* 27496 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 
27497 * (4 byte TOC response header + 8 byte track descriptor) 27498 */ 27499 cdb[8] = 12; 27500 com->uscsi_cdb = cdb; 27501 com->uscsi_cdblen = CDB_GROUP1; 27502 com->uscsi_bufaddr = buffer; 27503 com->uscsi_buflen = 0x0C; 27504 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 27505 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27506 SD_PATH_STANDARD); 27507 if (rval != 0) { 27508 kmem_free(buffer, 12); 27509 kmem_free(com, sizeof (*com)); 27510 return (rval); 27511 } 27512 27513 /* Process the toc entry */ 27514 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 27515 entry->cdte_ctrl = (buffer[5] & 0x0F); 27516 if (entry->cdte_format & CDROM_LBA) { 27517 entry->cdte_addr.lba = 27518 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27519 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27520 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 27521 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 27522 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 27523 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 27524 /* 27525 * Send a READ TOC command using the LBA address format to get 27526 * the LBA for the track requested so it can be used in the 27527 * READ HEADER request 27528 * 27529 * Note: The MSF bit of the READ HEADER command specifies the 27530 * output format. The block address specified in that command 27531 * must be in LBA format. 
27532 */ 27533 cdb[1] = 0; 27534 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27535 SD_PATH_STANDARD); 27536 if (rval != 0) { 27537 kmem_free(buffer, 12); 27538 kmem_free(com, sizeof (*com)); 27539 return (rval); 27540 } 27541 } else { 27542 entry->cdte_addr.msf.minute = buffer[9]; 27543 entry->cdte_addr.msf.second = buffer[10]; 27544 entry->cdte_addr.msf.frame = buffer[11]; 27545 /* 27546 * Send a READ TOC command using the LBA address format to get 27547 * the LBA for the track requested so it can be used in the 27548 * READ HEADER request 27549 * 27550 * Note: The MSF bit of the READ HEADER command specifies the 27551 * output format. The block address specified in that command 27552 * must be in LBA format. 27553 */ 27554 cdb[1] = 0; 27555 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27556 SD_PATH_STANDARD); 27557 if (rval != 0) { 27558 kmem_free(buffer, 12); 27559 kmem_free(com, sizeof (*com)); 27560 return (rval); 27561 } 27562 } 27563 27564 /* 27565 * Build and send the READ HEADER command to determine the data mode of 27566 * the user specified track. 27567 */ 27568 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 27569 (entry->cdte_track != CDROM_LEADOUT)) { 27570 bzero(cdb, CDB_GROUP1); 27571 cdb[0] = SCMD_READ_HEADER; 27572 cdb[2] = buffer[8]; 27573 cdb[3] = buffer[9]; 27574 cdb[4] = buffer[10]; 27575 cdb[5] = buffer[11]; 27576 cdb[8] = 0x08; 27577 com->uscsi_buflen = 0x08; 27578 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27579 SD_PATH_STANDARD); 27580 if (rval == 0) { 27581 entry->cdte_datamode = buffer[0]; 27582 } else { 27583 /* 27584 * READ HEADER command failed, since this is 27585 * obsoleted in one spec, its better to return 27586 * -1 for an invlid track so that we can still 27587 * receive the rest of the TOC data. 
27588 */ 27589 entry->cdte_datamode = (uchar_t)-1; 27590 } 27591 } else { 27592 entry->cdte_datamode = (uchar_t)-1; 27593 } 27594 27595 kmem_free(buffer, 12); 27596 kmem_free(com, sizeof (*com)); 27597 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 27598 return (EFAULT); 27599 27600 return (rval); 27601 } 27602 27603 27604 /* 27605 * Function: sr_read_tochdr() 27606 * 27607 * Description: This routine is the driver entry point for handling CD-ROM 27608 * ioctl requests to read the Table of Contents (TOC) header 27609 * (CDROMREADTOHDR). The TOC header consists of the disk starting 27610 * and ending track numbers 27611 * 27612 * Arguments: dev - the device 'dev_t' 27613 * data - pointer to user provided toc header structure, 27614 * specifying the starting and ending track numbers. 27615 * flag - this argument is a pass through to ddi_copyxxx() 27616 * directly from the mode argument of ioctl(). 27617 * 27618 * Return Code: the code returned by sd_send_scsi_cmd() 27619 * EFAULT if ddi_copyxxx() fails 27620 * ENXIO if fail ddi_get_soft_state 27621 * EINVAL if data pointer is NULL 27622 */ 27623 27624 static int 27625 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 27626 { 27627 struct sd_lun *un; 27628 struct uscsi_cmd *com; 27629 struct cdrom_tochdr toc_header; 27630 struct cdrom_tochdr *hdr = &toc_header; 27631 char cdb[CDB_GROUP1]; 27632 int rval; 27633 caddr_t buffer; 27634 27635 if (data == NULL) { 27636 return (EINVAL); 27637 } 27638 27639 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27640 (un->un_state == SD_STATE_OFFLINE)) { 27641 return (ENXIO); 27642 } 27643 27644 buffer = kmem_zalloc(4, KM_SLEEP); 27645 bzero(cdb, CDB_GROUP1); 27646 cdb[0] = SCMD_READ_TOC; 27647 /* 27648 * Specifying a track number of 0x00 in the READ TOC command indicates 27649 * that the TOC header should be returned 27650 */ 27651 cdb[6] = 0x00; 27652 /* 27653 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 
27654 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 27655 */ 27656 cdb[8] = 0x04; 27657 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27658 com->uscsi_cdb = cdb; 27659 com->uscsi_cdblen = CDB_GROUP1; 27660 com->uscsi_bufaddr = buffer; 27661 com->uscsi_buflen = 0x04; 27662 com->uscsi_timeout = 300; 27663 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27664 27665 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27666 SD_PATH_STANDARD); 27667 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27668 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 27669 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 27670 } else { 27671 hdr->cdth_trk0 = buffer[2]; 27672 hdr->cdth_trk1 = buffer[3]; 27673 } 27674 kmem_free(buffer, 4); 27675 kmem_free(com, sizeof (*com)); 27676 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 27677 return (EFAULT); 27678 } 27679 return (rval); 27680 } 27681 27682 27683 /* 27684 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 27685 * sr_read_cdda(), sr_read_cdxa(), routines implement driver support for 27686 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 27687 * digital audio and extended architecture digital audio. These modes are 27688 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 27689 * MMC specs. 27690 * 27691 * In addition to support for the various data formats these routines also 27692 * include support for devices that implement only the direct access READ 27693 * commands (0x08, 0x28), devices that implement the READ_CD commands 27694 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 27695 * READ CDXA commands (0xD8, 0xDB) 27696 */ 27697 27698 /* 27699 * Function: sr_read_mode1() 27700 * 27701 * Description: This routine is the driver entry point for handling CD-ROM 27702 * ioctl read mode1 requests (CDROMREADMODE1). 
27703 * 27704 * Arguments: dev - the device 'dev_t' 27705 * data - pointer to user provided cd read structure specifying 27706 * the lba buffer address and length. 27707 * flag - this argument is a pass through to ddi_copyxxx() 27708 * directly from the mode argument of ioctl(). 27709 * 27710 * Return Code: the code returned by sd_send_scsi_cmd() 27711 * EFAULT if ddi_copyxxx() fails 27712 * ENXIO if fail ddi_get_soft_state 27713 * EINVAL if data pointer is NULL 27714 */ 27715 27716 static int 27717 sr_read_mode1(dev_t dev, caddr_t data, int flag) 27718 { 27719 struct sd_lun *un; 27720 struct cdrom_read mode1_struct; 27721 struct cdrom_read *mode1 = &mode1_struct; 27722 int rval; 27723 sd_ssc_t *ssc; 27724 27725 #ifdef _MULTI_DATAMODEL 27726 /* To support ILP32 applications in an LP64 world */ 27727 struct cdrom_read32 cdrom_read32; 27728 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27729 #endif /* _MULTI_DATAMODEL */ 27730 27731 if (data == NULL) { 27732 return (EINVAL); 27733 } 27734 27735 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27736 (un->un_state == SD_STATE_OFFLINE)) { 27737 return (ENXIO); 27738 } 27739 27740 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27741 "sd_read_mode1: entry: un:0x%p\n", un); 27742 27743 #ifdef _MULTI_DATAMODEL 27744 switch (ddi_model_convert_from(flag & FMODELS)) { 27745 case DDI_MODEL_ILP32: 27746 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27747 return (EFAULT); 27748 } 27749 /* Convert the ILP32 uscsi data from the application to LP64 */ 27750 cdrom_read32tocdrom_read(cdrd32, mode1); 27751 break; 27752 case DDI_MODEL_NONE: 27753 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27754 return (EFAULT); 27755 } 27756 } 27757 #else /* ! 
_MULTI_DATAMODEL */ 27758 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27759 return (EFAULT); 27760 } 27761 #endif /* _MULTI_DATAMODEL */ 27762 27763 ssc = sd_ssc_init(un); 27764 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 27765 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 27766 sd_ssc_fini(ssc); 27767 27768 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27769 "sd_read_mode1: exit: un:0x%p\n", un); 27770 27771 return (rval); 27772 } 27773 27774 27775 /* 27776 * Function: sr_read_cd_mode2() 27777 * 27778 * Description: This routine is the driver entry point for handling CD-ROM 27779 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27780 * support the READ CD (0xBE) command or the 1st generation 27781 * READ CD (0xD4) command. 27782 * 27783 * Arguments: dev - the device 'dev_t' 27784 * data - pointer to user provided cd read structure specifying 27785 * the lba buffer address and length. 27786 * flag - this argument is a pass through to ddi_copyxxx() 27787 * directly from the mode argument of ioctl(). 
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_read	mode2_struct;
	struct cdrom_read	*mode2 = &mode2_struct;
	uchar_t			cdb[CDB_GROUP5];
	int			nblocks;
	int			rval;
#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/* Select opcode based on the device's configured capability */
	bzero(cdb, sizeof (cdb));
	if (un->un_f_cfg_read_cd_xd4 == TRUE) {
		/* Read command supported by 1st generation atapi drives */
		cdb[0] = SCMD_READ_CDD4;
	} else {
		/* Universal CD Access Command */
		cdb[0] = SCMD_READ_CD;
	}

	/*
	 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
	 */
	cdb[1] = CDROM_SECTOR_TYPE_MODE2;

	/* set the start address (big-endian LBA in bytes 2-5) */
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF);
	cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF);
	cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length: whole 2336-byte mode 2 sectors only */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[6] = (uchar_t)(nblocks >> 16);
	cdb[7] = (uchar_t)(nblocks >> 8);
	cdb[8] = (uchar_t)nblocks;

	/* set the filter bits */
	cdb[9] = CDROM_READ_CD_USERDATA;

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	/* Data goes straight to the caller's user-space buffer */
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_mode2()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl read mode2 requests (CDROMREADMODE2) for devices that
 *		do not support the READ CD (0xBE) command.
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to user provided cd read structure specifying
 *			the lba buffer address and length.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 *		EIO if fail to reset block size
 *		EAGAIN if commands are in progress in the driver
 */

static int
sr_read_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct cdrom_read	mode2_struct;
	struct cdrom_read	*mode2 = &mode2_struct;
	int			rval;
	uint32_t		restore_blksize;
	struct uscsi_cmd	*com;
	uchar_t			cdb[CDB_GROUP0];
	int			nblocks;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/*
	 * Because this routine will update the device and driver block size
	 * being used we want to make sure there are no commands in progress.
	 * If commands are in progress the user will have to try again.
	 *
	 * We check for 1 instead of 0 because we increment un_ncmds_in_driver
	 * in sdioctl to protect commands from sdioctl through to the top of
	 * sd_uscsi_strategy. See sdioctl for details.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_ncmds_in_driver != 1) {
		mutex_exit(SD_MUTEX(un));
		return (EAGAIN);
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * NOTE(review): SD_LOG_ATTACH_DETACH looks odd for an ioctl path;
	 * confirm the intended trace class before changing.
	 */
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode2: entry: un:0x%p\n", un);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/* Store the current target block size for restoration later */
	restore_blksize = un->un_tgt_blocksize;

	/* Change the device and soft state target block size to 2336 */
	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
		rval = EIO;
		goto done;
	}


	bzero(cdb, sizeof (cdb));

	/* set READ operation */
	cdb[0] = SCMD_READ;

	/* adjust lba for 2kbyte blocks from 512 byte blocks */
	mode2->cdread_lba >>= 2;

	/* set the start address (GROUP0 CDB: 21-bit LBA in bytes 1-3) */
	cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F);
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length: whole 2336-byte sectors only */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[4] = (uchar_t)nblocks & 0xFF;

	/* build command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	/*
	 * Issue SCSI command with user space address for read buffer.
	 *
	 * This sends the command through main channel in the driver.
	 *
	 * Since this is accessed via an IOCTL call, we go through the
	 * standard path, so that if the device was powered down, then
	 * it would be 'awakened' to handle the command.
	 */
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(com, sizeof (*com));

	/* Restore the device and soft state target block size */
	if (sr_sector_mode(dev, restore_blksize) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "can't do switch back to mode 1\n");
		/*
		 * If sd_send_scsi_READ succeeded we still need to report
		 * an error because we failed to reset the block size
		 */
		if (rval == 0) {
			rval = EIO;
		}
	}

done:
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode2: exit: un:0x%p\n", un);

	return (rval);
}


/*
 * Function: sr_sector_mode()
 *
 * Description: This utility function is used by sr_read_mode2 to set the target
 *		block size based on the user specified size. This is a legacy
 *		implementation based upon a vendor specific mode page
 *
 * Arguments: dev	- the device 'dev_t'
 *		blksize	- flag indicating if block size is being set to 2336 or
 *			512.
 *
 * Return Code: 0 if the block mode was changed successfully
 *		ENXIO if fail ddi_get_soft_state
 *		otherwise the code returned by the MODE SENSE / MODE SELECT
 *		helpers (this routine performs no ddi_copyxxx(), so the
 *		EFAULT/EINVAL cases listed previously cannot occur here)
 */

static int
sr_sector_mode(dev_t dev, uint32_t blksize)
{
	struct sd_lun	*un;
	uchar_t		*sense;
	uchar_t		*select;
	int		rval;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	sense = kmem_zalloc(20, KM_SLEEP);

	/* Note: This is a vendor specific mode page (0x81) */
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sr_sector_mode: Mode Sense failed\n");
		kmem_free(sense, 20);
		return (rval);
	}
	/*
	 * Build the mode select buffer: block-descriptor length, new block
	 * size (bytes 10-11), then the vendor page; bytes 14-15 carry over
	 * the sensed values.  Offsets/values here are vendor specific --
	 * presumably matching the 0x81 page layout; confirm against the
	 * vendor documentation before altering.
	 */
	select = kmem_zalloc(20, KM_SLEEP);
	select[3] = 0x08;
	select[10] = ((blksize >> 8) & 0xff);
	select[11] = (blksize & 0xff);
	select[12] = 0x01;
	select[13] = 0x06;
	select[14] = sense[14];
	select[15] = sense[15];
	if (blksize == SD_MODE2_BLKSIZE) {
		select[14] |= 0x01;
	}

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20,
	    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sr_sector_mode: Mode Select failed\n");
	} else {
		/*
		 * Only update the softstate block size if we successfully
		 * changed the device block mode.
		 */
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, blksize, 0);
		mutex_exit(SD_MUTEX(un));
	}
	kmem_free(sense, 20);
	kmem_free(select, 20);
	return (rval);
}


/*
 * Function: sr_read_cdda()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If
 *		the target supports CDDA these requests are handled via a vendor
 *		specific command (0xD8) If the target does not support CDDA
 *		these requests are handled via the READ CD command (0xBE).
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to user provided CD-DA structure specifying
 *			the track starting address, transfer length, and
 *			subcode options.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if invalid arguments are provided
 *		ENOTTY
 */

static int
sr_read_cdda(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_cdda	*cdda;
	int			rval;
	size_t			buflen;
	char			cdb[CDB_GROUP5];

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_cdda32	cdrom_cdda32;
	struct cdrom_cdda32	*cdda32 = &cdrom_cdda32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_cdda: ddi_copyin Failed\n");
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_cdda32tocdrom_cdda(cdda32, cdda);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_cdda: ddi_copyin Failed\n");
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdda: ddi_copyin Failed\n");
		kmem_free(cdda, sizeof (struct cdrom_cdda));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((cdda->cdda_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdda->cdda_length, 0xFFFFFF);
		kmem_free(cdda, sizeof (struct cdrom_cdda));
		return (EINVAL);
	}

	/* Transfer size per block depends on how much subcode is requested */
	switch (cdda->cdda_subcode) {
	case CDROM_DA_NO_SUBCODE:
		buflen = CDROM_BLK_2352 * cdda->cdda_length;
		break;
	case CDROM_DA_SUBQ:
		buflen = CDROM_BLK_2368 * cdda->cdda_length;
		break;
	case CDROM_DA_ALL_SUBCODE:
		buflen = CDROM_BLK_2448 * cdda->cdda_length;
		break;
	case CDROM_DA_SUBCODE_ONLY:
		buflen = CDROM_BLK_SUBCODE * cdda->cdda_length;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdda: Subcode '0x%x' Not Supported\n",
		    cdda->cdda_subcode);
		kmem_free(cdda, sizeof (struct cdrom_cdda));
		return (EINVAL);
	}

	/* Build and send the command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_cfg_cdda == TRUE) {
		/* READ CD (0xBE): CDDA sector type with raw data + subcode */
		cdb[0] = (char)SCMD_READ_CD;
		cdb[1] = 0x04;
		cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
		cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdda->cdda_length) & 0x000000ff);
		cdb[9] = 0x10;
		/* Map the ioctl subcode selector onto the READ CD encoding */
		switch (cdda->cdda_subcode) {
		case CDROM_DA_NO_SUBCODE :
			cdb[10] = 0x0;
			break;
		case CDROM_DA_SUBQ :
			cdb[10] = 0x2;
			break;
		case CDROM_DA_ALL_SUBCODE :
			cdb[10] = 0x1;
			break;
		case CDROM_DA_SUBCODE_ONLY :
			/* FALLTHROUGH */
		default :
			/* READ CD cannot return subcode alone */
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			kmem_free(com, sizeof (*com));
			return (ENOTTY);
		}
	} else {
		/* Vendor specific READ CDDA (0xD8): 4-byte length field */
		cdb[0] = (char)SCMD_READ_CDDA;
		cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
		cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24);
		cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdda->cdda_length) & 0x000000ff);
		cdb[10] = cdda->cdda_subcode;
	}

	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdda->cdda_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);

	kmem_free(cdda, sizeof (struct cdrom_cdda));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_cdxa()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return CD-XA (Extended Architecture) data.
 *		(CDROMCDXA).
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to user provided CD-XA structure specifying
 *			the data starting address, transfer length, and format
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_cdxa(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_cdxa	*cdxa;
	int			rval;
	size_t			buflen;
	char			cdb[CDB_GROUP5];
	uchar_t			read_flags;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_cdxa32	cdrom_cdxa32;
	struct cdrom_cdxa32	*cdxa32 = &cdrom_cdxa32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		/*
		 * Convert the ILP32 uscsi data from the
		 * application to LP64 for internal use.
		 */
		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdxa->cdxa_length, 0xFFFFFF);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	/* Per-block transfer size and READ CD filter bits per XA format */
	switch (cdxa->cdxa_format) {
	case CDROM_XA_DATA:
		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
		read_flags = 0x10;
		break;
	case CDROM_XA_SECTOR_DATA:
		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
		read_flags = 0xf8;
		break;
	case CDROM_XA_DATA_W_ERROR:
		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
		read_flags = 0xfc;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
		    cdxa->cdxa_format);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);
	if (un->un_f_mmc_cap == TRUE) {
		/* MMC capable: use READ CD (0xBE) with a 3-byte length */
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[9] = (char)read_flags;
	} else {
		/*
		 * Note: A vendor specific command (0xDB) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_CDXA;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
		cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[10] = cdxa->cdxa_format;
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
	    SD_PATH_STANDARD);
	kmem_free(cdxa, sizeof (struct cdrom_cdxa));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_eject()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT)
 *
 * Arguments: dev	- the device 'dev_t'
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 */

static int
sr_eject(dev_t dev)
{
	struct sd_lun	*un;
	int		rval;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/*
	 * To prevent race conditions with the eject
	 * command, keep track of an eject command as
	 * it progresses. If we are already handling
	 * an eject command in the driver for the given
	 * unit and another request to eject is received
	 * immediately return EAGAIN so we don't lose
	 * the command if the current eject command fails.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_ejecting == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return (EAGAIN);
	}
	un->un_f_ejecting = TRUE;
	mutex_exit(SD_MUTEX(un));

	/* Media must be unlocked before the drive will honor an eject */
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	if (rval != 0) {
		mutex_enter(SD_MUTEX(un));
		un->un_f_ejecting = FALSE;
		mutex_exit(SD_MUTEX(un));
		return (rval);
	}

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
	    SD_TARGET_EJECT, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	if (rval == 0) {
		/* Invalidate geometry and wake any media-state waiters */
		mutex_enter(SD_MUTEX(un));
		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		un->un_f_ejecting = FALSE;
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));
	} else {
		mutex_enter(SD_MUTEX(un));
		un->un_f_ejecting = FALSE;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function: sr_ejected()
 *
 * Description: This routine updates the soft state structure to invalidate the
 *		geometry information after the media has been ejected or a
 *		media eject has been detected.
 *
 * Arguments: un - driver soft state (unit) structure
 */

static void
sr_ejected(struct sd_lun *un)
{
	struct sd_errstats *stp;

	/* Caller must hold SD_MUTEX; it is dropped around cmlb_invalidate */
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	un->un_f_blockcount_is_valid = FALSE;
	un->un_f_tgt_blocksize_is_valid = FALSE;
	mutex_exit(SD_MUTEX(un));
	cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
	mutex_enter(SD_MUTEX(un));

	/* Zero the reported capacity now that no media is present */
	if (un->un_errstats != NULL) {
		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		stp->sd_capacity.value.ui64 = 0;
	}
}


/*
 * Function: sr_check_wp()
 *
 * Description: This routine checks the write protection of a removable
 *	media disk and hotpluggable devices via the write protect bit of
 *	the Mode Page Header device specific field. Some devices choke
 *	on unsupported mode page. In order to workaround this issue,
 *	this routine has been implemented to use 0x3f mode page(request
 *	for all pages) for all device types.
 *
 * Arguments: dev		- the device 'dev_t'
 *
 * Return Code: int indicating if the device is write protected (1) or not (0)
 *
 * Context: Kernel thread.
 *
 */

static int
sr_check_wp(dev_t dev)
{
	struct sd_lun	*un;
	uchar_t		device_specific;
	uchar_t		*sense;
	int		hdrlen;
	int		rval = FALSE;
	int		status;
	sd_ssc_t	*ssc;

	/*
	 * Note: The return codes for this routine should be reworked to
	 * properly handle the case of a NULL softstate.
	 */
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (FALSE);
	}

	if (un->un_f_cfg_is_atapi == TRUE) {
		/*
		 * The mode page contents are not required; set the allocation
		 * length for the mode page header only
		 */
		hdrlen = MODE_HEADER_LENGTH_GRP2;
		sense = kmem_zalloc(hdrlen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (status != 0)
			goto err_exit;
		device_specific =
		    ((struct mode_header_grp2 *)sense)->device_specific;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
		sense = kmem_zalloc(hdrlen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (status != 0)
			goto err_exit;
		device_specific =
		    ((struct mode_header *)sense)->device_specific;
	}


	/*
	 * The device is write protected when the WP bit is set in the
	 * device specific field of the mode parameter header.  If the
	 * mode sense above failed we reach err_exit with rval still
	 * FALSE: not all disks understand this query, and such devices
	 * are assumed to not be writable.
	 */
	if (device_specific & WRITE_PROTECT) {
		rval = TRUE;
	}

err_exit:
	kmem_free(sense, hdrlen);
	return (rval);
}

/*
 * Function: sr_volume_ctrl()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		audio output volume ioctl requests. (CDROMVOLCTRL)
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to user audio volume control structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 *
 */

static int
sr_volume_ctrl(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct cdrom_volctrl	volume;
	struct cdrom_volctrl	*vol = &volume;
	uchar_t			*sense_page;
	uchar_t			*select_page;
	uchar_t			*sense;
	uchar_t			*select;
	int			sense_buflen;
	int			select_buflen;
	int			rval;
	sd_ssc_t		*ssc;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) {
		return (EFAULT);
	}

	/*
	 * ATAPI/MMC devices use the 10-byte (GROUP1) mode commands and the
	 * grp2 parameter header; legacy SCSI devices use the 6-byte forms.
	 * In both arms we mode-sense the audio control page, locate the page
	 * data past the header and block descriptors, and prepare a select
	 * buffer with zeroed header length fields as MODE SELECT requires.
	 */
	if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
		struct mode_header_grp2	*sense_mhp;
		struct mode_header_grp2	*select_mhp;
		int			bd_len;

		sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN;
		select_buflen = MODE_HEADER_LENGTH_GRP2 +
		    MODEPAGE_AUDIO_CTRL_LEN;
		sense = kmem_zalloc(sense_buflen, KM_SLEEP);
		select = kmem_zalloc(select_buflen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
		    sense_buflen, MODEPAGE_AUDIO_CTRL,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);

		if (rval != 0) {
			SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
			    "sr_volume_ctrl: Mode Sense Failed\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (rval);
		}
		sense_mhp = (struct mode_header_grp2 *)sense;
		select_mhp = (struct mode_header_grp2 *)select;
		bd_len = (sense_mhp->bdesc_length_hi << 8) |
		    sense_mhp->bdesc_length_lo;
		if (bd_len > MODE_BLK_DESC_LENGTH) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense returned invalid "
			    "block descriptor length\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (EIO);
		}
		sense_page = (uchar_t *)
		    (sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
		select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2);
		select_mhp->length_msb = 0;
		select_mhp->length_lsb = 0;
		select_mhp->bdesc_length_hi = 0;
		select_mhp->bdesc_length_lo = 0;
	} else {
		struct mode_header	*sense_mhp, *select_mhp;

		sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
		select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
		sense = kmem_zalloc(sense_buflen, KM_SLEEP);
		select = kmem_zalloc(select_buflen, KM_SLEEP);
		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
		    sense_buflen, MODEPAGE_AUDIO_CTRL,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);

		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense Failed\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (rval);
		}
		sense_mhp = (struct mode_header *)sense;
		select_mhp = (struct mode_header *)select;
		if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense returned invalid "
			    "block descriptor length\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (EIO);
		}
		sense_page = (uchar_t *)
		    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
		select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
		select_mhp->length = 0;
		select_mhp->bdesc_length = 0;
	}
	/*
	 * Note: An audio control data structure could be created and overlayed
	 * on the following in place of the array indexing method implemented.
	 */

	/* Build the select data for the user volume data */
	select_page[0] = MODEPAGE_AUDIO_CTRL;
	select_page[1] = 0xE;
	/* Set the immediate bit */
	select_page[2] = 0x04;
	/* Zero out reserved fields */
	select_page[3] = 0x00;
	select_page[4] = 0x00;
	/* Return sense data for fields not to be modified */
	select_page[5] = sense_page[5];
	select_page[6] = sense_page[6];
	select_page[7] = sense_page[7];
	/* Set the user specified volume levels for channel 0 and 1 */
	select_page[8] = 0x01;
	select_page[9] = vol->channel0;
	select_page[10] = 0x02;
	select_page[11] = vol->channel1;
	/* Channel 2 and 3 are currently unsupported so return the sense data */
	select_page[12] = sense_page[12];
	select_page[13] = sense_page[13];
	select_page[14] = sense_page[14];
	select_page[15] = sense_page[15];

	ssc = sd_ssc_init(un);
	if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	} else {
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	}
	sd_ssc_fini(ssc);

	kmem_free(sense, sense_buflen);
	kmem_free(select, select_buflen);
	return (rval);
}


/*
 * Function: sr_read_sony_session_offset()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests for session offset information. (CDROMREADOFFSET)
 *		The address of the first track in the last session of a
 *		multi-session CD-ROM is returned
 *
 *		Note: This routine uses a vendor specific key value in the
 *		command control field without implementing any vendor check here
 *		or in the ioctl routine.
28803 * 28804 * Arguments: dev - the device 'dev_t' 28805 * data - pointer to an int to hold the requested address 28806 * flag - this argument is a pass through to ddi_copyxxx() 28807 * directly from the mode argument of ioctl(). 28808 * 28809 * Return Code: the code returned by sd_send_scsi_cmd() 28810 * EFAULT if ddi_copyxxx() fails 28811 * ENXIO if fail ddi_get_soft_state 28812 * EINVAL if data pointer is NULL 28813 */ 28814 28815 static int 28816 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 28817 { 28818 struct sd_lun *un; 28819 struct uscsi_cmd *com; 28820 caddr_t buffer; 28821 char cdb[CDB_GROUP1]; 28822 int session_offset = 0; 28823 int rval; 28824 28825 if (data == NULL) { 28826 return (EINVAL); 28827 } 28828 28829 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28830 (un->un_state == SD_STATE_OFFLINE)) { 28831 return (ENXIO); 28832 } 28833 28834 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 28835 bzero(cdb, CDB_GROUP1); 28836 cdb[0] = SCMD_READ_TOC; 28837 /* 28838 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 28839 * (4 byte TOC response header + 8 byte response data) 28840 */ 28841 cdb[8] = SONY_SESSION_OFFSET_LEN; 28842 /* Byte 9 is the control byte. 
A vendor specific value is used */ 28843 cdb[9] = SONY_SESSION_OFFSET_KEY; 28844 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28845 com->uscsi_cdb = cdb; 28846 com->uscsi_cdblen = CDB_GROUP1; 28847 com->uscsi_bufaddr = buffer; 28848 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 28849 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28850 28851 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 28852 SD_PATH_STANDARD); 28853 if (rval != 0) { 28854 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 28855 kmem_free(com, sizeof (*com)); 28856 return (rval); 28857 } 28858 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 28859 session_offset = 28860 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 28861 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 28862 /* 28863 * Offset returned offset in current lbasize block's. Convert to 28864 * 2k block's to return to the user 28865 */ 28866 if (un->un_tgt_blocksize == CDROM_BLK_512) { 28867 session_offset >>= 2; 28868 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 28869 session_offset >>= 1; 28870 } 28871 } 28872 28873 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 28874 rval = EFAULT; 28875 } 28876 28877 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 28878 kmem_free(com, sizeof (*com)); 28879 return (rval); 28880 } 28881 28882 28883 /* 28884 * Function: sd_wm_cache_constructor() 28885 * 28886 * Description: Cache Constructor for the wmap cache for the read/modify/write 28887 * devices. 28888 * 28889 * Arguments: wm - A pointer to the sd_w_map to be initialized. 28890 * un - sd_lun structure for the device. 28891 * flag - the km flags passed to constructor 28892 * 28893 * Return Code: 0 on success. 28894 * -1 on failure. 
 */

/*ARGSUSED*/
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
	/* Start from a clean map and set up the "range available" condvar. */
	bzero(wm, sizeof (struct sd_w_map));
	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
	return (0);
}


/*
 * Function: sd_wm_cache_destructor()
 *
 * Description: Cache destructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm - A pointer to the sd_w_map to be destroyed.
 *		un - sd_lun structure for the device.
 */
/*ARGSUSED*/
static void
sd_wm_cache_destructor(void *wm, void *un)
{
	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
}


/*
 * Function: sd_range_lock()
 *
 * Description: Lock the range of blocks specified as parameter to ensure
 *		that read, modify write is atomic and no other i/o writes
 *		to the same location. The range is specified in terms
 *		of start and end blocks. Block numbers are the actual
 *		media block numbers and not system.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		startb - The starting block number
 *		endb - The end block number
 *		typ - type of i/o - simple/read_modify_write
 *
 * Return Code: wm  - pointer to the wmap structure.
 *
 * Context: This routine can sleep.
 */

static struct sd_w_map *
sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
{
	struct sd_w_map *wmp = NULL;
	struct sd_w_map *sl_wmp = NULL;
	struct sd_w_map *tmp_wmp;
	/* Small state machine: CHK_LIST -> (LOCK_RANGE | WAIT_MAP) -> DONE. */
	wm_state state = SD_WM_CHK_LIST;


	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	while (state != SD_WM_DONE) {

		switch (state) {
		case SD_WM_CHK_LIST:
			/*
			 * This is the starting state. Check the wmap list
			 * to see if the range is currently available.
			 */
			if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
				/*
				 * If this is a simple write and no rmw
				 * i/o is pending then try to lock the
				 * range as the range should be available.
				 */
				state = SD_WM_LOCK_RANGE;
			} else {
				tmp_wmp = sd_get_range(un, startb, endb);
				if (tmp_wmp != NULL) {
					if ((wmp != NULL) && ONLIST(un, wmp)) {
						/*
						 * Should not keep onlist wmps
						 * while waiting this macro
						 * will also do wmp = NULL;
						 */
						FREE_ONLIST_WMAP(un, wmp);
					}
					/*
					 * sl_wmp is the wmap on which wait
					 * is done, since the tmp_wmp points
					 * to the inuse wmap, set sl_wmp to
					 * tmp_wmp and change the state to sleep
					 */
					sl_wmp = tmp_wmp;
					state = SD_WM_WAIT_MAP;
				} else {
					state = SD_WM_LOCK_RANGE;
				}

			}
			break;

		case SD_WM_LOCK_RANGE:
			ASSERT(un->un_wm_cache);
			/*
			 * The range need to be locked, try to get a wmap.
			 * First attempt it with NO_SLEEP, want to avoid a sleep
			 * if possible as we will have to release the sd mutex
			 * if we have to sleep.
			 */
			if (wmp == NULL)
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_NOSLEEP);
			if (wmp == NULL) {
				mutex_exit(SD_MUTEX(un));
				_NOTE(DATA_READABLE_WITHOUT_LOCK
				    (sd_lun::un_wm_cache))
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_SLEEP);
				mutex_enter(SD_MUTEX(un));
				/*
				 * we released the mutex so recheck and go to
				 * check list state.
				 */
				state = SD_WM_CHK_LIST;
			} else {
				/*
				 * We exit out of state machine since we
				 * have the wmap. Do the housekeeping first.
				 * place the wmap on the wmap list if it is not
				 * on it already and then set the state to done.
				 */
				wmp->wm_start = startb;
				wmp->wm_end = endb;
				wmp->wm_flags = typ | SD_WM_BUSY;
				if (typ & SD_WTYPE_RMW) {
					un->un_rmw_count++;
				}
				/*
				 * If not already on the list then link
				 */
				if (!ONLIST(un, wmp)) {
					wmp->wm_next = un->un_wm;
					wmp->wm_prev = NULL;
					if (wmp->wm_next)
						wmp->wm_next->wm_prev = wmp;
					un->un_wm = wmp;
				}
				state = SD_WM_DONE;
			}
			break;

		case SD_WM_WAIT_MAP:
			ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
			/*
			 * Wait is done on sl_wmp, which is set in the
			 * check_list state.
			 */
			sl_wmp->wm_wanted_count++;
			cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
			sl_wmp->wm_wanted_count--;
			/*
			 * We can reuse the memory from the completed sl_wmp
			 * lock range for our new lock, but only if no one is
			 * waiting for it.
			 */
			ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
			if (sl_wmp->wm_wanted_count == 0) {
				if (wmp != NULL)
					CHK_N_FREEWMP(un, wmp);
				wmp = sl_wmp;
			}
			sl_wmp = NULL;
			/*
			 * After waking up, need to recheck for availability of
			 * range.
			 */
			state = SD_WM_CHK_LIST;
			break;

		default:
			panic("sd_range_lock: "
			    "Unknown state %d in sd_range_lock", state);
			/*NOTREACHED*/
		} /* switch(state) */

	} /* while(state != SD_WM_DONE) */

	mutex_exit(SD_MUTEX(un));

	/* Loop can only terminate with a wmap in hand. */
	ASSERT(wmp != NULL);

	return (wmp);
}


/*
 * Function: sd_get_range()
 *
 * Description: Find if there any overlapping I/O to this one
 *		Returns the write-map of 1st such I/O, NULL otherwise.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		startb - The starting block number
 *		endb - The end block number
 *
 * Return Code: wm  - pointer to the wmap structure.
 */

static struct sd_w_map *
sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
{
	struct sd_w_map *wmp;

	ASSERT(un != NULL);

	/*
	 * Walk the wmap list; only BUSY (locked) maps count as conflicts.
	 * A map conflicts if either endpoint of [startb, endb] falls inside
	 * its range.
	 */
	for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
		if (!(wmp->wm_flags & SD_WM_BUSY)) {
			continue;
		}
		if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
			break;
		}
		if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
			break;
		}
	}

	/* NULL when the loop ran off the end (no overlap found). */
	return (wmp);
}


/*
 * Function: sd_free_inlist_wmap()
 *
 * Description: Unlink and free a write map struct.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		wmp - sd_w_map which needs to be unlinked.
 */

static void
sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
{
	ASSERT(un != NULL);

	/* Unlink from the doubly-linked un_wm list. */
	if (un->un_wm == wmp) {
		un->un_wm = wmp->wm_next;
	} else {
		wmp->wm_prev->wm_next = wmp->wm_next;
	}

	if (wmp->wm_next) {
		wmp->wm_next->wm_prev = wmp->wm_prev;
	}

	wmp->wm_next = wmp->wm_prev = NULL;

	kmem_cache_free(un->un_wm_cache, wmp);
}


/*
 * Function: sd_range_unlock()
 *
 * Description: Unlock the range locked by wm.
 *		Free write map if nobody else is waiting on it.
 *
 * Arguments: un	- sd_lun structure for the device.
 *		wmp - sd_w_map which needs to be unlinked.
 */

static void
sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
{
	ASSERT(un != NULL);
	ASSERT(wm != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	/* Balance the un_rmw_count increment made in sd_range_lock(). */
	if (wm->wm_flags & SD_WTYPE_RMW) {
		un->un_rmw_count--;
	}

	if (wm->wm_wanted_count) {
		wm->wm_flags = 0;
		/*
		 * Broadcast that the wmap is available now.
		 */
		cv_broadcast(&wm->wm_avail);
	} else {
		/*
		 * If no one is waiting on the map, it should be free'ed.
		 */
		sd_free_inlist_wmap(un, wm);
	}

	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_read_modify_write_task
 *
 * Description: Called from a taskq thread to initiate the write phase of
 *		a read-modify-write request.  This is used for targets where
 *		un->un_sys_blocksize != un->un_tgt_blocksize.
 *
 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
 *
 * Context: Called under taskq thread context.
 */

static void
sd_read_modify_write_task(void *arg)
{
	struct sd_mapblocksize_info	*bsp;
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;

	bp = arg;	/* The bp is given in arg */
	ASSERT(bp != NULL);

	/* Get the pointer to the layer-private data struct */
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);

	/*
	 * This is the write phase of a read-modify-write request, called
	 * under the context of a taskq thread in response to the completion
	 * of the read portion of the rmw request completing under interrupt
	 * context. The write request must be sent from here down the iostart
	 * chain as if it were being sent from sd_mapblocksize_iostart(), so
	 * we use the layer index saved in the layer-private data area.
	 */
	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
}


/*
 * Function: sddump_do_read_of_rmw()
 *
 * Description: This routine will be called from sddump, If sddump is called
 *		with an I/O which is not aligned on device blocksize boundary
 *		then the write has to be converted to read-modify-write.
 *		Do the read part here in order to keep sddump simple.
 *		Note - That the sd_mutex is held across the call to this
 *		routine.
 *
 * Arguments: un	- sd_lun
 *		blkno - block number in terms of media block size.
 *		nblk - number of blocks.
 *		bpp - pointer to pointer to the buf structure. On return
 *			from this function, *bpp points to the valid buffer
 *			to which the write has to be done.
 *
 * Return Code: 0 for success or errno-type return code
 */

static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
	struct buf **bpp)
{
	int err;
	int i;
	int rval;
	struct buf *bp;
	struct scsi_pkt *pkt = NULL;
	uint32_t target_blocksize;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	target_blocksize = un->un_tgt_blocksize;

	/* Dropped here; reacquired at "done" so the caller's lock is intact. */
	mutex_exit(SD_MUTEX(un));

	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
	if (bp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
	    blkno, nblk);
	if (rval != 0) {
		scsi_free_consistent_buf(bp);
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	/* Dump context: the command must complete by polling, no interrupts. */
	pkt->pkt_flags |= FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {

		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.  We should only check
		 * errors if this condition is not true.  Even then we should
		 * send our own request sense packet only if we have a check
		 * condition and auto request sense has not been performed by
		 * the hba.
		 */
		SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");

		if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
			err = 0;
			break;
		}

		/*
		 * Check CMD_DEV_GONE 1st, give up if device is gone,
		 * no need to read RQS data.
		 */
		if (pkt->pkt_reason == CMD_DEV_GONE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Error while dumping state with rmw..."
			    "Device is gone\n");
			break;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with CHECK, try # %d\n", i);
			if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
				(void) sd_send_polled_RQS(un);
			}

			continue;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
			int reset_retval = 0;

			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with BUSY, try # %d\n", i);

			/* Escalate: LUN reset first (if enabled), then target. */
			if (un->un_f_lun_reset_enabled == TRUE) {
				reset_retval = scsi_reset(SD_ADDRESS(un),
				    RESET_LUN);
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
			(void) sd_send_polled_RQS(un);

		} else {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with 0x%x, try # %d\n",
			    SD_GET_PKT_STATUS(pkt), i);
			mutex_enter(SD_MUTEX(un));
			sd_reset_target(un, pkt);
			mutex_exit(SD_MUTEX(un));
		}

		/*
		 * If we are not getting anywhere with lun/target resets,
		 * let's reset the bus.
		 */
		if (i > SD_NDUMP_RETRIES/2) {
			(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			(void) sd_send_polled_RQS(un);
		}

	}
	scsi_destroy_pkt(pkt);

	if (err != 0) {
		scsi_free_consistent_buf(bp);
		*bpp = NULL;
	} else {
		*bpp = bp;
	}

done:
	mutex_enter(SD_MUTEX(un));
	return (err);
}


/*
 * Function: sd_failfast_flushq
 *
 * Description: Take all bp's on the wait queue that have B_FAILFAST set
 *		in b_flags and move them onto the failfast queue, then kick
 *		off a thread to return all bp's on the failfast queue to
 *		their owners with an error set.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

static void
sd_failfast_flushq(struct sd_lun *un)
{
	struct buf *bp;
	struct buf *next_waitq_bp;
	struct buf *prev_waitq_bp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
	ASSERT(un->un_failfast_bp == NULL);

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: entry: un:0x%p\n", un);

	/*
	 * Check if we should flush all bufs when entering failfast state, or
	 * just those with B_FAILFAST set.
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		/*
		 * Move *all* bp's on the wait queue to the failfast flush
		 * queue, including those that do NOT have B_FAILFAST set.
		 * The whole waitq is spliced onto the failfast queue tail.
		 */
		if (un->un_failfast_headp == NULL) {
			ASSERT(un->un_failfast_tailp == NULL);
			un->un_failfast_headp = un->un_waitq_headp;
		} else {
			ASSERT(un->un_failfast_tailp != NULL);
			un->un_failfast_tailp->av_forw = un->un_waitq_headp;
		}

		un->un_failfast_tailp = un->un_waitq_tailp;

		/* update kstat for each bp moved out of the waitq */
		for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
		}

		/* empty the waitq */
		un->un_waitq_headp = un->un_waitq_tailp = NULL;

	} else {
		/*
		 * Go thru the wait queue, pick off all entries with
		 * B_FAILFAST set, and move these onto the failfast queue.
		 */
		for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
			/*
			 * Save the pointer to the next bp on the wait queue,
			 * so we get to it on the next iteration of this loop.
			 */
			next_waitq_bp = bp->av_forw;

			/*
			 * If this bp from the wait queue does NOT have
			 * B_FAILFAST set, just move on to the next element
			 * in the wait queue. Note, this is the only place
			 * where it is correct to set prev_waitq_bp.
			 */
			if ((bp->b_flags & B_FAILFAST) == 0) {
				prev_waitq_bp = bp;
				continue;
			}

			/*
			 * Remove the bp from the wait queue.
			 */
			if (bp == un->un_waitq_headp) {
				/* The bp is the first element of the waitq. */
				un->un_waitq_headp = next_waitq_bp;
				if (un->un_waitq_headp == NULL) {
					/* The wait queue is now empty */
					un->un_waitq_tailp = NULL;
				}
			} else {
				/*
				 * The bp is either somewhere in the middle
				 * or at the end of the wait queue.
				 */
				ASSERT(un->un_waitq_headp != NULL);
				ASSERT(prev_waitq_bp != NULL);
				ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
				    == 0);
				if (bp == un->un_waitq_tailp) {
					/* bp is the last entry on the waitq. */
					ASSERT(next_waitq_bp == NULL);
					un->un_waitq_tailp = prev_waitq_bp;
				}
				prev_waitq_bp->av_forw = next_waitq_bp;
			}
			bp->av_forw = NULL;

			/*
			 * update kstat since the bp is moved out of
			 * the waitq
			 */
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);

			/*
			 * Now put the bp onto the failfast queue.
			 */
			if (un->un_failfast_headp == NULL) {
				/* failfast queue is currently empty */
				ASSERT(un->un_failfast_tailp == NULL);
				un->un_failfast_headp =
				    un->un_failfast_tailp = bp;
			} else {
				/* Add the bp to the end of the failfast q */
				ASSERT(un->un_failfast_tailp != NULL);
				ASSERT(un->un_failfast_tailp->b_flags &
				    B_FAILFAST);
				un->un_failfast_tailp->av_forw = bp;
				un->un_failfast_tailp = bp;
			}
		}
	}

	/*
	 * Now return all bp's on the failfast queue to their owners.
	 */
	while ((bp = un->un_failfast_headp) != NULL) {

		un->un_failfast_headp = bp->av_forw;
		if (un->un_failfast_headp == NULL) {
			un->un_failfast_tailp = NULL;
		}

		/*
		 * We want to return the bp with a failure error code, but
		 * we do not want a call to sd_start_cmds() to occur here,
		 * so use sd_return_failed_command_no_restart() instead of
		 * sd_return_failed_command().
		 */
		sd_return_failed_command_no_restart(un, bp, EIO);
	}

	/* Flush the xbuf queues if required. */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}


/*
 * Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 * Arguments: bp - ptr to buf struct to be examined.
 *
 * Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



/*
 * Function: sd_setup_next_xfer
 *
 * Description: Prepare next I/O operation using DMA_PARTIAL
 *
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate next block number and amount to be transferred.
	 *
	 * How much data NOT transferred to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * figure how many blocks NOT transferred to the HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * set starting block number to the end of what WAS transferred.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer.  sd_setup_next_rw_pkt
	 * will call scsi_initpkt with NULL_FUNC so we do not have to release
	 * the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transferred.
		 */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;

		return (1);
	}

	/*
	 * There's really only one possible return value from
	 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt
	 * returns NULL.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}

/*
 * Function: sd_panic_for_res_conflict
 *
 * Description: Call panic with a string formatted with "Reservation Conflict"
 *		and a human readable identifier indicating the SD instance
 *		that experienced the reservation conflict.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

#define	SD_RESV_CONFLICT_FMT_LEN 40
void
sd_panic_for_res_conflict(struct sd_lun *un)
{
	char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
	char path_str[MAXPATHLEN];

	(void) snprintf(panic_str, sizeof (panic_str),
	    "Reservation Conflict\nDisk: %s",
	    ddi_pathname(SD_DEVINFO(un), path_str));

	panic(panic_str);
}

/*
 * Note: The following sd_faultinjection_ioctl( ) routines implement
 * driver support for handling fault injection for error analysis
 * causing faults in multiple layers of the driver.
 *
 */

#ifdef SD_FAULT_INJECTION
static uint_t   sd_fault_injection_on = 0;

/*
 * Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *		faultinjection ioctls to inject errors into the
 *		layer model
 *
 * Arguments: cmd	- the ioctl cmd received
 *		arg - the arguments from user and returns
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
{

	uint_t i = 0;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Start Injection Session; reset the fifo and the log. */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		/* The log has its own mutex, separate from SD_MUTEX. */
		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Empty stray or unused structs from fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;

	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR allowed in Queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				/* copyin failed; release the slot */
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store a xb struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store a un struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}

		break;

	case SDIOCINSERTARQ:
		/* Store a arq struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push stored xb, pkt, un, and arq onto fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			/* arg carries the number of slots to push */
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			/* i is still 0 here; push a single slot */
			if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return buffer of log from Injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retreive");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid setting
			 * it to NULL for return
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
	    " exit\n");
}


/*
 * Function: sd_injection_log()
 *
 * Description: This routine adds buff to the already existing injection log
 *		for retrieval via faultinjection_ioctl for use in fault
 *		detection and recovery
 *
 * Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	/* Clamp each entry to 255 bytes. */
	len = min(strlen(buf), 255);
	/* Add logged value to Injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t	offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}


/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on error injection scenario.
 *
 * Arguments: pktp	- completed packet to be modified in place
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp xb and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off, or the fifo is empty, return untouched */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	SD_INFO(SD_LOG_SDTEST, un,
	    "sd_faultinjection: is working for copying\n");

	/* take next set off fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/*
	 * Set variables accordingly.  SD_CONDSET (defined elsewhere in this
	 * file) conditionally overwrites the destination field from the
	 * injected fi_* struct and logs the change via sd_injection_log.
	 */
	/* set pkt if it was on fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		/* 0xff appears to be the "no cdb injected" sentinel */
		if (fi_pkt->pkt_cdbp != 0xff)
			SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}
	/* set xb if it was on fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		if (fi_xb->xb_retry_count != 0)
			SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		/*
		 * if (fi_xb->xb_sense_data[0] != -1) {
		 *	bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
		 *	SENSE_LENGTH);
		 * }
		 */
		bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH);

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_code, "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_key, "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_add_code, "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_qual_code, "es_qual_code");
		/* mark the fabricated sense data as extended-sense format */
		struct scsi_extended_sense *esp;
		esp = (struct scsi_extended_sense *)xb->xb_sense_data;
		esp->es_class = CLASS_EXTENDED_SENSE;
	}

	/* set un if it was on fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in auto request sense if it was on fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free does not guarantee to set to NULL.
	 * Since we use these pointers to determine whether an injection
	 * was stored, make sure they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	/* consume this fifo slot */
	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */

/*
 * This routine is invoked in sd_unit_attach(). Before calling it, the
 * properties in conf file should be processed already, and "hotpluggable"
 * property has been processed as well.
 *
 * The sd driver distinguishes 3 different types of devices: removable media,
 * non-removable media, and hotpluggable. Below the differences are defined:
 *
 * 1. Device ID
 *
 *     The device ID of a device is used to identify this device. Refer to
 *     ddi_devid_register(9F).
 *
 *     For a non-removable media disk device which can provide 0x80 or 0x83
 *     VPD page (refer to INQUIRY command of SCSI SPC specification), a unique
 *     device ID is created to identify this device. For other non-removable
 *     media devices, a default device ID is created only if this device has
 *     at least 2 alternate cylinders. Otherwise, this device has no devid.
30185 * 30186 * ------------------------------------------------------- 30187 * removable media hotpluggable | Can Have Device ID 30188 * ------------------------------------------------------- 30189 * false false | Yes 30190 * false true | Yes 30191 * true x | No 30192 * ------------------------------------------------------ 30193 * 30194 * 30195 * 2. SCSI group 4 commands 30196 * 30197 * In SCSI specs, only some commands in group 4 command set can use 30198 * 8-byte addresses that can be used to access >2TB storage spaces. 30199 * Other commands have no such capability. Without supporting group4, 30200 * it is impossible to make full use of storage spaces of a disk with 30201 * capacity larger than 2TB. 30202 * 30203 * ----------------------------------------------- 30204 * removable media hotpluggable LP64 | Group 30205 * ----------------------------------------------- 30206 * false false false | 1 30207 * false false true | 4 30208 * false true false | 1 30209 * false true true | 4 30210 * true x x | 5 30211 * ----------------------------------------------- 30212 * 30213 * 30214 * 3. Check for VTOC Label 30215 * 30216 * If a direct-access disk has no EFI label, sd will check if it has a 30217 * valid VTOC label. Now, sd also does that check for removable media 30218 * and hotpluggable devices. 30219 * 30220 * -------------------------------------------------------------- 30221 * Direct-Access removable media hotpluggable | Check Label 30222 * ------------------------------------------------------------- 30223 * false false false | No 30224 * false false true | No 30225 * false true false | Yes 30226 * false true true | Yes 30227 * true x x | Yes 30228 * -------------------------------------------------------------- 30229 * 30230 * 30231 * 4. Building default VTOC label 30232 * 30233 * As section 3 says, sd checks if some kinds of devices have VTOC label. 
 * If those devices have no valid VTOC label, sd(7d) will attempt to
 * create a default VTOC for them. Currently sd creates a default VTOC label
 * for all devices on the x86 platform (VTOC_16), but only for removable
 * media devices on SPARC (VTOC_8).
 *
 * -----------------------------------------------------------
 *	removable media	hotpluggable	platform	|	Default Label
 * -----------------------------------------------------------
 *	false		false		sparc		|	No
 *	false		true		x86		|	Yes
 *	false		true		sparc		|	Yes
 *	true		x		x		|	Yes
 * ----------------------------------------------------------
 *
 *
 * 5. Supported blocksizes of target devices
 *
 * Sd supports non-512-byte blocksize for removable media devices only.
 * For other devices, only 512-byte blocksize is supported. This may be
 * changed in the near future because some RAID devices require non-512-byte
 * blocksize.
 *
 * -----------------------------------------------------------
 *	removable media		hotpluggable	|	non-512-byte blocksize
 * -----------------------------------------------------------
 *	false			false		|	No
 *	false			true		|	No
 *	true			x		|	Yes
 * -----------------------------------------------------------
 *
 *
 * 6. Automatic mount & unmount
 *
 * Sd(7d) driver provides DKIOCREMOVABLE ioctl. This ioctl is used to query
 * if a device is a removable media device. It returns 1 for removable media
 * devices, and 0 for others.
 *
 * The automatic mounting subsystem should distinguish between the types
 * of devices and apply automounting policies to each.
 *
 *
 * 7. fdisk partition management
 *
 * Fdisk is the traditional partitioning method on the x86 platform. Sd(7d)
 * driver only supports fdisk partitions on the x86 platform. On the sparc
 * platform, sd doesn't support fdisk partitions at all.
Note: pcfs(7fs) can recognize 30280 * fdisk partitions on both x86 and SPARC platform. 30281 * 30282 * ----------------------------------------------------------- 30283 * platform removable media USB/1394 | fdisk supported 30284 * ----------------------------------------------------------- 30285 * x86 X X | true 30286 * ------------------------------------------------------------ 30287 * sparc X X | false 30288 * ------------------------------------------------------------ 30289 * 30290 * 30291 * 8. MBOOT/MBR 30292 * 30293 * Although sd(7d) doesn't support fdisk on SPARC platform, it does support 30294 * read/write mboot for removable media devices on sparc platform. 30295 * 30296 * ----------------------------------------------------------- 30297 * platform removable media USB/1394 | mboot supported 30298 * ----------------------------------------------------------- 30299 * x86 X X | true 30300 * ------------------------------------------------------------ 30301 * sparc false false | false 30302 * sparc false true | true 30303 * sparc true false | true 30304 * sparc true true | true 30305 * ------------------------------------------------------------ 30306 * 30307 * 30308 * 9. error handling during opening device 30309 * 30310 * If failed to open a disk device, an errno is returned. For some kinds 30311 * of errors, different errno is returned depending on if this device is 30312 * a removable media device. This brings USB/1394 hard disks in line with 30313 * expected hard disk behavior. It is not expected that this breaks any 30314 * application. 30315 * 30316 * ------------------------------------------------------ 30317 * removable media hotpluggable | errno 30318 * ------------------------------------------------------ 30319 * false false | EIO 30320 * false true | EIO 30321 * true x | ENXIO 30322 * ------------------------------------------------------ 30323 * 30324 * 30325 * 11. 
ioctls: DKIOCEJECT, CDROMEJECT 30326 * 30327 * These IOCTLs are applicable only to removable media devices. 30328 * 30329 * ----------------------------------------------------------- 30330 * removable media hotpluggable |DKIOCEJECT, CDROMEJECT 30331 * ----------------------------------------------------------- 30332 * false false | No 30333 * false true | No 30334 * true x | Yes 30335 * ----------------------------------------------------------- 30336 * 30337 * 30338 * 12. Kstats for partitions 30339 * 30340 * sd creates partition kstat for non-removable media devices. USB and 30341 * Firewire hard disks now have partition kstats 30342 * 30343 * ------------------------------------------------------ 30344 * removable media hotpluggable | kstat 30345 * ------------------------------------------------------ 30346 * false false | Yes 30347 * false true | Yes 30348 * true x | No 30349 * ------------------------------------------------------ 30350 * 30351 * 30352 * 13. Removable media & hotpluggable properties 30353 * 30354 * Sd driver creates a "removable-media" property for removable media 30355 * devices. Parent nexus drivers create a "hotpluggable" property if 30356 * it supports hotplugging. 30357 * 30358 * --------------------------------------------------------------------- 30359 * removable media hotpluggable | "removable-media" " hotpluggable" 30360 * --------------------------------------------------------------------- 30361 * false false | No No 30362 * false true | No Yes 30363 * true false | Yes No 30364 * true true | Yes Yes 30365 * --------------------------------------------------------------------- 30366 * 30367 * 30368 * 14. Power Management 30369 * 30370 * sd only power manages removable media devices or devices that support 30371 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250) 30372 * 30373 * A parent nexus that supports hotplugging can also set "pm-capable" 30374 * if the disk can be power managed. 
30375 * 30376 * ------------------------------------------------------------ 30377 * removable media hotpluggable pm-capable | power manage 30378 * ------------------------------------------------------------ 30379 * false false false | No 30380 * false false true | Yes 30381 * false true false | No 30382 * false true true | Yes 30383 * true x x | Yes 30384 * ------------------------------------------------------------ 30385 * 30386 * USB and firewire hard disks can now be power managed independently 30387 * of the framebuffer 30388 * 30389 * 30390 * 15. Support for USB disks with capacity larger than 1TB 30391 * 30392 * Currently, sd doesn't permit a fixed disk device with capacity 30393 * larger than 1TB to be used in a 32-bit operating system environment. 30394 * However, sd doesn't do that for removable media devices. Instead, it 30395 * assumes that removable media devices cannot have a capacity larger 30396 * than 1TB. Therefore, using those devices on 32-bit system is partially 30397 * supported, which can cause some unexpected results. 30398 * 30399 * --------------------------------------------------------------------- 30400 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env 30401 * --------------------------------------------------------------------- 30402 * false false | true | no 30403 * false true | true | no 30404 * true false | true | Yes 30405 * true true | true | Yes 30406 * --------------------------------------------------------------------- 30407 * 30408 * 30409 * 16. Check write-protection at open time 30410 * 30411 * When a removable media device is being opened for writing without NDELAY 30412 * flag, sd will check if this device is writable. If attempting to open 30413 * without NDELAY flag a write-protected device, this operation will abort. 
 *
 * ------------------------------------------------------------
 *	removable media		USB/1394	|	WP Check
 * ------------------------------------------------------------
 *	false			false		|	No
 *	false			true		|	No
 *	true			false		|	Yes
 *	true			true		|	Yes
 * ------------------------------------------------------------
 *
 *
 * 17. syslog when corrupted VTOC is encountered
 *
 * Currently, if an invalid VTOC is encountered, sd only prints syslog
 * for fixed SCSI disks.
 * ------------------------------------------------------------
 *	removable media		USB/1394	|	print syslog
 * ------------------------------------------------------------
 *	false			false		|	Yes
 *	false			true		|	No
 *	true			false		|	No
 *	true			true		|	No
 * ------------------------------------------------------------
 */
/*
 * Function: sd_set_unit_attributes
 *
 * Description: Initialize the per-unit feature flags in *un according to
 *		the device class (removable media vs. fixed vs. hotpluggable)
 *		and the properties described in the table above.  Called from
 *		sd_unit_attach() after conf-file properties are processed.
 *
 * Arguments: un	- driver softstate (inquiry data must be valid)
 *            devi	- devinfo node, used for property lookup/creation
 */
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int	pm_cap;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/*
	 * Enable SYNC CACHE support for all devices.
	 */
	un->un_f_sync_cache_supported = TRUE;

	/*
	 * Set the sync cache required flag to false.
	 * This would ensure that there is no SYNC CACHE
	 * sent when there are no writes
	 */
	un->un_f_sync_cache_required = FALSE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * The media of this device is removable. And for this kind
		 * of devices, it is possible to change medium after opening
		 * devices. Thus we should support this operation.
		 */
		un->un_f_has_removable_media = TRUE;

		/*
		 * support non-512-byte blocksize of removable media devices
		 */
		un->un_f_non_devbsize_supported = TRUE;

		/*
		 * Assume that all removable media devices support DOOR_LOCK
		 */
		un->un_f_doorlock_supported = TRUE;

		/*
		 * For a removable media device, it is possible to be opened
		 * with NDELAY flag when there is no media in drive, in this
		 * case we don't care if device is writable. But if without
		 * NDELAY flag, we need to check if media is write-protected.
		 */
		un->un_f_chk_wp_open = TRUE;

		/*
		 * need to start a SCSI watch thread to monitor media state,
		 * when media is being inserted or ejected, notify syseventd.
		 */
		un->un_f_monitor_media_state = TRUE;

		/*
		 * Some devices don't support START_STOP_UNIT command.
		 * Therefore, we'd better check if a device supports it
		 * before sending it.
		 */
		un->un_f_check_start_stop = TRUE;

		/*
		 * support eject media ioctl:
		 *		FDEJECT, DKIOCEJECT, CDROMEJECT
		 */
		un->un_f_eject_media_supported = TRUE;

		/*
		 * Because many removable-media devices don't support
		 * LOG_SENSE, we couldn't use this command to check if
		 * a removable media device supports power-management.
		 * We assume that they support power-management via
		 * START_STOP_UNIT command and can be spun up and down
		 * without limitations.
		 */
		un->un_f_pm_supported = TRUE;

		/*
		 * Need to create a zero length (Boolean) property
		 * removable-media for the removable media devices.
		 * Note that the return value of the property is not being
		 * checked, since if unable to create the property
		 * then do not want the attach to fail altogether. Consistent
		 * with other property creation in attach.
		 */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);

	} else {
		/*
		 * create device ID for device
		 */
		un->un_f_devid_supported = TRUE;

		/*
		 * Spin up non-removable-media devices once it is attached
		 */
		un->un_f_attach_spinup = TRUE;

		/*
		 * According to SCSI specification, Sense data has two kinds of
		 * format: fixed format, and descriptor format. At present, we
		 * don't support descriptor format sense data for removable
		 * media.
		 */
		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/*
		 * kstats are created only for non-removable media devices.
		 *
		 * Set this in sd.conf to 0 in order to disable kstats.  The
		 * default is 1, so they are enabled by default.
		 */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));

		/*
		 * Check if HBA has set the "pm-capable" property.
		 * If "pm-capable" exists and is non-zero then we can
		 * power manage the device without checking the start/stop
		 * cycle count log sense page.
		 *
		 * If "pm-capable" exists and is set to be false (0),
		 * then we should not power manage the device.
		 *
		 * If "pm-capable" doesn't exist then pm_cap will
		 * be set to SD_PM_CAPABLE_UNDEFINED (-1).  In this case,
		 * sd will check the start/stop cycle count log sense page
		 * and power manage the device if the cycle count limit has
		 * not been exceeded.
		 */
		pm_cap = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
		if (SD_PM_CAPABLE_IS_UNDEFINED(pm_cap)) {
			un->un_f_log_sense_supported = TRUE;
			if (!un->un_f_power_condition_disabled &&
			    SD_INQUIRY(un)->inq_ansi == 6) {
				un->un_f_power_condition_supported = TRUE;
			}
		} else {
			/*
			 * pm-capable property exists.
			 *
			 * Convert "TRUE" values for pm_cap to
			 * SD_PM_CAPABLE_IS_TRUE to make it easier to check
			 * later.  "TRUE" values are any values defined in
			 * inquiry.h.
			 */
			if (SD_PM_CAPABLE_IS_FALSE(pm_cap)) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				/* SD_PM_CAPABLE_IS_TRUE case */
				un->un_f_pm_supported = TRUE;
				if (!un->un_f_power_condition_disabled &&
				    SD_PM_CAPABLE_IS_SPC_4(pm_cap)) {
					un->un_f_power_condition_supported =
					    TRUE;
				}
				if (SD_PM_CAP_LOG_SUPPORTED(pm_cap)) {
					un->un_f_log_sense_supported = TRUE;
					un->un_f_pm_log_sense_smart =
					    SD_PM_CAP_SMART_LOG(pm_cap);
				}
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un, un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {

		/*
		 * Have to watch hotpluggable devices as well, since
		 * that's the only way for userland applications to
		 * detect hot removal while device is busy/mounted.
		 */
		un->un_f_monitor_media_state = TRUE;

		un->un_f_check_start_stop = TRUE;

	}
}

/*
 * sd_tg_rdwr:
 *	Provides rdwr access for cmlb via sd_tgops. The start_block is
 *	in sys block size, req_length in bytes.
 *
 */
/*
 * Returns: 0 on success, errno on failure (ENXIO for a bad instance,
 *	EINVAL for an unknown cmd, EIO if the target blocksize cannot be
 *	validated, or the errno from the underlying READ/WRITE).
 *
 * tg_cookie carries the SD_PATH_* flag (direct vs. normal command path).
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int path_flag = (int)(uintptr_t)tg_cookie;
	char *dkl = NULL;	/* bounce buffer for unaligned requests */
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t buffer_size = reqlength;
	int rval = 0;
	diskaddr_t cap;
	uint32_t lbasize;
	sd_ssc_t *ssc;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	ssc = sd_ssc_init(un);
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		/*
		 * Target blocksize unknown: drop the mutex, probe the
		 * device with READ CAPACITY, then revalidate under lock.
		 */
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			goto done1;
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if ((un->un_f_tgt_blocksize_is_valid == FALSE)) {
			mutex_exit(SD_MUTEX(un));
			rval = EIO;
			goto done;
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * sys_blocksize != tgt_blocksize, need to re-adjust
		 * blkno and save the index to beginning of dk_label
		 */
		first_byte = SD_SYSBLOCKS2BYTES(start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to multiple of target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
		    "label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			/* the request is not aligned */
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * The MMC standard allows READ CAPACITY to be
	 * inaccurate by a bounded amount (in the interest of
	 * response latency).  As a result, failed READs are
	 * commonplace (due to the reading of metadata and not
	 * data). Depending on the per-Vendor/drive Sense data,
	 * the failed READ can cause many (unnecessary) retries.
	 */

	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(ssc, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			/* copy the requested span out of the bounce buffer */
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			/*
			 * Unaligned write: read-modify-write via the bounce
			 * buffer so surrounding bytes are preserved.
			 */
			rval = sd_send_scsi_READ(ssc, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				goto done1;
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(ssc, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

done1:
	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
done:
	sd_ssc_fini(ssc);
	return (rval);
}


/*
 * sd_tg_getinfo:
 *	cmlb target-geometry callback.  Answers TG_GETPHYGEOM,
 *	TG_GETVIRTGEOM, TG_GETCAPACITY, TG_GETBLOCKSIZE and TG_GETATTR;
 *	tg_cookie carries the SD_PATH_* flag.
 */
static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{

	struct sd_lun *un;
	diskaddr_t cap;
	uint32_t lbasize;
	int path_flag = (int)(uintptr_t)tg_cookie;
	int ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			/* use the cached values */
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			/* probe the device, then revalidate under lock */
			sd_ssc_t *ssc;
			mutex_exit(SD_MUTEX(un));
			ssc = sd_ssc_init(un);
			ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0) {
				if (ret == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc,
					    SD_FMT_IGNORE);
				sd_ssc_fini(ssc);
				return (ret);
			}
			sd_ssc_fini(ssc);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);

	}
}

/*
 * Function: sd_ssc_ereport_post
 *
 * Description: Will be called when SD driver need to post an ereport.
 *
 * Context: Kernel thread or interrupt context.
 */
static void
sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess)
{
	int uscsi_path_instance = 0;
	uchar_t uscsi_pkt_reason;
	uint32_t uscsi_pkt_state;
	uint32_t uscsi_pkt_statistics;
	uint64_t uscsi_ena;
	uchar_t op_code;
	uint8_t *sensep;
	union scsi_cdb *cdbp;
	uint_t cdblen = 0;
	uint_t senlen = 0;
	struct sd_lun *un;
	dev_info_t *dip;
	char *devid;
	int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON |
	    SSC_FLAGS_INVALID_STATUS |
	    SSC_FLAGS_INVALID_SENSE |
	    SSC_FLAGS_INVALID_DATA;
	char assessment[16];

	ASSERT(ssc != NULL);
	ASSERT(ssc->ssc_uscsi_cmd != NULL);
	ASSERT(ssc->ssc_uscsi_info != NULL);

	un = ssc->ssc_un;
	ASSERT(un != NULL);

	dip = un->un_sd->sd_dev;

	/*
	 * Get the devid:
	 * devid will only be passed to non-transport error reports.
	 */
	devid = DEVI(dip)->devi_devid_str;

	/*
	 * If we are syncing or dumping, the command will not be executed
	 * so we bypass this situation.
30879 */ 30880 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 30881 (un->un_state == SD_STATE_DUMPING)) 30882 return; 30883 30884 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason; 30885 uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance; 30886 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state; 30887 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics; 30888 uscsi_ena = ssc->ssc_uscsi_info->ui_ena; 30889 30890 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 30891 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb; 30892 30893 /* In rare cases, EG:DOORLOCK, the cdb could be NULL */ 30894 if (cdbp == NULL) { 30895 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 30896 "sd_ssc_ereport_post meet empty cdb\n"); 30897 return; 30898 } 30899 30900 op_code = cdbp->scc_cmd; 30901 30902 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen; 30903 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 30904 ssc->ssc_uscsi_cmd->uscsi_rqresid); 30905 30906 if (senlen > 0) 30907 ASSERT(sensep != NULL); 30908 30909 /* 30910 * Initialize drv_assess to corresponding values. 30911 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending 30912 * on the sense-key returned back. 30913 */ 30914 switch (drv_assess) { 30915 case SD_FM_DRV_RECOVERY: 30916 (void) sprintf(assessment, "%s", "recovered"); 30917 break; 30918 case SD_FM_DRV_RETRY: 30919 (void) sprintf(assessment, "%s", "retry"); 30920 break; 30921 case SD_FM_DRV_NOTICE: 30922 (void) sprintf(assessment, "%s", "info"); 30923 break; 30924 case SD_FM_DRV_FATAL: 30925 default: 30926 (void) sprintf(assessment, "%s", "unknown"); 30927 } 30928 /* 30929 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered 30930 * command, we will post ereport.io.scsi.cmd.disk.recovered. 30931 * driver-assessment will always be "recovered" here. 
30932 */ 30933 if (drv_assess == SD_FM_DRV_RECOVERY) { 30934 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30935 "cmd.disk.recovered", uscsi_ena, devid, DDI_NOSLEEP, 30936 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30937 "driver-assessment", DATA_TYPE_STRING, assessment, 30938 "op-code", DATA_TYPE_UINT8, op_code, 30939 "cdb", DATA_TYPE_UINT8_ARRAY, 30940 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30941 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30942 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30943 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 30944 NULL); 30945 return; 30946 } 30947 30948 /* 30949 * If there is un-expected/un-decodable data, we should post 30950 * ereport.io.scsi.cmd.disk.dev.uderr. 30951 * driver-assessment will be set based on parameter drv_assess. 30952 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back. 30953 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered. 30954 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered. 30955 * SSC_FLAGS_INVALID_DATA - invalid data sent back. 30956 */ 30957 if (ssc->ssc_flags & ssc_invalid_flags) { 30958 if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) { 30959 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30960 "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP, 30961 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30962 "driver-assessment", DATA_TYPE_STRING, 30963 drv_assess == SD_FM_DRV_FATAL ? 
30964 "fail" : assessment, 30965 "op-code", DATA_TYPE_UINT8, op_code, 30966 "cdb", DATA_TYPE_UINT8_ARRAY, 30967 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30968 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30969 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30970 "pkt-stats", DATA_TYPE_UINT32, 30971 uscsi_pkt_statistics, 30972 "stat-code", DATA_TYPE_UINT8, 30973 ssc->ssc_uscsi_cmd->uscsi_status, 30974 "un-decode-info", DATA_TYPE_STRING, 30975 ssc->ssc_info, 30976 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 30977 senlen, sensep, 30978 NULL); 30979 } else { 30980 /* 30981 * For other type of invalid data, the 30982 * un-decode-value field would be empty because the 30983 * un-decodable content could be seen from upper 30984 * level payload or inside un-decode-info. 30985 */ 30986 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30987 "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP, 30988 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30989 "driver-assessment", DATA_TYPE_STRING, 30990 drv_assess == SD_FM_DRV_FATAL ? 
30991 "fail" : assessment, 30992 "op-code", DATA_TYPE_UINT8, op_code, 30993 "cdb", DATA_TYPE_UINT8_ARRAY, 30994 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30995 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30996 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30997 "pkt-stats", DATA_TYPE_UINT32, 30998 uscsi_pkt_statistics, 30999 "stat-code", DATA_TYPE_UINT8, 31000 ssc->ssc_uscsi_cmd->uscsi_status, 31001 "un-decode-info", DATA_TYPE_STRING, 31002 ssc->ssc_info, 31003 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 31004 0, NULL, 31005 NULL); 31006 } 31007 ssc->ssc_flags &= ~ssc_invalid_flags; 31008 return; 31009 } 31010 31011 if (uscsi_pkt_reason != CMD_CMPLT || 31012 (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) { 31013 /* 31014 * pkt-reason != CMD_CMPLT or SSC_FLAGS_TRAN_ABORT was 31015 * set inside sd_start_cmds due to errors(bad packet or 31016 * fatal transport error), we should take it as a 31017 * transport error, so we post ereport.io.scsi.cmd.disk.tran. 31018 * driver-assessment will be set based on drv_assess. 31019 * We will set devid to NULL because it is a transport 31020 * error. 31021 */ 31022 if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT) 31023 ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT; 31024 31025 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 31026 "cmd.disk.tran", uscsi_ena, NULL, DDI_NOSLEEP, FM_VERSION, 31027 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 31028 "driver-assessment", DATA_TYPE_STRING, 31029 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 31030 "op-code", DATA_TYPE_UINT8, op_code, 31031 "cdb", DATA_TYPE_UINT8_ARRAY, 31032 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31033 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 31034 "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state, 31035 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 31036 NULL); 31037 } else { 31038 /* 31039 * If we got here, we have a completed command, and we need 31040 * to further investigate the sense data to see what kind 31041 * of ereport we should post. 
31042 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr 31043 * if sense-key == 0x3. 31044 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise. 31045 * driver-assessment will be set based on the parameter 31046 * drv_assess. 31047 */ 31048 if (senlen > 0) { 31049 /* 31050 * Here we have sense data available. 31051 */ 31052 uint8_t sense_key; 31053 sense_key = scsi_sense_key(sensep); 31054 if (sense_key == 0x3) { 31055 /* 31056 * sense-key == 0x3(medium error), 31057 * driver-assessment should be "fatal" if 31058 * drv_assess is SD_FM_DRV_FATAL. 31059 */ 31060 scsi_fm_ereport_post(un->un_sd, 31061 uscsi_path_instance, 31062 "cmd.disk.dev.rqs.merr", 31063 uscsi_ena, devid, DDI_NOSLEEP, FM_VERSION, 31064 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 31065 "driver-assessment", 31066 DATA_TYPE_STRING, 31067 drv_assess == SD_FM_DRV_FATAL ? 31068 "fatal" : assessment, 31069 "op-code", 31070 DATA_TYPE_UINT8, op_code, 31071 "cdb", 31072 DATA_TYPE_UINT8_ARRAY, cdblen, 31073 ssc->ssc_uscsi_cmd->uscsi_cdb, 31074 "pkt-reason", 31075 DATA_TYPE_UINT8, uscsi_pkt_reason, 31076 "pkt-state", 31077 DATA_TYPE_UINT8, uscsi_pkt_state, 31078 "pkt-stats", 31079 DATA_TYPE_UINT32, 31080 uscsi_pkt_statistics, 31081 "stat-code", 31082 DATA_TYPE_UINT8, 31083 ssc->ssc_uscsi_cmd->uscsi_status, 31084 "key", 31085 DATA_TYPE_UINT8, 31086 scsi_sense_key(sensep), 31087 "asc", 31088 DATA_TYPE_UINT8, 31089 scsi_sense_asc(sensep), 31090 "ascq", 31091 DATA_TYPE_UINT8, 31092 scsi_sense_ascq(sensep), 31093 "sense-data", 31094 DATA_TYPE_UINT8_ARRAY, 31095 senlen, sensep, 31096 "lba", 31097 DATA_TYPE_UINT64, 31098 ssc->ssc_uscsi_info->ui_lba, 31099 NULL); 31100 } else { 31101 /* 31102 * if sense-key == 0x4(hardware 31103 * error), driver-assessment should 31104 * be "fatal" if drv_assess is 31105 * SD_FM_DRV_FATAL. 
31106 */ 31107 scsi_fm_ereport_post(un->un_sd, 31108 uscsi_path_instance, 31109 "cmd.disk.dev.rqs.derr", 31110 uscsi_ena, devid, DDI_NOSLEEP, 31111 FM_VERSION, 31112 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 31113 "driver-assessment", 31114 DATA_TYPE_STRING, 31115 drv_assess == SD_FM_DRV_FATAL ? 31116 (sense_key == 0x4 ? 31117 "fatal" : "fail") : assessment, 31118 "op-code", 31119 DATA_TYPE_UINT8, op_code, 31120 "cdb", 31121 DATA_TYPE_UINT8_ARRAY, cdblen, 31122 ssc->ssc_uscsi_cmd->uscsi_cdb, 31123 "pkt-reason", 31124 DATA_TYPE_UINT8, uscsi_pkt_reason, 31125 "pkt-state", 31126 DATA_TYPE_UINT8, uscsi_pkt_state, 31127 "pkt-stats", 31128 DATA_TYPE_UINT32, 31129 uscsi_pkt_statistics, 31130 "stat-code", 31131 DATA_TYPE_UINT8, 31132 ssc->ssc_uscsi_cmd->uscsi_status, 31133 "key", 31134 DATA_TYPE_UINT8, 31135 scsi_sense_key(sensep), 31136 "asc", 31137 DATA_TYPE_UINT8, 31138 scsi_sense_asc(sensep), 31139 "ascq", 31140 DATA_TYPE_UINT8, 31141 scsi_sense_ascq(sensep), 31142 "sense-data", 31143 DATA_TYPE_UINT8_ARRAY, 31144 senlen, sensep, 31145 NULL); 31146 } 31147 } else { 31148 /* 31149 * For stat_code == STATUS_GOOD, this is not a 31150 * hardware error. 31151 */ 31152 if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) 31153 return; 31154 31155 /* 31156 * Post ereport.io.scsi.cmd.disk.dev.serr if we got the 31157 * stat-code but with sense data unavailable. 31158 * driver-assessment will be set based on parameter 31159 * drv_assess. 31160 */ 31161 scsi_fm_ereport_post(un->un_sd, 31162 uscsi_path_instance, "cmd.disk.dev.serr", uscsi_ena, 31163 devid, DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 31164 FM_EREPORT_VERS0, 31165 "driver-assessment", DATA_TYPE_STRING, 31166 drv_assess == SD_FM_DRV_FATAL ? 
"fail" : assessment, 31167 "op-code", DATA_TYPE_UINT8, op_code, 31168 "cdb", 31169 DATA_TYPE_UINT8_ARRAY, 31170 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31171 "pkt-reason", 31172 DATA_TYPE_UINT8, uscsi_pkt_reason, 31173 "pkt-state", 31174 DATA_TYPE_UINT8, uscsi_pkt_state, 31175 "pkt-stats", 31176 DATA_TYPE_UINT32, uscsi_pkt_statistics, 31177 "stat-code", 31178 DATA_TYPE_UINT8, 31179 ssc->ssc_uscsi_cmd->uscsi_status, 31180 NULL); 31181 } 31182 } 31183 } 31184 31185 /* 31186 * Function: sd_ssc_extract_info 31187 * 31188 * Description: Extract information available to help generate ereport. 31189 * 31190 * Context: Kernel thread or interrupt context. 31191 */ 31192 static void 31193 sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp, 31194 struct buf *bp, struct sd_xbuf *xp) 31195 { 31196 size_t senlen = 0; 31197 union scsi_cdb *cdbp; 31198 int path_instance; 31199 /* 31200 * Need scsi_cdb_size array to determine the cdb length. 31201 */ 31202 extern uchar_t scsi_cdb_size[]; 31203 31204 ASSERT(un != NULL); 31205 ASSERT(pktp != NULL); 31206 ASSERT(bp != NULL); 31207 ASSERT(xp != NULL); 31208 ASSERT(ssc != NULL); 31209 ASSERT(mutex_owned(SD_MUTEX(un))); 31210 31211 /* 31212 * Transfer the cdb buffer pointer here. 31213 */ 31214 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 31215 31216 ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)]; 31217 ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp; 31218 31219 /* 31220 * Transfer the sense data buffer pointer if sense data is available, 31221 * calculate the sense data length first. 31222 */ 31223 if ((xp->xb_sense_state & STATE_XARQ_DONE) || 31224 (xp->xb_sense_state & STATE_ARQ_DONE)) { 31225 /* 31226 * For arq case, we will enter here. 31227 */ 31228 if (xp->xb_sense_state & STATE_XARQ_DONE) { 31229 senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid; 31230 } else { 31231 senlen = SENSE_LENGTH; 31232 } 31233 } else { 31234 /* 31235 * For non-arq case, we will enter this branch. 
31236 */ 31237 if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK && 31238 (xp->xb_sense_state & STATE_XFERRED_DATA)) { 31239 senlen = SENSE_LENGTH - xp->xb_sense_resid; 31240 } 31241 31242 } 31243 31244 ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff); 31245 ssc->ssc_uscsi_cmd->uscsi_rqresid = 0; 31246 ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data; 31247 31248 ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 31249 31250 /* 31251 * Only transfer path_instance when scsi_pkt was properly allocated. 31252 */ 31253 path_instance = pktp->pkt_path_instance; 31254 if (scsi_pkt_allocated_correctly(pktp) && path_instance) 31255 ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance; 31256 else 31257 ssc->ssc_uscsi_cmd->uscsi_path_instance = 0; 31258 31259 /* 31260 * Copy in the other fields we may need when posting ereport. 31261 */ 31262 ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason; 31263 ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state; 31264 ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics; 31265 ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp); 31266 31267 /* 31268 * For partially read/write command, we will not create ena 31269 * in case of a successful command be reconized as recovered. 31270 */ 31271 if ((pktp->pkt_reason == CMD_CMPLT) && 31272 (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) && 31273 (senlen == 0)) { 31274 return; 31275 } 31276 31277 /* 31278 * To associate ereports of a single command execution flow, we 31279 * need a shared ena for a specific command. 31280 */ 31281 if (xp->xb_ena == 0) 31282 xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1); 31283 ssc->ssc_uscsi_info->ui_ena = xp->xb_ena; 31284 } 31285