/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2019 Racktop Systems
 * Copyright 2022 OmniOS Community Edition (OmniOSce) Association.
 * Copyright 2022 Tintri by DDN, Inc. All rights reserved.
 */
/*
 * Copyright 2011 cyril.galibern@opensvc.com
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>
#include <sys/dkioc_free_util.h>

#ifdef __lock_lint
#define _LP64
#define __amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include <sys/fm/protocol.h>

/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define SD_MODULE_NAME "SCSI SSA/FCAL Disk Driver"
#else /* !__fibre */
#define SD_MODULE_NAME "SCSI Disk Driver"
#endif /* !__fibre */

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define SD_DEFAULT_INTERCONNECT_TYPE    SD_INTERCONNECT_FIBRE
#else
#define SD_DEFAULT_INTERCONNECT_TYPE    SD_INTERCONNECT_PARALLEL
#endif

/*
 * The name of the driver, established from the module name in _init.
 */
static char *sd_label = NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define sd_max_xfer_size    ssd_max_xfer_size
#define sd_config_list      ssd_config_list
static char *sd_max_xfer_size = "ssd_max_xfer_size";
static char *sd_config_list = "ssd-config-list";
#else
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
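/*
 * For example, when this file is compiled with __fibre defined, the single
 * definition of sd_state below is renamed by the preprocessor so that the
 * sd module exports a symbol named "sd_state" while the ssd module exports
 * "ssd_state", keeping the two modules' symbol tables distinct.
 */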
#define sd_state                        ssd_state
#define sd_io_time                      ssd_io_time
#define sd_failfast_enable              ssd_failfast_enable
#define sd_ua_retry_count               ssd_ua_retry_count
#define sd_report_pfa                   ssd_report_pfa
#define sd_max_throttle                 ssd_max_throttle
#define sd_min_throttle                 ssd_min_throttle
#define sd_rot_delay                    ssd_rot_delay

#define sd_retry_on_reservation_conflict \
                                        ssd_retry_on_reservation_conflict
#define sd_reinstate_resv_delay         ssd_reinstate_resv_delay
#define sd_resv_conflict_name           ssd_resv_conflict_name

#define sd_component_mask               ssd_component_mask
#define sd_level_mask                   ssd_level_mask
#define sd_debug_un                     ssd_debug_un
#define sd_error_level                  ssd_error_level

#define sd_xbuf_active_limit            ssd_xbuf_active_limit
#define sd_xbuf_reserve_limit           ssd_xbuf_reserve_limit

#define sd_tr                           ssd_tr
#define sd_reset_throttle_timeout       ssd_reset_throttle_timeout
#define sd_qfull_throttle_timeout       ssd_qfull_throttle_timeout
#define sd_qfull_throttle_enable        ssd_qfull_throttle_enable
#define sd_check_media_time             ssd_check_media_time
#define sd_wait_cmds_complete           ssd_wait_cmds_complete
#define sd_log_buf                      ssd_log_buf
#define sd_log_mutex                    ssd_log_mutex

#define sd_disk_table                   ssd_disk_table
#define sd_disk_table_size              ssd_disk_table_size
#define sd_sense_mutex                  ssd_sense_mutex
#define sd_cdbtab                       ssd_cdbtab

#define sd_cb_ops                       ssd_cb_ops
#define sd_ops                          ssd_ops
#define sd_additional_codes             ssd_additional_codes
#define sd_tgops                        ssd_tgops

#define sd_minor_data                   ssd_minor_data
#define sd_minor_data_efi               ssd_minor_data_efi

#define sd_tq                           ssd_tq
#define sd_wmr_tq                       ssd_wmr_tq
#define sd_taskq_name                   ssd_taskq_name
#define sd_wmr_taskq_name               ssd_wmr_taskq_name
#define sd_taskq_minalloc               ssd_taskq_minalloc
#define sd_taskq_maxalloc               ssd_taskq_maxalloc

#define sd_dump_format_string           ssd_dump_format_string

#define sd_iostart_chain                ssd_iostart_chain
#define sd_iodone_chain                 ssd_iodone_chain

#define sd_pm_idletime                  ssd_pm_idletime

#define sd_force_pm_supported           ssd_force_pm_supported

#define sd_dtype_optical_bind           ssd_dtype_optical_bind

#define sd_ssc_init                     ssd_ssc_init
#define sd_ssc_send                     ssd_ssc_send
#define sd_ssc_fini                     ssd_ssc_fini
#define sd_ssc_assessment               ssd_ssc_assessment
#define sd_ssc_post                     ssd_ssc_post
#define sd_ssc_print                    ssd_ssc_print
#define sd_ssc_ereport_post             ssd_ssc_ereport_post
#define sd_ssc_set_info                 ssd_ssc_set_info
#define sd_ssc_extract_info             ssd_ssc_extract_info

#endif

#ifdef SDDEBUG
int sd_force_pm_supported = 0;
#endif /* SDDEBUG */

void *sd_state = NULL;
int sd_io_time = SD_IO_TIME;
int sd_failfast_enable = 1;
int sd_ua_retry_count = SD_UA_RETRY_COUNT;
int sd_report_pfa = 1;
int sd_max_throttle = SD_MAX_THROTTLE;
int sd_min_throttle = SD_MIN_THROTTLE;
int sd_rot_delay = 4; /* Default 4ms Rotation delay */
int sd_qfull_throttle_enable = TRUE;

int sd_retry_on_reservation_conflict = 1;
int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind = -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t sd_component_mask = 0x0;
uint_t sd_level_mask = 0x0;
struct sd_lun *sd_debug_un = NULL;
uint_t sd_error_level = SCSI_ERR_RETRYABLE;
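/*
 * As a sketch (the mask bit values come from sddef.h, and the bulk of the
 * logging is only compiled in when SDDEBUG is defined), these masks are
 * typically set from /etc/system before boot, e.g.:
 *
 *     set sd:sd_component_mask = 0x1
 *     set sd:sd_level_mask = 0x7
 *
 * (use the ssd: prefix for the fibre channel variant of the driver).
 */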
/* Note: these may go away in the future... */
static uint32_t sd_xbuf_active_limit = 512;
static uint32_t sd_xbuf_reserve_limit = 16;

static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time = 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE;

/*
 * Global buffer and mutex for debug logging
 */
static char sd_log_buf[1024];
static kmutex_t sd_log_mutex;

/*
 * Structs and globals for recording attached lun information.
 * This maintains a chain. Each node in the chain represents a SCSI controller.
 * The structure records the number of luns attached to each target connected
 * with the controller.
 * For parallel scsi device only.
 */
struct sd_scsi_hba_tgt_lun {
        struct sd_scsi_hba_tgt_lun *next;
        dev_info_t *pdip;
        int nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define SD_SCSI_LUN_ATTACH      0
#define SD_SCSI_LUN_DETACH      1

static kmutex_t sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
    sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
        struct sd_scsi_probe_cache *next;
        dev_info_t *pdip;
        int cache[NTARGETS_WIDE];
};

static kmutex_t sd_scsi_probe_cache_mutex;
static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))

/*
 * Power attribute table
 */
static sd_power_attr_ss sd_pwr_ss = {
        { "NAME=spindle-motor", "0=off", "1=on", NULL },
        {0, 100},
        {30, 0},
        {20000, 0}
};

static sd_power_attr_pc sd_pwr_pc = {
        { "NAME=spindle-motor", "0=stopped", "1=standby", "2=idle",
            "3=active", NULL },
        {0, 0, 0, 100},
        {90, 90, 20, 0},
        {15000, 15000, 1000, 0}
};

/*
 * Power level to power condition
 */
static int sd_pl2pc[] = {
        SD_TARGET_START_VALID,
        SD_TARGET_STANDBY,
        SD_TARGET_IDLE,
        SD_TARGET_ACTIVE
};

/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__x86)

static sd_tunables seagate_properties = {
        SEAGATE_THROTTLE_VALUE,
        0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables fujitsu_properties = {
        FUJITSU_THROTTLE_VALUE,
        0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables ibm_properties = {
        IBM_THROTTLE_VALUE,
        0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables sve_properties = {
        SVE_THROTTLE_VALUE,
        0,
        0,
        SVE_BUSY_RETRIES,
        SVE_RESET_RETRY_COUNT,
        SVE_RESERVE_RELEASE_TIME,
        SVE_MIN_THROTTLE_VALUE,
        SVE_DISKSORT_DISABLED_FLAG,
        0
};

static sd_tunables maserati_properties = {
        0, 0, 0, 0, 0, 0, 0,
        MASERATI_DISKSORT_DISABLED_FLAG,
        MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
        PIRUS_THROTTLE_VALUE,
        0,
        PIRUS_NRR_COUNT,
        PIRUS_BUSY_RETRIES,
        PIRUS_RESET_RETRY_COUNT,
        0,
        PIRUS_MIN_THROTTLE_VALUE,
        PIRUS_DISKSORT_DISABLED_FLAG,
        PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
    (defined(__x86))

static sd_tunables elite_properties = {
        ELITE_THROTTLE_VALUE,
        0, 0, 0, 0, 0, 0, 0, 0
};

static sd_tunables st31200n_properties = {
        ST31200N_THROTTLE_VALUE,
        0, 0, 0, 0, 0, 0, 0, 0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
        LSI_THROTTLE_VALUE,
        0,
        LSI_NOTREADY_RETRIES,
        0, 0, 0, 0, 0, 0
};

static sd_tunables symbios_properties = {
        SYMBIOS_THROTTLE_VALUE,
        0,
        SYMBIOS_NOTREADY_RETRIES,
        0, 0, 0, 0, 0, 0
};

static sd_tunables lsi_properties = {
        0,
        0,
        LSI_NOTREADY_RETRIES,
        0, 0, 0, 0, 0, 0
};

static sd_tunables lsi_oem_properties = {
        0,
        0,
        LSI_OEM_NOTREADY_RETRIES,
        0, 0, 0, 0, 0, 0,
        1
};

#if (defined(SD_PROP_TST))

#define SD_TST_CTYPE_VAL        CTYPE_CDROM
#define SD_TST_THROTTLE_VAL     16
#define SD_TST_NOTREADY_VAL     12
#define SD_TST_BUSY_VAL         60
#define SD_TST_RST_RETRY_VAL    36
#define SD_TST_RSV_REL_TIME     60

static sd_tunables tst_properties = {
        SD_TST_THROTTLE_VAL,
        SD_TST_CTYPE_VAL,
        SD_TST_NOTREADY_VAL,
        SD_TST_BUSY_VAL,
        SD_TST_RST_RETRY_VAL,
        SD_TST_RSV_REL_TIME,
        0, 0, 0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define SD_TOUPPER(C)   (((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))
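/*
 * For example, SD_TOUPPER('a') evaluates to 'A' while SD_TOUPPER('0') and
 * SD_TOUPPER('A') are returned unchanged. Unlike toupper(3C), the macro
 * assumes an ASCII-contiguous 'a'..'z' range and evaluates its argument
 * more than once, so it must not be used with expressions that have side
 * effects.
 */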
/*
 * Static Driver Configuration Table
 *
 * This is the table of disks that need throttle adjustment (or, perhaps,
 * something else as defined by the flags at a future time). device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. A device is matched against an entry only for as much of the
 * device_id string as is defined here. Flags defines which values are to
 * be set in the driver from the properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *       ST318202F is a Legacy device
 *       MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *       made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__x86)
        { "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
        { "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
        { "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
        { "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
        { "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
        { "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
        { "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
        { "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
        { "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
        { "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
        { "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
        { "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
        { "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
        { "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
        { "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
        { "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
        { "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
        { "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
        { "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
        { "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
        { "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
        { "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
        { "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
        { "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
        { "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
        { "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
        { "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
        { "IBM     1724-100",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "IBM     1726-22x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "IBM     1726-42x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "IBM     1818",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "DELL    MD3000",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "DELL    MD3000i",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "*CSM100_*",         SD_CONF_BSET_NRR_COUNT |
                        SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
        { "*CSM200_*",         SD_CONF_BSET_NRR_COUNT |
                        SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
        { "Fujitsu SX300",     SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
        { "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
        { "SUN     SESS01", SD_CONF_BSET_THROTTLE |
                        SD_CONF_BSET_BSY_RETRY_COUNT|
                        SD_CONF_BSET_RST_RETRIES|
                        SD_CONF_BSET_RSV_REL_TIME|
                        SD_CONF_BSET_MIN_THROTTLE|
                        SD_CONF_BSET_DISKSORT_DISABLED,
                &sve_properties },
        { "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
                        SD_CONF_BSET_LUN_RESET_ENABLED,
                &maserati_properties },
        { "SUN     SE6920", SD_CONF_BSET_THROTTLE |
                        SD_CONF_BSET_NRR_COUNT|
                        SD_CONF_BSET_BSY_RETRY_COUNT|
                        SD_CONF_BSET_RST_RETRIES|
                        SD_CONF_BSET_MIN_THROTTLE|
                        SD_CONF_BSET_DISKSORT_DISABLED|
                        SD_CONF_BSET_LUN_RESET_ENABLED,
                &pirus_properties },
        { "SUN     SE6940", SD_CONF_BSET_THROTTLE |
                        SD_CONF_BSET_NRR_COUNT|
                        SD_CONF_BSET_BSY_RETRY_COUNT|
                        SD_CONF_BSET_RST_RETRIES|
                        SD_CONF_BSET_MIN_THROTTLE|
                        SD_CONF_BSET_DISKSORT_DISABLED|
                        SD_CONF_BSET_LUN_RESET_ENABLED,
                &pirus_properties },
        { "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
                        SD_CONF_BSET_NRR_COUNT|
                        SD_CONF_BSET_BSY_RETRY_COUNT|
                        SD_CONF_BSET_RST_RETRIES|
                        SD_CONF_BSET_MIN_THROTTLE|
                        SD_CONF_BSET_DISKSORT_DISABLED|
                        SD_CONF_BSET_LUN_RESET_ENABLED,
                &pirus_properties },
        { "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
                        SD_CONF_BSET_NRR_COUNT|
                        SD_CONF_BSET_BSY_RETRY_COUNT|
                        SD_CONF_BSET_RST_RETRIES|
                        SD_CONF_BSET_MIN_THROTTLE|
                        SD_CONF_BSET_DISKSORT_DISABLED|
                        SD_CONF_BSET_LUN_RESET_ENABLED,
                &pirus_properties },
        { "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
                        SD_CONF_BSET_NRR_COUNT|
                        SD_CONF_BSET_BSY_RETRY_COUNT|
                        SD_CONF_BSET_RST_RETRIES|
                        SD_CONF_BSET_MIN_THROTTLE|
                        SD_CONF_BSET_DISKSORT_DISABLED|
                        SD_CONF_BSET_LUN_RESET_ENABLED,
                &pirus_properties },
        { "SUN     SE6330", SD_CONF_BSET_THROTTLE |
                        SD_CONF_BSET_NRR_COUNT|
                        SD_CONF_BSET_BSY_RETRY_COUNT|
                        SD_CONF_BSET_RST_RETRIES|
                        SD_CONF_BSET_MIN_THROTTLE|
                        SD_CONF_BSET_DISKSORT_DISABLED|
                        SD_CONF_BSET_LUN_RESET_ENABLED,
                &pirus_properties },
        { "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "SUN     SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
        { "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) ||\
        (defined(__x86)))
        { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
        { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
        { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
        { "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
        { "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
        { "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
        { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
        { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
        { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
        { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
        { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
        { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
        { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
            &symbios_properties },
        { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
            &lsi_properties_scsi },
#if defined(__x86)
        { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
                | SD_CONF_BSET_READSUB_BCD
                | SD_CONF_BSET_READ_TOC_ADDR_BCD
                | SD_CONF_BSET_NO_READ_HEADER
                | SD_CONF_BSET_READ_CD_XD4), NULL },

        { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
                | SD_CONF_BSET_READSUB_BCD
                | SD_CONF_BSET_READ_TOC_ADDR_BCD
                | SD_CONF_BSET_NO_READ_HEADER
                | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __x86 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
        { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE
                | SD_CONF_BSET_CTYPE
                | SD_CONF_BSET_NRR_COUNT
                | SD_CONF_BSET_FAB_DEVID
                | SD_CONF_BSET_NOCACHE
                | SD_CONF_BSET_BSY_RETRY_COUNT
                | SD_CONF_BSET_PLAYMSF_BCD
                | SD_CONF_BSET_READSUB_BCD
                | SD_CONF_BSET_READ_TOC_TRK_BCD
                | SD_CONF_BSET_READ_TOC_ADDR_BCD
                | SD_CONF_BSET_NO_READ_HEADER
                | SD_CONF_BSET_READ_CD_XD4
                | SD_CONF_BSET_RST_RETRIES
                | SD_CONF_BSET_RSV_REL_TIME
                | SD_CONF_BSET_TUR_CHECK), &tst_properties },
#endif
};

static const int sd_disk_table_size =
    sizeof (sd_disk_table) / sizeof (sd_disk_config_t);

/*
 * Emulation mode disk drive VID/PID table
 */
static char sd_flash_dev_table[][25] = {
        "ATA     MARVELL SD88SA02",
        "MARVELL SD88SA02",
        "TOSHIBA THNSNV05",
};

static const int sd_flash_dev_table_size =
    sizeof (sd_flash_dev_table) / sizeof (sd_flash_dev_table[0]);

#define SD_INTERCONNECT_PARALLEL        0
#define SD_INTERCONNECT_FABRIC          1
#define SD_INTERCONNECT_FIBRE           2
#define SD_INTERCONNECT_SSA             3
#define SD_INTERCONNECT_SATA            4
#define SD_INTERCONNECT_SAS             5

#define SD_IS_PARALLEL_SCSI(un)         \
        ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define SD_IS_SERIAL(un)                \
        (((un)->un_interconnect_type == SD_INTERCONNECT_SATA) ||\
        ((un)->un_interconnect_type == SD_INTERCONNECT_SAS))

/*
 * Definitions used by device id registration routines
 */
#define VPD_HEAD_OFFSET         3       /* size of head for vpd page */
#define VPD_PAGE_LENGTH         3       /* offset for page length data */
#define VPD_MODE_PAGE           1       /* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define New_state(un, s)        \
        (un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define Restore_state(un)       \
        { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }

static struct sd_cdbinfo sd_cdbtab[] = {
        { CDB_GROUP0, 0x00,        0x1FFFFF,           0xFF, },
        { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF,         0xFFFF, },
        { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF,         0xFFFFFFFF, },
        { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};
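/*
 * Reading the table above: each row pairs a CDB group with its group opcode
 * bits, the largest LBA it can address, and the largest transfer length (in
 * blocks) it can encode. For example, Group 0 (6-byte) READ/WRITE CDBs carry
 * a 21-bit LBA (0x1FFFFF) and an 8-bit block count, while Group 4 (16-byte)
 * CDBs carry a 64-bit LBA and a 32-bit count. The field definitions live in
 * struct sd_cdbinfo in sddef.h.
 */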
/*
 * Specifies the number of seconds that must have elapsed since the last
 * command completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define sd_log_trace                    ssd_log_trace
#define sd_log_info                     ssd_log_info
#define sd_log_err                      ssd_log_err
#define sdprobe                         ssdprobe
#define sdinfo                          ssdinfo
#define sd_prop_op                      ssd_prop_op
#define sd_scsi_probe_cache_init        ssd_scsi_probe_cache_init
#define sd_scsi_probe_cache_fini        ssd_scsi_probe_cache_fini
#define sd_scsi_clear_probe_cache       ssd_scsi_clear_probe_cache
#define sd_scsi_probe_with_cache        ssd_scsi_probe_with_cache
#define sd_scsi_target_lun_init         ssd_scsi_target_lun_init
#define sd_scsi_target_lun_fini         ssd_scsi_target_lun_fini
#define sd_scsi_get_target_lun_count    ssd_scsi_get_target_lun_count
#define sd_scsi_update_lun_on_target    ssd_scsi_update_lun_on_target
#define sd_spin_up_unit                 ssd_spin_up_unit
#define sd_enable_descr_sense           ssd_enable_descr_sense
#define sd_reenable_dsense_task         ssd_reenable_dsense_task
#define sd_set_mmc_caps                 ssd_set_mmc_caps
#define sd_read_unit_properties         ssd_read_unit_properties
#define sd_process_sdconf_file          ssd_process_sdconf_file
#define sd_process_sdconf_table         ssd_process_sdconf_table
#define sd_sdconf_id_match              ssd_sdconf_id_match
#define sd_blank_cmp                    ssd_blank_cmp
#define sd_chk_vers1_data               ssd_chk_vers1_data
#define sd_set_vers1_properties         ssd_set_vers1_properties
#define sd_check_bdc_vpd                ssd_check_bdc_vpd
#define sd_check_emulation_mode         ssd_check_emulation_mode

#define sd_get_physical_geometry        ssd_get_physical_geometry
#define sd_get_virtual_geometry         ssd_get_virtual_geometry
#define sd_update_block_info            ssd_update_block_info
#define sd_register_devid               ssd_register_devid
#define sd_get_devid                    ssd_get_devid
#define sd_create_devid                 ssd_create_devid
#define sd_write_deviceid               ssd_write_deviceid
#define sd_check_vpd_page_support       ssd_check_vpd_page_support
#define sd_setup_pm                     ssd_setup_pm
#define sd_create_pm_components         ssd_create_pm_components
#define sd_ddi_suspend                  ssd_ddi_suspend
#define sd_ddi_resume                   ssd_ddi_resume
#define sd_pm_state_change              ssd_pm_state_change
#define sdpower                         ssdpower
#define sdattach                        ssdattach
#define sddetach                        ssddetach
#define sd_unit_attach                  ssd_unit_attach
#define sd_unit_detach                  ssd_unit_detach
#define sd_set_unit_attributes          ssd_set_unit_attributes
#define sd_create_errstats              ssd_create_errstats
#define sd_set_errstats                 ssd_set_errstats
#define sd_set_pstats                   ssd_set_pstats
#define sddump                          ssddump
#define sd_scsi_poll                    ssd_scsi_poll
#define sd_send_polled_RQS              ssd_send_polled_RQS
#define sd_ddi_scsi_poll                ssd_ddi_scsi_poll
#define sd_init_event_callbacks         ssd_init_event_callbacks
#define sd_event_callback               ssd_event_callback
#define sd_cache_control                ssd_cache_control
#define sd_get_write_cache_enabled      ssd_get_write_cache_enabled
#define sd_get_write_cache_changeable   ssd_get_write_cache_changeable
#define sd_get_nv_sup                   ssd_get_nv_sup
#define sd_make_device                  ssd_make_device
#define sdopen                          ssdopen
#define sdclose                         ssdclose
#define sd_ready_and_valid              ssd_ready_and_valid
#define sdmin                           ssdmin
#define sdread                          ssdread
#define sdwrite                         ssdwrite
#define sdaread                         ssdaread
#define sdawrite                        ssdawrite
#define sdstrategy                      ssdstrategy
#define sdioctl                         ssdioctl
#define sd_mapblockaddr_iostart         ssd_mapblockaddr_iostart
#define sd_mapblocksize_iostart         ssd_mapblocksize_iostart
#define sd_checksum_iostart             ssd_checksum_iostart
#define sd_checksum_uscsi_iostart       ssd_checksum_uscsi_iostart
#define sd_pm_iostart                   ssd_pm_iostart
#define sd_core_iostart                 ssd_core_iostart
#define sd_mapblockaddr_iodone          ssd_mapblockaddr_iodone
#define sd_mapblocksize_iodone          ssd_mapblocksize_iodone
#define sd_checksum_iodone              ssd_checksum_iodone
#define sd_checksum_uscsi_iodone        ssd_checksum_uscsi_iodone
#define sd_pm_iodone                    ssd_pm_iodone
#define sd_initpkt_for_buf              ssd_initpkt_for_buf
#define sd_destroypkt_for_buf           ssd_destroypkt_for_buf
#define sd_setup_rw_pkt                 ssd_setup_rw_pkt
#define sd_setup_next_rw_pkt            ssd_setup_next_rw_pkt
#define sd_buf_iodone                   ssd_buf_iodone
#define sd_uscsi_strategy               ssd_uscsi_strategy
#define sd_initpkt_for_uscsi            ssd_initpkt_for_uscsi
#define sd_destroypkt_for_uscsi         ssd_destroypkt_for_uscsi
#define sd_uscsi_iodone                 ssd_uscsi_iodone
#define sd_xbuf_strategy                ssd_xbuf_strategy
#define sd_xbuf_init                    ssd_xbuf_init
#define sd_pm_entry                     ssd_pm_entry
#define sd_pm_exit                      ssd_pm_exit

#define sd_pm_idletimeout_handler       ssd_pm_idletimeout_handler
#define sd_pm_timeout_handler           ssd_pm_timeout_handler

#define sd_add_buf_to_waitq             ssd_add_buf_to_waitq
#define sdintr                          ssdintr
#define sd_start_cmds                   ssd_start_cmds
#define sd_send_scsi_cmd                ssd_send_scsi_cmd
#define sd_bioclone_alloc               ssd_bioclone_alloc
#define sd_bioclone_free                ssd_bioclone_free
#define sd_shadow_buf_alloc             ssd_shadow_buf_alloc
#define sd_shadow_buf_free              ssd_shadow_buf_free
#define sd_print_transport_rejected_message \
                                        ssd_print_transport_rejected_message
#define sd_retry_command                ssd_retry_command
#define sd_set_retry_bp                 ssd_set_retry_bp
#define sd_send_request_sense_command   ssd_send_request_sense_command
#define sd_start_retry_command          ssd_start_retry_command
#define sd_start_direct_priority_command \
                                        ssd_start_direct_priority_command
#define sd_return_failed_command        ssd_return_failed_command
#define sd_return_failed_command_no_restart \
                                        ssd_return_failed_command_no_restart
#define sd_return_command               ssd_return_command
#define sd_sync_with_callback           ssd_sync_with_callback
#define sdrunout                        ssdrunout
#define sd_mark_rqs_busy                ssd_mark_rqs_busy
#define sd_mark_rqs_idle                ssd_mark_rqs_idle
#define sd_reduce_throttle              ssd_reduce_throttle
#define sd_restore_throttle             ssd_restore_throttle
#define sd_print_incomplete_msg         ssd_print_incomplete_msg
#define sd_init_cdb_limits              ssd_init_cdb_limits
#define sd_pkt_status_good              ssd_pkt_status_good
#define sd_pkt_status_check_condition   ssd_pkt_status_check_condition
#define sd_pkt_status_busy              ssd_pkt_status_busy
#define sd_pkt_status_reservation_conflict \
                                        ssd_pkt_status_reservation_conflict
#define sd_pkt_status_qfull             ssd_pkt_status_qfull
#define sd_handle_request_sense         ssd_handle_request_sense
#define sd_handle_auto_request_sense    ssd_handle_auto_request_sense
#define sd_print_sense_failed_msg       ssd_print_sense_failed_msg
#define sd_validate_sense_data          ssd_validate_sense_data
#define sd_decode_sense                 ssd_decode_sense
#define sd_print_sense_msg              ssd_print_sense_msg
#define sd_sense_key_no_sense           ssd_sense_key_no_sense
#define sd_sense_key_recoverable_error  ssd_sense_key_recoverable_error
#define sd_sense_key_not_ready          ssd_sense_key_not_ready
#define sd_sense_key_medium_or_hardware_error \
                                        ssd_sense_key_medium_or_hardware_error
#define sd_sense_key_illegal_request    ssd_sense_key_illegal_request
#define sd_sense_key_unit_attention     ssd_sense_key_unit_attention
#define sd_sense_key_fail_command       ssd_sense_key_fail_command
#define sd_sense_key_blank_check        ssd_sense_key_blank_check
#define sd_sense_key_aborted_command    ssd_sense_key_aborted_command
#define sd_sense_key_default            ssd_sense_key_default
#define sd_print_retry_msg              ssd_print_retry_msg
#define sd_print_cmd_incomplete_msg     ssd_print_cmd_incomplete_msg
#define sd_pkt_reason_cmd_incomplete    ssd_pkt_reason_cmd_incomplete
#define sd_pkt_reason_cmd_tran_err      ssd_pkt_reason_cmd_tran_err
#define sd_pkt_reason_cmd_reset         ssd_pkt_reason_cmd_reset
#define sd_pkt_reason_cmd_aborted       ssd_pkt_reason_cmd_aborted
#define sd_pkt_reason_cmd_timeout       ssd_pkt_reason_cmd_timeout
#define sd_pkt_reason_cmd_unx_bus_free  ssd_pkt_reason_cmd_unx_bus_free
#define sd_pkt_reason_cmd_tag_reject    ssd_pkt_reason_cmd_tag_reject
#define sd_pkt_reason_default           ssd_pkt_reason_default
#define sd_reset_target                 ssd_reset_target
#define sd_start_stop_unit_callback     ssd_start_stop_unit_callback
#define sd_start_stop_unit_task         ssd_start_stop_unit_task
#define sd_taskq_create                 ssd_taskq_create
#define sd_taskq_delete                 ssd_taskq_delete
#define sd_target_change_task           ssd_target_change_task
#define sd_log_dev_status_event         ssd_log_dev_status_event
#define sd_log_lun_expansion_event      ssd_log_lun_expansion_event
#define sd_log_eject_request_event      ssd_log_eject_request_event
#define sd_media_change_task            ssd_media_change_task
#define sd_handle_mchange               ssd_handle_mchange
#define sd_send_scsi_DOORLOCK           ssd_send_scsi_DOORLOCK
#define sd_send_scsi_READ_CAPACITY      ssd_send_scsi_READ_CAPACITY
#define sd_send_scsi_READ_CAPACITY_16   ssd_send_scsi_READ_CAPACITY_16
#define sd_send_scsi_GET_CONFIGURATION  ssd_send_scsi_GET_CONFIGURATION
#define sd_send_scsi_feature_GET_CONFIGURATION \
                                        ssd_send_scsi_feature_GET_CONFIGURATION
#define sd_send_scsi_START_STOP_UNIT    ssd_send_scsi_START_STOP_UNIT
#define sd_send_scsi_INQUIRY            ssd_send_scsi_INQUIRY
#define sd_send_scsi_TEST_UNIT_READY    ssd_send_scsi_TEST_UNIT_READY
#define sd_send_scsi_PERSISTENT_RESERVE_IN \
                                        ssd_send_scsi_PERSISTENT_RESERVE_IN
#define sd_send_scsi_PERSISTENT_RESERVE_OUT \
                                        ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define sd_send_scsi_SYNCHRONIZE_CACHE  ssd_send_scsi_SYNCHRONIZE_CACHE
#define sd_send_scsi_SYNCHRONIZE_CACHE_biodone \
                                        ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define sd_send_scsi_MODE_SENSE         ssd_send_scsi_MODE_SENSE
#define sd_send_scsi_MODE_SELECT        ssd_send_scsi_MODE_SELECT
#define sd_send_scsi_RDWR               ssd_send_scsi_RDWR
#define sd_send_scsi_LOG_SENSE          ssd_send_scsi_LOG_SENSE
#define sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION \
                                        ssd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
#define sd_gesn_media_data_valid        ssd_gesn_media_data_valid
#define sd_alloc_rqs                    ssd_alloc_rqs
#define sd_free_rqs                     ssd_free_rqs
#define sd_dump_memory                  ssd_dump_memory
#define sd_get_media_info_com           ssd_get_media_info_com
#define sd_get_media_info               ssd_get_media_info
#define sd_get_media_info_ext           ssd_get_media_info_ext
#define sd_dkio_ctrl_info               ssd_dkio_ctrl_info
#define sd_nvpair_str_decode            ssd_nvpair_str_decode
#define sd_set_properties               ssd_set_properties
#define sd_get_tunables_from_conf       ssd_get_tunables_from_conf
#define sd_setup_next_xfer              ssd_setup_next_xfer
#define sd_dkio_get_temp                ssd_dkio_get_temp
#define sd_check_mhd                    ssd_check_mhd
#define sd_mhd_watch_cb                 ssd_mhd_watch_cb
#define sd_mhd_watch_incomplete         ssd_mhd_watch_incomplete
#define sd_sname                        ssd_sname
#define sd_mhd_resvd_recover            ssd_mhd_resvd_recover
#define sd_resv_reclaim_thread          ssd_resv_reclaim_thread
#define sd_take_ownership               ssd_take_ownership
#define sd_reserve_release              ssd_reserve_release
#define sd_rmv_resv_reclaim_req         ssd_rmv_resv_reclaim_req
#define sd_mhd_reset_notify_cb          ssd_mhd_reset_notify_cb
#define sd_persistent_reservation_in_read_keys \
                                        ssd_persistent_reservation_in_read_keys
#define sd_persistent_reservation_in_read_resv \
                                        ssd_persistent_reservation_in_read_resv
#define sd_mhdioc_takeown               ssd_mhdioc_takeown
#define sd_mhdioc_failfast              ssd_mhdioc_failfast
#define sd_mhdioc_release               ssd_mhdioc_release
#define sd_mhdioc_register_devid        ssd_mhdioc_register_devid
#define sd_mhdioc_inkeys                ssd_mhdioc_inkeys
#define sd_mhdioc_inresv                ssd_mhdioc_inresv
#define sr_change_blkmode               ssr_change_blkmode
#define sr_change_speed                 ssr_change_speed
#define sr_atapi_change_speed           ssr_atapi_change_speed
#define sr_pause_resume                 ssr_pause_resume
#define sr_play_msf                     ssr_play_msf
#define sr_play_trkind                  ssr_play_trkind
#define sr_read_all_subcodes            ssr_read_all_subcodes
#define sr_read_subchannel              ssr_read_subchannel
#define sr_read_tocentry                ssr_read_tocentry
#define sr_read_tochdr                  ssr_read_tochdr
#define sr_read_cdda                    ssr_read_cdda
#define sr_read_cdxa                    ssr_read_cdxa
#define sr_read_mode1                   ssr_read_mode1
#define sr_read_mode2                   ssr_read_mode2
#define sr_read_cd_mode2                ssr_read_cd_mode2
#define sr_sector_mode                  ssr_sector_mode
#define sr_eject                        ssr_eject
#define sr_ejected                      ssr_ejected
#define sr_check_wp                     ssr_check_wp
#define sd_watch_request_submit         ssd_watch_request_submit
#define sd_check_media                  ssd_check_media
#define sd_media_watch_cb               ssd_media_watch_cb
#define sd_delayed_cv_broadcast         ssd_delayed_cv_broadcast
#define sr_volume_ctrl                  ssr_volume_ctrl
#define sr_read_sony_session_offset     ssr_read_sony_session_offset
#define sd_log_page_supported           ssd_log_page_supported
#define sd_check_for_writable_cd        ssd_check_for_writable_cd
#define sd_wm_cache_constructor         ssd_wm_cache_constructor
#define sd_wm_cache_destructor          ssd_wm_cache_destructor
#define sd_range_lock                   ssd_range_lock
#define sd_get_range                    ssd_get_range
#define sd_free_inlist_wmap             ssd_free_inlist_wmap
#define sd_range_unlock                 ssd_range_unlock
#define sd_read_modify_write_task       ssd_read_modify_write_task
#define sddump_do_read_of_rmw           ssddump_do_read_of_rmw

#define sd_iostart_chain                ssd_iostart_chain
#define sd_iodone_chain                 ssd_iodone_chain
#define sd_initpkt_map                  ssd_initpkt_map
#define sd_destroypkt_map               ssd_destroypkt_map
#define sd_chain_type_map               ssd_chain_type_map
#define sd_chain_index_map              ssd_chain_index_map

#define sd_failfast_flushctl            ssd_failfast_flushctl
#define sd_failfast_flushq              ssd_failfast_flushq
#define sd_failfast_flushq_callback     ssd_failfast_flushq_callback

#define sd_is_lsi                       ssd_is_lsi
#define sd_tg_rdwr                      ssd_tg_rdwr
#define sd_tg_getinfo                   ssd_tg_getinfo
#define sd_rmw_msg_print_handler        ssd_rmw_msg_print_handler

#endif /* #if (defined(__fibre)) */

typedef struct unmap_param_hdr_s {
        uint16_t        uph_data_len;
        uint16_t        uph_descr_data_len;
        uint32_t        uph_reserved;
} unmap_param_hdr_t;

typedef struct unmap_blk_descr_s {
        uint64_t        ubd_lba;
        uint32_t        ubd_lba_cnt;
        uint32_t        ubd_reserved;
} unmap_blk_descr_t;

/* Max number of block descriptors in UNMAP command */
#define SD_UNMAP_MAX_DESCR \
        ((UINT16_MAX - sizeof (unmap_param_hdr_t)) / sizeof (unmap_blk_descr_t))
/* Max size of the UNMAP parameter list in bytes */
#define SD_UNMAP_PARAM_LIST_MAXSZ       (sizeof (unmap_param_hdr_t) + \
        SD_UNMAP_MAX_DESCR * sizeof (unmap_blk_descr_t))
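/*
 * Sanity check of the arithmetic above: the parameter header is 8 bytes
 * (2 + 2 + 4) and each block descriptor is 16 bytes (8 + 4 + 4), so the
 * 16-bit UNMAP parameter list length allows (65535 - 8) / 16 = 4095
 * descriptors, giving a maximum parameter list of 8 + 4095 * 16 = 65528
 * bytes.
 */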
int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(sd_ssc_t *ssc);

/*
 * Use sd_ssc_init to establish the sd_ssc_t struct,
 * sd_ssc_send to send an internal uscsi command, and
 * sd_ssc_fini to free the sd_ssc_t struct.
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);

/*
 * Use sd_ssc_assessment to set the correct type of assessment, and
 * sd_ssc_post to post an ereport and a system log entry. sd_ssc_post
 * calls sd_ssc_print to print the system log entry, and
 * sd_ssc_ereport_post to post the ereport.
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);

/*
 * Use sd_ssc_set_info to mark an un-decodable-data error, and
 * sd_ssc_extract_info to transfer information from internal
 * data structures to the sd_ssc_t.
 */
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);
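/*
 * Typical internal-command sequence, as a sketch (the flag and path values
 * shown are illustrative; callers pick whatever their context requires):
 *
 *      sd_ssc_t *ssc = sd_ssc_init(un);
 *      rval = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE,
 *          SD_PATH_DIRECT);
 *      sd_ssc_assessment(ssc, SD_FMT_STANDARD);
 *      sd_ssc_fini(ssc);
 */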
static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int sd_write_deviceid(sd_ssc_t *ssc);
static int sd_check_vpd_page_support(sd_ssc_t *ssc);

static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_pm_state_change(struct sd_lun *un, int level, int flag);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define SD_CACHE_ENABLE         1
#define SD_CACHE_DISABLE        0
#define SD_CACHE_NOCHANGE       -1
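/*
 * The rcd_flag and wce_flag arguments of sd_cache_control() below each take
 * one of these values, so a call such as (a sketch)
 *
 *      (void) sd_cache_control(ssc, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
 *
 * enables the write cache while leaving the read-cache-disable setting
 * untouched.
 */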
1331 */ 1332 static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg); 1333 static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **); 1334 static void sd_destroypkt_for_buf(struct buf *); 1335 static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp, 1336 struct buf *bp, int flags, 1337 int (*callback)(caddr_t), caddr_t callback_arg, 1338 diskaddr_t lba, uint32_t blockcount); 1339 static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp, 1340 struct buf *bp, diskaddr_t lba, uint32_t blockcount); 1341 1342 /* 1343 * Prototypes for functions to support USCSI IO. 1344 */ 1345 static int sd_uscsi_strategy(struct buf *bp); 1346 static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **); 1347 static void sd_destroypkt_for_uscsi(struct buf *); 1348 1349 static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 1350 uchar_t chain_type, void *pktinfop); 1351 1352 static int sd_pm_entry(struct sd_lun *un); 1353 static void sd_pm_exit(struct sd_lun *un); 1354 1355 static void sd_pm_idletimeout_handler(void *arg); 1356 1357 /* 1358 * sd_core internal functions (used at the sd_core_io layer). 1359 */ 1360 static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp); 1361 static void sdintr(struct scsi_pkt *pktp); 1362 static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp); 1363 1364 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 1365 enum uio_seg dataspace, int path_flag); 1366 1367 static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen, 1368 daddr_t blkno, int (*func)(struct buf *)); 1369 static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen, 1370 uint_t bflags, daddr_t blkno, int (*func)(struct buf *)); 1371 static void sd_bioclone_free(struct buf *bp); 1372 static void sd_shadow_buf_free(struct buf *bp); 1373 1374 static void sd_print_transport_rejected_message(struct sd_lun *un, 1375 struct sd_xbuf *xp, int code); 1376 static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, 1377 void *arg, int code); 1378 static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, 1379 void *arg, int code); 1380 static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, 1381 void *arg, int code); 1382 1383 static void sd_retry_command(struct sd_lun *un, struct buf *bp, 1384 int retry_check_flag, 1385 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int c), 1386 void *user_arg, int failure_code, clock_t retry_delay, 1387 void (*statp)(kstat_io_t *)); 1388 1389 static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp, 1390 clock_t retry_delay, void (*statp)(kstat_io_t *)); 1391 1392 static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 1393 struct scsi_pkt *pktp); 1394 static void sd_start_retry_command(void *arg); 1395 static void sd_start_direct_priority_command(void *arg); 1396 static void sd_return_failed_command(struct sd_lun *un, struct buf *bp, 1397 int errcode); 1398 static void sd_return_failed_command_no_restart(struct sd_lun *un, 1399 struct buf *bp, int errcode); 1400 static void sd_return_command(struct sd_lun *un, struct buf *bp); 1401 static void sd_sync_with_callback(struct sd_lun *un); 1402 static int sdrunout(caddr_t arg); 1403 1404 static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp); 1405 static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp); 1406 1407 static void sd_reduce_throttle(struct sd_lun *un, int throttle_type); 1408 
static void sd_restore_throttle(void *arg); 1409 1410 static void sd_init_cdb_limits(struct sd_lun *un); 1411 1412 static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 1413 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1414 1415 /* 1416 * Error handling functions 1417 */ 1418 static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 1419 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1420 static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, 1421 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1422 static void sd_pkt_status_reservation_conflict(struct sd_lun *un, 1423 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1424 static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 1425 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1426 1427 static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp, 1428 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1429 static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 1430 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1431 static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp, 1432 struct sd_xbuf *xp, size_t actual_len); 1433 static void sd_decode_sense(struct sd_lun *un, struct buf *bp, 1434 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1435 1436 static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp, 1437 void *arg, int code); 1438 1439 static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 1440 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1441 static void sd_sense_key_recoverable_error(struct sd_lun *un, 1442 uint8_t *sense_datap, 1443 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1444 static void sd_sense_key_not_ready(struct sd_lun *un, 1445 uint8_t *sense_datap, 1446 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1447 static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 1448 uint8_t *sense_datap, 1449 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1450 static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 1451 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1452 static void sd_sense_key_unit_attention(struct sd_lun *un, 1453 uint8_t *sense_datap, 1454 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1455 static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 1456 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1457 static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 1458 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1459 static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 1460 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1461 static void sd_sense_key_default(struct sd_lun *un, 1462 uint8_t *sense_datap, 1463 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp); 1464 1465 static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp, 1466 void *arg, int flag); 1467 1468 static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 1469 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1470 static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 1471 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1472 static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 1473 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1474 static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 1475 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1476 static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 1477 struct sd_xbuf *xp, struct scsi_pkt 
*pktp); 1478 static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 1479 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1480 static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 1481 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1482 static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 1483 struct sd_xbuf *xp, struct scsi_pkt *pktp); 1484 1485 static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp); 1486 1487 static void sd_start_stop_unit_callback(void *arg); 1488 static void sd_start_stop_unit_task(void *arg); 1489 1490 static void sd_taskq_create(void); 1491 static void sd_taskq_delete(void); 1492 static void sd_target_change_task(void *arg); 1493 static void sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag); 1494 static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag); 1495 static void sd_log_eject_request_event(struct sd_lun *un, int km_flag); 1496 static void sd_media_change_task(void *arg); 1497 1498 static int sd_handle_mchange(struct sd_lun *un); 1499 static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag); 1500 static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, 1501 uint32_t *lbap, int path_flag); 1502 static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, 1503 uint32_t *lbap, uint32_t *psp, int path_flag); 1504 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, 1505 int flag, int path_flag); 1506 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, 1507 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp); 1508 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag); 1509 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, 1510 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp); 1511 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, 1512 uchar_t usr_cmd, uchar_t *usr_bufp); 1513 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, 1514 struct dk_callback *dkc); 1515 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp); 1516 static int sd_send_scsi_UNMAP(dev_t dev, sd_ssc_t *ssc, dkioc_free_list_t *dfl, 1517 int flag); 1518 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, 1519 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1520 uchar_t *bufaddr, uint_t buflen, int path_flag); 1521 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 1522 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 1523 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag); 1524 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, 1525 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag); 1526 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, 1527 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag); 1528 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 1529 size_t buflen, daddr_t start_block, int path_flag); 1530 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \ 1531 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \ 1532 path_flag) 1533 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\ 1534 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\ 1535 path_flag) 1536 1537 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, 1538 uint16_t buflen, uchar_t page_code, uchar_t page_control, 1539 uint16_t param_ptr, int path_flag); 1540 static int 
sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, 1541 uchar_t *bufaddr, size_t buflen, uchar_t class_req); 1542 static boolean_t sd_gesn_media_data_valid(uchar_t *data); 1543 1544 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un); 1545 static void sd_free_rqs(struct sd_lun *un); 1546 1547 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, 1548 uchar_t *data, int len, int fmt); 1549 static void sd_panic_for_res_conflict(struct sd_lun *un); 1550 1551 /* 1552 * Disk Ioctl Function Prototypes 1553 */ 1554 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag); 1555 static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag); 1556 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag); 1557 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag); 1558 1559 /* 1560 * Multi-host Ioctl Prototypes 1561 */ 1562 static int sd_check_mhd(dev_t dev, int interval); 1563 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1564 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1565 static char *sd_sname(uchar_t status); 1566 static void sd_mhd_resvd_recover(void *arg); 1567 static void sd_resv_reclaim_thread(void); 1568 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1569 static int sd_reserve_release(dev_t dev, int cmd); 1570 static void sd_rmv_resv_reclaim_req(dev_t dev); 1571 static void sd_mhd_reset_notify_cb(caddr_t arg); 1572 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1573 mhioc_inkeys_t *usrp, int flag); 1574 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1575 mhioc_inresvs_t *usrp, int flag); 1576 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1577 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1578 static int sd_mhdioc_release(dev_t dev); 1579 static int sd_mhdioc_register_devid(dev_t dev); 1580 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1581 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1582 1583 /* 1584 * SCSI removable prototypes 1585 */ 1586 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1587 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1588 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1589 static int sr_pause_resume(dev_t dev, int mode); 1590 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1591 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1592 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1593 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1594 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1595 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1596 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1597 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1598 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1599 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1600 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1601 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1602 static int sr_eject(dev_t dev); 1603 static void sr_ejected(register struct sd_lun *un); 1604 static int sr_check_wp(dev_t dev); 1605 static opaque_t sd_watch_request_submit(struct sd_lun *un); 1606 static int sd_check_media(dev_t dev, enum dkio_state state); 1607 static int sd_media_watch_cb(caddr_t arg, struct
scsi_watch_result *resultp); 1608 static void sd_delayed_cv_broadcast(void *arg); 1609 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1610 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1611 1612 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page); 1613 1614 /* 1615 * Function prototypes for the non-512 support (DVDRAM, MO etc.) functions. 1616 */ 1617 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag); 1618 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1619 static void sd_wm_cache_destructor(void *wm, void *un); 1620 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1621 daddr_t endb, ushort_t typ); 1622 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1623 daddr_t endb); 1624 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1625 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1626 static void sd_read_modify_write_task(void *arg); 1627 static int 1628 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1629 struct buf **bpp); 1630 1631 1632 /* 1633 * Function prototypes for failfast support. 1634 */ 1635 static void sd_failfast_flushq(struct sd_lun *un); 1636 static int sd_failfast_flushq_callback(struct buf *bp); 1637 1638 /* 1639 * Function prototypes to check for LSI devices 1640 */ 1641 static void sd_is_lsi(struct sd_lun *un); 1642 1643 /* 1644 * Function prototypes for partial DMA support 1645 */ 1646 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1647 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1648 1649 1650 /* Function prototypes for cmlb */ 1651 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1652 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1653 1654 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1655 1656 /* 1657 * For printing the RMW warning message in a timely manner 1658 */ 1659 static void sd_rmw_msg_print_handler(void *arg); 1660 1661 /* 1662 * Constants for failfast support: 1663 * 1664 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1665 * failfast processing being performed. 1666 * 1667 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1668 * failfast processing on all bufs with B_FAILFAST set. 1669 */ 1670 1671 #define SD_FAILFAST_INACTIVE 0 1672 #define SD_FAILFAST_ACTIVE 1 1673 1674 /* 1675 * Bitmask to control behavior of buf(9S) flushes when a transition to 1676 * the failfast state occurs. Optional bits include: 1677 * 1678 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1679 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1680 * be flushed. 1681 * 1682 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1683 * driver, in addition to the regular wait queue. This includes the xbuf 1684 * queues. When clear, only the driver's wait queue will be flushed. 1685 */ 1686 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1687 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1688 1689 /* 1690 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1691 * to flush all queues within the driver.
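 *
 * As an illustrative sketch only (not a shipped default), both behaviors
 * could be selected by patching this tunable, e.g. from /etc/system:
 *
 *	set sd:sd_failfast_flushctl = 0x3
 *
 * which corresponds to (SD_FAILFAST_FLUSH_ALL_BUFS |
 * SD_FAILFAST_FLUSH_ALL_QUEUES).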
1692 */ 1693 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1694 1695 1696 /* 1697 * SD Testing Fault Injection 1698 */ 1699 #ifdef SD_FAULT_INJECTION 1700 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1701 static void sd_faultinjection(struct scsi_pkt *pktp); 1702 static void sd_injection_log(char *buf, struct sd_lun *un); 1703 #endif 1704 1705 /* 1706 * Device driver ops vector 1707 */ 1708 static struct cb_ops sd_cb_ops = { 1709 sdopen, /* open */ 1710 sdclose, /* close */ 1711 sdstrategy, /* strategy */ 1712 nodev, /* print */ 1713 sddump, /* dump */ 1714 sdread, /* read */ 1715 sdwrite, /* write */ 1716 sdioctl, /* ioctl */ 1717 nodev, /* devmap */ 1718 nodev, /* mmap */ 1719 nodev, /* segmap */ 1720 nochpoll, /* poll */ 1721 sd_prop_op, /* cb_prop_op */ 1722 0, /* streamtab */ 1723 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1724 CB_REV, /* cb_rev */ 1725 sdaread, /* async I/O read entry point */ 1726 sdawrite /* async I/O write entry point */ 1727 }; 1728 1729 struct dev_ops sd_ops = { 1730 DEVO_REV, /* devo_rev, */ 1731 0, /* refcnt */ 1732 sdinfo, /* info */ 1733 nulldev, /* identify */ 1734 sdprobe, /* probe */ 1735 sdattach, /* attach */ 1736 sddetach, /* detach */ 1737 nodev, /* reset */ 1738 &sd_cb_ops, /* driver operations */ 1739 NULL, /* bus operations */ 1740 sdpower, /* power */ 1741 ddi_quiesce_not_needed, /* quiesce */ 1742 }; 1743 1744 /* 1745 * This is the loadable module wrapper. 1746 */ 1747 #include <sys/modctl.h> 1748 1749 static struct modldrv modldrv = { 1750 &mod_driverops, /* Type of module. This one is a driver */ 1751 SD_MODULE_NAME, /* Module name. */ 1752 &sd_ops /* driver ops */ 1753 }; 1754 1755 static struct modlinkage modlinkage = { 1756 MODREV_1, &modldrv, NULL 1757 }; 1758 1759 static cmlb_tg_ops_t sd_tgops = { 1760 TG_DK_OPS_VERSION_1, 1761 sd_tg_rdwr, 1762 sd_tg_getinfo 1763 }; 1764 1765 static struct scsi_asq_key_strings sd_additional_codes[] = { 1766 0x81, 0, "Logical Unit is Reserved", 1767 0x85, 0, "Audio Address Not Valid", 1768 0xb6, 0, "Media Load Mechanism Failed", 1769 0xB9, 0, "Audio Play Operation Aborted", 1770 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1771 0x53, 2, "Medium removal prevented", 1772 0x6f, 0, "Authentication failed during key exchange", 1773 0x6f, 1, "Key not present", 1774 0x6f, 2, "Key not established", 1775 0x6f, 3, "Read without proper authentication", 1776 0x6f, 4, "Mismatched region to this logical unit", 1777 0x6f, 5, "Region reset count error", 1778 0xffff, 0x0, NULL 1779 }; 1780 1781 1782 /* 1783 * Struct for passing printing information for sense data messages 1784 */ 1785 struct sd_sense_info { 1786 int ssi_severity; 1787 int ssi_pfa_flag; 1788 }; 1789 1790 /* 1791 * Table of function pointers for iostart-side routines. Separate "chains" 1792 * of layered function calls are formed by placing the function pointers 1793 * sequentially in the desired order. Functions are called according to an 1794 * incrementing table index ordering. The last function in each chain must 1795 * be sd_core_iostart(). The corresponding iodone-side routines are expected 1796 * in the sd_iodone_chain[] array. 1797 * 1798 * Note: It may seem more natural to organize both the iostart and iodone 1799 * functions together, into an array of structures (or some similar 1800 * organization) with a common index, rather than two separate arrays which 1801 * must be maintained in synchronization. 
The purpose of this division is 1802 * to achieve improved performance: individual arrays allow for more 1803 * effective cache line utilization on certain platforms. 1804 */ 1805 1806 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1807 1808 1809 static sd_chain_t sd_iostart_chain[] = { 1810 1811 /* Chain for buf IO for disk drive targets (PM enabled) */ 1812 sd_mapblockaddr_iostart, /* Index: 0 */ 1813 sd_pm_iostart, /* Index: 1 */ 1814 sd_core_iostart, /* Index: 2 */ 1815 1816 /* Chain for buf IO for disk drive targets (PM disabled) */ 1817 sd_mapblockaddr_iostart, /* Index: 3 */ 1818 sd_core_iostart, /* Index: 4 */ 1819 1820 /* 1821 * Chain for buf IO for removable-media or large sector size 1822 * disk drive targets with RMW needed (PM enabled) 1823 */ 1824 sd_mapblockaddr_iostart, /* Index: 5 */ 1825 sd_mapblocksize_iostart, /* Index: 6 */ 1826 sd_pm_iostart, /* Index: 7 */ 1827 sd_core_iostart, /* Index: 8 */ 1828 1829 /* 1830 * Chain for buf IO for removable-media or large sector size 1831 * disk drive targets with RMW needed (PM disabled) 1832 */ 1833 sd_mapblockaddr_iostart, /* Index: 9 */ 1834 sd_mapblocksize_iostart, /* Index: 10 */ 1835 sd_core_iostart, /* Index: 11 */ 1836 1837 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1838 sd_mapblockaddr_iostart, /* Index: 12 */ 1839 sd_checksum_iostart, /* Index: 13 */ 1840 sd_pm_iostart, /* Index: 14 */ 1841 sd_core_iostart, /* Index: 15 */ 1842 1843 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1844 sd_mapblockaddr_iostart, /* Index: 16 */ 1845 sd_checksum_iostart, /* Index: 17 */ 1846 sd_core_iostart, /* Index: 18 */ 1847 1848 /* Chain for USCSI commands (all targets) */ 1849 sd_pm_iostart, /* Index: 19 */ 1850 sd_core_iostart, /* Index: 20 */ 1851 1852 /* Chain for checksumming USCSI commands (all targets) */ 1853 sd_checksum_uscsi_iostart, /* Index: 21 */ 1854 sd_pm_iostart, /* Index: 22 */ 1855 sd_core_iostart, /* Index: 23 */ 1856 1857 /* Chain for "direct" USCSI commands (all targets) */ 1858 sd_core_iostart, /* Index: 24 */ 1859 1860 /* Chain for "direct priority" USCSI commands (all targets) */ 1861 sd_core_iostart, /* Index: 25 */ 1862 1863 /* 1864 * Chain for buf IO for large sector size disk drive targets 1865 * with RMW needed with checksumming (PM enabled) 1866 */ 1867 sd_mapblockaddr_iostart, /* Index: 26 */ 1868 sd_mapblocksize_iostart, /* Index: 27 */ 1869 sd_checksum_iostart, /* Index: 28 */ 1870 sd_pm_iostart, /* Index: 29 */ 1871 sd_core_iostart, /* Index: 30 */ 1872 1873 /* 1874 * Chain for buf IO for large sector size disk drive targets 1875 * with RMW needed with checksumming (PM disabled) 1876 */ 1877 sd_mapblockaddr_iostart, /* Index: 31 */ 1878 sd_mapblocksize_iostart, /* Index: 32 */ 1879 sd_checksum_iostart, /* Index: 33 */ 1880 sd_core_iostart, /* Index: 34 */ 1881 1882 }; 1883 1884 /* 1885 * Macros to locate the first function of each iostart chain in the 1886 * sd_iostart_chain[] array. These are located by the index in the array.
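 *
 * Illustrative sketch (hypothetical caller shown for clarity, not actual
 * driver code): an IO on a plain disk with power management enabled would
 * enter the chain with
 *
 *	xp->xb_chain_iostart = SD_CHAIN_DISK_IOSTART;
 *	SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);
 *
 * so that sd_mapblockaddr_iostart() (Index: 0) runs first, followed by
 * sd_pm_iostart() and finally sd_core_iostart().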
1887 */ 1888 #define SD_CHAIN_DISK_IOSTART 0 1889 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1890 #define SD_CHAIN_MSS_DISK_IOSTART 5 1891 #define SD_CHAIN_RMMEDIA_IOSTART 5 1892 #define SD_CHAIN_MSS_DISK_IOSTART_NO_PM 9 1893 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1894 #define SD_CHAIN_CHKSUM_IOSTART 12 1895 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1896 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1897 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1898 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1899 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1900 #define SD_CHAIN_MSS_CHKSUM_IOSTART 26 1901 #define SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM 31 1902 1903 1904 /* 1905 * Table of function pointers for the iodone-side routines for the driver- 1906 * internal layering mechanism. The calling sequence for iodone routines 1907 * uses a decrementing table index, so the last routine called in a chain 1908 * must be at the lowest array index location for that chain. The last 1909 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1910 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1911 * of the functions in an iodone side chain must correspond to the ordering 1912 * of the iostart routines for that chain. Note that there is no iodone 1913 * side routine that corresponds to sd_core_iostart(), so there is no 1914 * entry in the table for this. 1915 */ 1916 1917 static sd_chain_t sd_iodone_chain[] = { 1918 1919 /* Chain for buf IO for disk drive targets (PM enabled) */ 1920 sd_buf_iodone, /* Index: 0 */ 1921 sd_mapblockaddr_iodone, /* Index: 1 */ 1922 sd_pm_iodone, /* Index: 2 */ 1923 1924 /* Chain for buf IO for disk drive targets (PM disabled) */ 1925 sd_buf_iodone, /* Index: 3 */ 1926 sd_mapblockaddr_iodone, /* Index: 4 */ 1927 1928 /* 1929 * Chain for buf IO for removable-media or large sector size 1930 * disk drive targets with RMW needed (PM enabled) 1931 */ 1932 sd_buf_iodone, /* Index: 5 */ 1933 sd_mapblockaddr_iodone, /* Index: 6 */ 1934 sd_mapblocksize_iodone, /* Index: 7 */ 1935 sd_pm_iodone, /* Index: 8 */ 1936 1937 /* 1938 * Chain for buf IO for removable-media or large sector size 1939 * disk drive targets with RMW needed (PM disabled) 1940 */ 1941 sd_buf_iodone, /* Index: 9 */ 1942 sd_mapblockaddr_iodone, /* Index: 10 */ 1943 sd_mapblocksize_iodone, /* Index: 11 */ 1944 1945 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1946 sd_buf_iodone, /* Index: 12 */ 1947 sd_mapblockaddr_iodone, /* Index: 13 */ 1948 sd_checksum_iodone, /* Index: 14 */ 1949 sd_pm_iodone, /* Index: 15 */ 1950 1951 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1952 sd_buf_iodone, /* Index: 16 */ 1953 sd_mapblockaddr_iodone, /* Index: 17 */ 1954 sd_checksum_iodone, /* Index: 18 */ 1955 1956 /* Chain for USCSI commands (non-checksum targets) */ 1957 sd_uscsi_iodone, /* Index: 19 */ 1958 sd_pm_iodone, /* Index: 20 */ 1959 1960 /* Chain for USCSI commands (checksum targets) */ 1961 sd_uscsi_iodone, /* Index: 21 */ 1962 sd_checksum_uscsi_iodone, /* Index: 22 */ 1963 sd_pm_iodone, /* Index: 23 */ 1964 1965 /* Chain for "direct" USCSI commands (all targets) */ 1966 sd_uscsi_iodone, /* Index: 24 */ 1967 1968 /* Chain for "direct priority" USCSI commands (all targets) */ 1969 sd_uscsi_iodone, /* Index: 25 */ 1970 1971 /* 1972 * Chain for buf IO for large sector size disk drive targets 1973 * with checksumming (PM enabled) 1974 */ 1975 sd_buf_iodone, /* Index: 26 */ 1976 sd_mapblockaddr_iodone, /* Index: 27 */ 1977 sd_mapblocksize_iodone, /* Index: 28 */ 1978
sd_checksum_iodone, /* Index: 29 */ 1979 sd_pm_iodone, /* Index: 30 */ 1980 1981 /* 1982 * Chain for buf IO for large sector size disk drive targets 1983 * with checksumming (PM disabled) 1984 */ 1985 sd_buf_iodone, /* Index: 31 */ 1986 sd_mapblockaddr_iodone, /* Index: 32 */ 1987 sd_mapblocksize_iodone, /* Index: 33 */ 1988 sd_checksum_iodone, /* Index: 34 */ 1989 }; 1990 1991 1992 /* 1993 * Macros to locate the "first" function in the sd_iodone_chain[] array for 1994 * each iodone-side chain. These are located by the array index, but as the 1995 * iodone side functions are called in a decrementing-index order, the 1996 * highest index number in each chain must be specified (as these correspond 1997 * to the first function in the iodone chain that will be called by the core 1998 * at IO completion time). 1999 */ 2000 2001 #define SD_CHAIN_DISK_IODONE 2 2002 #define SD_CHAIN_DISK_IODONE_NO_PM 4 2003 #define SD_CHAIN_RMMEDIA_IODONE 8 2004 #define SD_CHAIN_MSS_DISK_IODONE 8 2005 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11 2006 #define SD_CHAIN_MSS_DISK_IODONE_NO_PM 11 2007 #define SD_CHAIN_CHKSUM_IODONE 15 2008 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18 2009 #define SD_CHAIN_USCSI_CMD_IODONE 20 2010 #define SD_CHAIN_USCSI_CHKSUM_IODONE 23 2011 #define SD_CHAIN_DIRECT_CMD_IODONE 24 2012 #define SD_CHAIN_PRIORITY_CMD_IODONE 25 2013 #define SD_CHAIN_MSS_CHKSUM_IODONE 30 2014 #define SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM 34 2015 2016 2017 2018 /* 2019 * Array to map a layering chain index to the appropriate initpkt routine. 2020 * The redundant entries are present so that the index used for accessing 2021 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 2022 * with this table as well. 2023 */ 2024 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **); 2025 2026 static sd_initpkt_t sd_initpkt_map[] = { 2027 2028 /* Chain for buf IO for disk drive targets (PM enabled) */ 2029 sd_initpkt_for_buf, /* Index: 0 */ 2030 sd_initpkt_for_buf, /* Index: 1 */ 2031 sd_initpkt_for_buf, /* Index: 2 */ 2032 2033 /* Chain for buf IO for disk drive targets (PM disabled) */ 2034 sd_initpkt_for_buf, /* Index: 3 */ 2035 sd_initpkt_for_buf, /* Index: 4 */ 2036 2037 /* 2038 * Chain for buf IO for removable-media or large sector size 2039 * disk drive targets (PM enabled) 2040 */ 2041 sd_initpkt_for_buf, /* Index: 5 */ 2042 sd_initpkt_for_buf, /* Index: 6 */ 2043 sd_initpkt_for_buf, /* Index: 7 */ 2044 sd_initpkt_for_buf, /* Index: 8 */ 2045 2046 /* 2047 * Chain for buf IO for removable-media or large sector size 2048 * disk drive targets (PM disabled) 2049 */ 2050 sd_initpkt_for_buf, /* Index: 9 */ 2051 sd_initpkt_for_buf, /* Index: 10 */ 2052 sd_initpkt_for_buf, /* Index: 11 */ 2053 2054 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2055 sd_initpkt_for_buf, /* Index: 12 */ 2056 sd_initpkt_for_buf, /* Index: 13 */ 2057 sd_initpkt_for_buf, /* Index: 14 */ 2058 sd_initpkt_for_buf, /* Index: 15 */ 2059 2060 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2061 sd_initpkt_for_buf, /* Index: 16 */ 2062 sd_initpkt_for_buf, /* Index: 17 */ 2063 sd_initpkt_for_buf, /* Index: 18 */ 2064 2065 /* Chain for USCSI commands (non-checksum targets) */ 2066 sd_initpkt_for_uscsi, /* Index: 19 */ 2067 sd_initpkt_for_uscsi, /* Index: 20 */ 2068 2069 /* Chain for USCSI commands (checksum targets) */ 2070 sd_initpkt_for_uscsi, /* Index: 21 */ 2071 sd_initpkt_for_uscsi, /* Index: 22 */ 2072 sd_initpkt_for_uscsi, /* Index: 23 */ 2073 2074 /* Chain for "direct" USCSI
commands (all targets) */ 2075 sd_initpkt_for_uscsi, /* Index: 24 */ 2076 2077 /* Chain for "direct priority" USCSI commands (all targets) */ 2078 sd_initpkt_for_uscsi, /* Index: 25 */ 2079 2080 /* 2081 * Chain for buf IO for large sector size disk drive targets 2082 * with checksumming (PM enabled) 2083 */ 2084 sd_initpkt_for_buf, /* Index: 26 */ 2085 sd_initpkt_for_buf, /* Index: 27 */ 2086 sd_initpkt_for_buf, /* Index: 28 */ 2087 sd_initpkt_for_buf, /* Index: 29 */ 2088 sd_initpkt_for_buf, /* Index: 30 */ 2089 2090 /* 2091 * Chain for buf IO for large sector size disk drive targets 2092 * with checksumming (PM disabled) 2093 */ 2094 sd_initpkt_for_buf, /* Index: 31 */ 2095 sd_initpkt_for_buf, /* Index: 32 */ 2096 sd_initpkt_for_buf, /* Index: 33 */ 2097 sd_initpkt_for_buf, /* Index: 34 */ 2098 }; 2099 2100 2101 /* 2102 * Array to map a layering chain index to the appropriate destroypkt routine. 2103 * The redundant entries are present so that the index used for accessing 2104 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 2105 * with this table as well. 2106 */ 2107 typedef void (*sd_destroypkt_t)(struct buf *); 2108 2109 static sd_destroypkt_t sd_destroypkt_map[] = { 2110 2111 /* Chain for buf IO for disk drive targets (PM enabled) */ 2112 sd_destroypkt_for_buf, /* Index: 0 */ 2113 sd_destroypkt_for_buf, /* Index: 1 */ 2114 sd_destroypkt_for_buf, /* Index: 2 */ 2115 2116 /* Chain for buf IO for disk drive targets (PM disabled) */ 2117 sd_destroypkt_for_buf, /* Index: 3 */ 2118 sd_destroypkt_for_buf, /* Index: 4 */ 2119 2120 /* 2121 * Chain for buf IO for removable-media or large sector size 2122 * disk drive targets (PM enabled) 2123 */ 2124 sd_destroypkt_for_buf, /* Index: 5 */ 2125 sd_destroypkt_for_buf, /* Index: 6 */ 2126 sd_destroypkt_for_buf, /* Index: 7 */ 2127 sd_destroypkt_for_buf, /* Index: 8 */ 2128 2129 /* 2130 * Chain for buf IO for removable-media or large sector size 2131 * disk drive targets (PM disabled) 2132 */ 2133 sd_destroypkt_for_buf, /* Index: 9 */ 2134 sd_destroypkt_for_buf, /* Index: 10 */ 2135 sd_destroypkt_for_buf, /* Index: 11 */ 2136 2137 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2138 sd_destroypkt_for_buf, /* Index: 12 */ 2139 sd_destroypkt_for_buf, /* Index: 13 */ 2140 sd_destroypkt_for_buf, /* Index: 14 */ 2141 sd_destroypkt_for_buf, /* Index: 15 */ 2142 2143 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2144 sd_destroypkt_for_buf, /* Index: 16 */ 2145 sd_destroypkt_for_buf, /* Index: 17 */ 2146 sd_destroypkt_for_buf, /* Index: 18 */ 2147 2148 /* Chain for USCSI commands (non-checksum targets) */ 2149 sd_destroypkt_for_uscsi, /* Index: 19 */ 2150 sd_destroypkt_for_uscsi, /* Index: 20 */ 2151 2152 /* Chain for USCSI commands (checksum targets) */ 2153 sd_destroypkt_for_uscsi, /* Index: 21 */ 2154 sd_destroypkt_for_uscsi, /* Index: 22 */ 2155 sd_destroypkt_for_uscsi, /* Index: 23 */ 2156 2157 /* Chain for "direct" USCSI commands (all targets) */ 2158 sd_destroypkt_for_uscsi, /* Index: 24 */ 2159 2160 /* Chain for "direct priority" USCSI commands (all targets) */ 2161 sd_destroypkt_for_uscsi, /* Index: 25 */ 2162 2163 /* 2164 * Chain for buf IO for large sector size disk drive targets 2165 * with checksumming (PM enabled) 2166 */ 2167 sd_destroypkt_for_buf, /* Index: 26 */ 2168 sd_destroypkt_for_buf, /* Index: 27 */ 2169 sd_destroypkt_for_buf, /* Index: 28 */ 2170 sd_destroypkt_for_buf, /* Index: 29 */ 2171 sd_destroypkt_for_buf, /* Index: 30 */ 2172 2173 /* 2174 * Chain
for buf IO for large sector size disk drive targets 2175 * with checksumming (PM disabled) 2176 */ 2177 sd_destroypkt_for_buf, /* Index: 31 */ 2178 sd_destroypkt_for_buf, /* Index: 32 */ 2179 sd_destroypkt_for_buf, /* Index: 33 */ 2180 sd_destroypkt_for_buf, /* Index: 34 */ 2181 }; 2182 2183 2184 2185 /* 2186 * Array to map a layering chain index to the appropriate chain "type". 2187 * The chain type indicates a specific property/usage of the chain. 2188 * The redundant entries are present so that the index used for accessing 2189 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 2190 * with this table as well. 2191 */ 2192 2193 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */ 2194 #define SD_CHAIN_BUFIO 1 /* regular buf IO */ 2195 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */ 2196 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */ 2197 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */ 2198 /* (for error recovery) */ 2199 2200 static int sd_chain_type_map[] = { 2201 2202 /* Chain for buf IO for disk drive targets (PM enabled) */ 2203 SD_CHAIN_BUFIO, /* Index: 0 */ 2204 SD_CHAIN_BUFIO, /* Index: 1 */ 2205 SD_CHAIN_BUFIO, /* Index: 2 */ 2206 2207 /* Chain for buf IO for disk drive targets (PM disabled) */ 2208 SD_CHAIN_BUFIO, /* Index: 3 */ 2209 SD_CHAIN_BUFIO, /* Index: 4 */ 2210 2211 /* 2212 * Chain for buf IO for removable-media or large sector size 2213 * disk drive targets (PM enabled) 2214 */ 2215 SD_CHAIN_BUFIO, /* Index: 5 */ 2216 SD_CHAIN_BUFIO, /* Index: 6 */ 2217 SD_CHAIN_BUFIO, /* Index: 7 */ 2218 SD_CHAIN_BUFIO, /* Index: 8 */ 2219 2220 /* 2221 * Chain for buf IO for removable-media or large sector size 2222 * disk drive targets (PM disabled) 2223 */ 2224 SD_CHAIN_BUFIO, /* Index: 9 */ 2225 SD_CHAIN_BUFIO, /* Index: 10 */ 2226 SD_CHAIN_BUFIO, /* Index: 11 */ 2227 2228 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 2229 SD_CHAIN_BUFIO, /* Index: 12 */ 2230 SD_CHAIN_BUFIO, /* Index: 13 */ 2231 SD_CHAIN_BUFIO, /* Index: 14 */ 2232 SD_CHAIN_BUFIO, /* Index: 15 */ 2233 2234 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 2235 SD_CHAIN_BUFIO, /* Index: 16 */ 2236 SD_CHAIN_BUFIO, /* Index: 17 */ 2237 SD_CHAIN_BUFIO, /* Index: 18 */ 2238 2239 /* Chain for USCSI commands (non-checksum targets) */ 2240 SD_CHAIN_USCSI, /* Index: 19 */ 2241 SD_CHAIN_USCSI, /* Index: 20 */ 2242 2243 /* Chain for USCSI commands (checksum targets) */ 2244 SD_CHAIN_USCSI, /* Index: 21 */ 2245 SD_CHAIN_USCSI, /* Index: 22 */ 2246 SD_CHAIN_USCSI, /* Index: 23 */ 2247 2248 /* Chain for "direct" USCSI commands (all targets) */ 2249 SD_CHAIN_DIRECT, /* Index: 24 */ 2250 2251 /* Chain for "direct priority" USCSI commands (all targets) */ 2252 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */ 2253 2254 /* 2255 * Chain for buf IO for large sector size disk drive targets 2256 * with checksumming (PM enabled) 2257 */ 2258 SD_CHAIN_BUFIO, /* Index: 26 */ 2259 SD_CHAIN_BUFIO, /* Index: 27 */ 2260 SD_CHAIN_BUFIO, /* Index: 28 */ 2261 SD_CHAIN_BUFIO, /* Index: 29 */ 2262 SD_CHAIN_BUFIO, /* Index: 30 */ 2263 2264 /* 2265 * Chain for buf IO for large sector size disk drive targets 2266 * with checksumming (PM disabled) 2267 */ 2268 SD_CHAIN_BUFIO, /* Index: 31 */ 2269 SD_CHAIN_BUFIO, /* Index: 32 */ 2270 SD_CHAIN_BUFIO, /* Index: 33 */ 2271 SD_CHAIN_BUFIO, /* Index: 34 */ 2272 }; 2273 2274 2275 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain.
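 * (Illustrative sketch only: completion and error-handling code can branch
 * on the origin of an IO, e.g.
 *	if (SD_IS_BUFIO(xp))
 *		... take the buf(9S) completion path ...
 * where xp is the sd_xbuf attached to the buf.)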
*/ 2276 #define SD_IS_BUFIO(xp) \ 2277 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO) 2278 2279 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */ 2280 #define SD_IS_DIRECT_PRIORITY(xp) \ 2281 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY) 2282 2283 2284 2285 /* 2286 * Struct, array, and macros to map a specific chain to the appropriate 2287 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays. 2288 * 2289 * The sd_chain_index_map[] array is used at attach time to set the various 2290 * un_xxx_chain type members of the sd_lun softstate to the specific layering 2291 * chain to be used with the instance. This allows different instances to use 2292 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart 2293 * and xb_chain_iodone index values in the sd_xbuf are initialized to these 2294 * values at sd_xbuf init time, this allows (1) layering chains to be changed 2295 * dynamically and without the use of locking; and (2) a layer to update the 2296 * xb_chain_io[start|done] member in a given xbuf with its current index value, 2297 * to allow for deferred processing of an IO within the same chain from a 2298 * different execution context. 2299 */ 2300 2301 struct sd_chain_index { 2302 int sci_iostart_index; 2303 int sci_iodone_index; 2304 }; 2305 2306 static struct sd_chain_index sd_chain_index_map[] = { 2307 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2308 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2309 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2310 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2311 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2312 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2313 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2314 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2315 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2316 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2317 { SD_CHAIN_MSS_CHKSUM_IOSTART, SD_CHAIN_MSS_CHKSUM_IODONE }, 2318 { SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM, SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM }, 2319 2320 }; 2321 2322 2323 /* 2324 * The following are indexes into the sd_chain_index_map[] array. 2325 */ 2326 2327 /* un->un_buf_chain_type must be set to one of these */ 2328 #define SD_CHAIN_INFO_DISK 0 2329 #define SD_CHAIN_INFO_DISK_NO_PM 1 2330 #define SD_CHAIN_INFO_RMMEDIA 2 2331 #define SD_CHAIN_INFO_MSS_DISK 2 2332 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2333 #define SD_CHAIN_INFO_MSS_DSK_NO_PM 3 2334 #define SD_CHAIN_INFO_CHKSUM 4 2335 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2336 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM 10 2337 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM 11 2338 2339 /* un->un_uscsi_chain_type must be set to one of these */ 2340 #define SD_CHAIN_INFO_USCSI_CMD 6 2341 /* USCSI with PM disabled is the same as DIRECT */ 2342 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2343 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2344 2345 /* un->un_direct_chain_type must be set to one of these */ 2346 #define SD_CHAIN_INFO_DIRECT_CMD 8 2347 2348 /* un->un_priority_chain_type must be set to one of these */ 2349 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2350 2351 /* size for devid inquiries */ 2352 #define MAX_INQUIRY_SIZE 0xF0 2353 2354 /* 2355 * Macros used by functions to pass a given buf(9S) struct along to the 2356 * next function in the layering chain for further processing.
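 *
 * As an illustrative sketch only (sd_example_iostart is hypothetical), a
 * layering routine typically performs its own processing and then forwards
 * the buf:
 *
 *	static void
 *	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		... layer-specific processing of bp ...
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}
 *
 * and the matching iodone routine walks back down the chain with
 * SD_NEXT_IODONE(index, un, bp).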
2357 * 2358 * In the following macros, passing more than three arguments to the called 2359 * routines causes the optimizer for the SPARC compiler to stop doing tail 2360 * call elimination which results in significant performance degradation. 2361 */ 2362 #define SD_BEGIN_IOSTART(index, un, bp) \ 2363 ((*(sd_iostart_chain[index]))(index, un, bp)) 2364 2365 #define SD_BEGIN_IODONE(index, un, bp) \ 2366 ((*(sd_iodone_chain[index]))(index, un, bp)) 2367 2368 #define SD_NEXT_IOSTART(index, un, bp) \ 2369 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2370 2371 #define SD_NEXT_IODONE(index, un, bp) \ 2372 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2373 2374 /* 2375 * Function: _init 2376 * 2377 * Description: This is the driver _init(9E) entry point. 2378 * 2379 * Return Code: Returns the value from mod_install(9F) or 2380 * ddi_soft_state_init(9F) as appropriate. 2381 * 2382 * Context: Called when the driver module is loaded. 2383 */ 2384 2385 int 2386 _init(void) 2387 { 2388 int err; 2389 2390 /* establish driver name from module name */ 2391 sd_label = (char *)mod_modname(&modlinkage); 2392 2393 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2394 SD_MAXUNIT); 2395 if (err != 0) { 2396 return (err); 2397 } 2398 2399 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2400 2401 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2402 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2403 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2404 2405 /* 2406 * it's ok to init here even for fibre devices 2407 */ 2408 sd_scsi_probe_cache_init(); 2409 2410 sd_scsi_target_lun_init(); 2411 2412 /* 2413 * Creating taskq before mod_install ensures that all callers (threads) 2414 * that enter the module after a successful mod_install encounter 2415 * a valid taskq. 2416 */ 2417 sd_taskq_create(); 2418 2419 err = mod_install(&modlinkage); 2420 if (err != 0) { 2421 /* delete taskq if install fails */ 2422 sd_taskq_delete(); 2423 2424 mutex_destroy(&sd_log_mutex); 2425 2426 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2427 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2428 cv_destroy(&sd_tr.srq_inprocess_cv); 2429 2430 sd_scsi_probe_cache_fini(); 2431 2432 sd_scsi_target_lun_fini(); 2433 2434 ddi_soft_state_fini(&sd_state); 2435 2436 return (err); 2437 } 2438 2439 return (err); 2440 } 2441 2442 2443 /* 2444 * Function: _fini 2445 * 2446 * Description: This is the driver _fini(9E) entry point. 2447 * 2448 * Return Code: Returns the value from mod_remove(9F) 2449 * 2450 * Context: Called when the driver module is unloaded. 2451 */ 2452 2453 int 2454 _fini(void) 2455 { 2456 int err; 2457 2458 if ((err = mod_remove(&modlinkage)) != 0) { 2459 return (err); 2460 } 2461 2462 sd_taskq_delete(); 2463 2464 mutex_destroy(&sd_log_mutex); 2465 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2466 2467 sd_scsi_probe_cache_fini(); 2468 2469 sd_scsi_target_lun_fini(); 2470 2471 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2472 cv_destroy(&sd_tr.srq_inprocess_cv); 2473 2474 ddi_soft_state_fini(&sd_state); 2475 2476 return (err); 2477 } 2478 2479 2480 /* 2481 * Function: _info 2482 * 2483 * Description: This is the driver _info(9E) entry point. 2484 * 2485 * Arguments: modinfop - pointer to the driver modinfo structure 2486 * 2487 * Return Code: Returns the value from mod_info(9F).
2488 * 2489 * Context: Kernel thread context 2490 */ 2491 2492 int 2493 _info(struct modinfo *modinfop) 2494 { 2495 return (mod_info(&modlinkage, modinfop)); 2496 } 2497 2498 2499 /* 2500 * The following routines implement the driver message logging facility. 2501 * They provide component- and level-based debug output filtering. 2502 * Output may also be restricted to messages for a single instance by 2503 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2504 * to NULL, then messages for all instances are printed. 2505 * 2506 * These routines have been cloned from each other due to the language 2507 * constraints of macros and variable argument list processing. 2508 */ 2509 2510 2511 /* 2512 * Function: sd_log_err 2513 * 2514 * Description: This routine is called by the SD_ERROR macro for debug 2515 * logging of error conditions. 2516 * 2517 * Arguments: comp - driver component being logged 2518 * un - pointer to driver soft state (unit) structure 2519 * fmt - error string and format to be logged 2520 */ 2521 2522 static void 2523 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2524 { 2525 va_list ap; 2526 dev_info_t *dev; 2527 2528 ASSERT(un != NULL); 2529 dev = SD_DEVINFO(un); 2530 ASSERT(dev != NULL); 2531 2532 /* 2533 * Filter messages based on the global component and level masks. 2534 * Also print if un matches the value of sd_debug_un, or if 2535 * sd_debug_un is set to NULL. 2536 */ 2537 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2538 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2539 mutex_enter(&sd_log_mutex); 2540 va_start(ap, fmt); 2541 (void) vsprintf(sd_log_buf, fmt, ap); 2542 va_end(ap); 2543 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2544 mutex_exit(&sd_log_mutex); 2545 } 2546 #ifdef SD_FAULT_INJECTION 2547 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2548 if (un->sd_injection_mask & comp) { 2549 mutex_enter(&sd_log_mutex); 2550 va_start(ap, fmt); 2551 (void) vsprintf(sd_log_buf, fmt, ap); 2552 va_end(ap); 2553 sd_injection_log(sd_log_buf, un); 2554 mutex_exit(&sd_log_mutex); 2555 } 2556 #endif 2557 } 2558 2559 2560 /* 2561 * Function: sd_log_info 2562 * 2563 * Description: This routine is called by the SD_INFO macro for debug 2564 * logging of general purpose informational conditions. 2565 * 2566 * Arguments: component - driver component being logged 2567 * un - pointer to driver soft state (unit) structure 2568 * fmt - info string and format to be logged 2569 */ 2570 2571 static void 2572 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...) 2573 { 2574 va_list ap; 2575 dev_info_t *dev; 2576 2577 ASSERT(un != NULL); 2578 dev = SD_DEVINFO(un); 2579 ASSERT(dev != NULL); 2580 2581 /* 2582 * Filter messages based on the global component and level masks. 2583 * Also print if un matches the value of sd_debug_un, or if 2584 * sd_debug_un is set to NULL.
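	 * (Illustrative sketch, using the existing global tunables: with
	 *	sd_component_mask = 0xffffffff;
	 *	sd_level_mask = SD_LOGMASK_ERROR | SD_LOGMASK_INFO;
	 *	sd_debug_un = NULL;
	 * every instance logs error and info messages for all components.)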
2585 */ 2586 if ((sd_component_mask & component) && 2587 (sd_level_mask & SD_LOGMASK_INFO) && 2588 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2589 mutex_enter(&sd_log_mutex); 2590 va_start(ap, fmt); 2591 (void) vsprintf(sd_log_buf, fmt, ap); 2592 va_end(ap); 2593 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2594 mutex_exit(&sd_log_mutex); 2595 } 2596 #ifdef SD_FAULT_INJECTION 2597 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2598 if (un->sd_injection_mask & component) { 2599 mutex_enter(&sd_log_mutex); 2600 va_start(ap, fmt); 2601 (void) vsprintf(sd_log_buf, fmt, ap); 2602 va_end(ap); 2603 sd_injection_log(sd_log_buf, un); 2604 mutex_exit(&sd_log_mutex); 2605 } 2606 #endif 2607 } 2608 2609 2610 /* 2611 * Function: sd_log_trace 2612 * 2613 * Description: This routine is called by the SD_TRACE macro for debug 2614 * logging of trace conditions (i.e., function entry/exit). 2615 * 2616 * Arguments: component - driver component being logged 2617 * un - pointer to driver soft state (unit) structure 2618 * fmt - trace string and format to be logged 2619 */ 2620 2621 static void 2622 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2623 { 2624 va_list ap; 2625 dev_info_t *dev; 2626 2627 ASSERT(un != NULL); 2628 dev = SD_DEVINFO(un); 2629 ASSERT(dev != NULL); 2630 2631 /* 2632 * Filter messages based on the global component and level masks. 2633 * Also print if un matches the value of sd_debug_un, or if 2634 * sd_debug_un is set to NULL. 2635 */ 2636 if ((sd_component_mask & component) && 2637 (sd_level_mask & SD_LOGMASK_TRACE) && 2638 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2639 mutex_enter(&sd_log_mutex); 2640 va_start(ap, fmt); 2641 (void) vsprintf(sd_log_buf, fmt, ap); 2642 va_end(ap); 2643 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2644 mutex_exit(&sd_log_mutex); 2645 } 2646 #ifdef SD_FAULT_INJECTION 2647 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2648 if (un->sd_injection_mask & component) { 2649 mutex_enter(&sd_log_mutex); 2650 va_start(ap, fmt); 2651 (void) vsprintf(sd_log_buf, fmt, ap); 2652 va_end(ap); 2653 sd_injection_log(sd_log_buf, un); 2654 mutex_exit(&sd_log_mutex); 2655 } 2656 #endif 2657 } 2658 2659 2660 /* 2661 * Function: sdprobe 2662 * 2663 * Description: This is the driver probe(9e) entry point function. 2664 * 2665 * Arguments: devi - opaque device info handle 2666 * 2667 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2668 * DDI_PROBE_FAILURE: If the probe failed. 2669 * DDI_PROBE_PARTIAL: If the instance is not present now, 2670 * but may be present in the future. 2671 */ 2672 2673 static int 2674 sdprobe(dev_info_t *devi) 2675 { 2676 struct scsi_device *devp; 2677 int rval; 2678 int instance = ddi_get_instance(devi); 2679 2680 /* 2681 * if it wasn't for pln, sdprobe could actually be nulldev 2682 * in the "__fibre" case. 2683 */ 2684 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2685 return (DDI_PROBE_DONTCARE); 2686 } 2687 2688 devp = ddi_get_driver_private(devi); 2689 2690 if (devp == NULL) { 2691 /* Oops... nexus driver is misconfigured... */ 2692 return (DDI_PROBE_FAILURE); 2693 } 2694 2695 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2696 return (DDI_PROBE_PARTIAL); 2697 } 2698 2699 /* 2700 * Call the SCSA utility probe routine to see if we actually 2701 * have a target at this SCSI nexus.
2702 */ 2703 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2704 case SCSIPROBE_EXISTS: 2705 switch (devp->sd_inq->inq_dtype) { 2706 case DTYPE_DIRECT: 2707 rval = DDI_PROBE_SUCCESS; 2708 break; 2709 case DTYPE_RODIRECT: 2710 /* CDs etc. Can be removable media */ 2711 rval = DDI_PROBE_SUCCESS; 2712 break; 2713 case DTYPE_OPTICAL: 2714 /* 2715 * Rewritable optical drive HP115AA 2716 * Can also be removable media 2717 */ 2718 2719 /* 2720 * Do not attempt to bind to DTYPE_OPTICAL if 2721 * pre-Solaris 9 SPARC sd behavior is required 2722 * 2723 * If first time through and sd_dtype_optical_bind 2724 * has not been set in /etc/system, check properties 2725 */ 2726 2727 if (sd_dtype_optical_bind < 0) { 2728 sd_dtype_optical_bind = ddi_prop_get_int 2729 (DDI_DEV_T_ANY, devi, 0, 2730 "optical-device-bind", 1); 2731 } 2732 2733 if (sd_dtype_optical_bind == 0) { 2734 rval = DDI_PROBE_FAILURE; 2735 } else { 2736 rval = DDI_PROBE_SUCCESS; 2737 } 2738 break; 2739 2740 case DTYPE_NOTPRESENT: 2741 default: 2742 rval = DDI_PROBE_FAILURE; 2743 break; 2744 } 2745 break; 2746 default: 2747 rval = DDI_PROBE_PARTIAL; 2748 break; 2749 } 2750 2751 /* 2752 * This routine checks for resource allocation prior to freeing, 2753 * so it will take care of the "smart probing" case where a 2754 * scsi_probe() may or may not have been issued and will *not* 2755 * free previously-freed resources. 2756 */ 2757 scsi_unprobe(devp); 2758 return (rval); 2759 } 2760 2761 2762 /* 2763 * Function: sdinfo 2764 * 2765 * Description: This is the driver getinfo(9e) entry point function. 2766 * Given the device number, return the devinfo pointer from 2767 * the scsi_device structure or the instance number 2768 * associated with the dev_t. 2769 * 2770 * Arguments: dip - pointer to device info structure 2771 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2772 * DDI_INFO_DEVT2INSTANCE) 2773 * arg - driver dev_t 2774 * result - user buffer for request response 2775 * 2776 * Return Code: DDI_SUCCESS 2777 * DDI_FAILURE 2778 */ 2779 /* ARGSUSED */ 2780 static int 2781 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2782 { 2783 struct sd_lun *un; 2784 dev_t dev; 2785 int instance; 2786 int error; 2787 2788 switch (infocmd) { 2789 case DDI_INFO_DEVT2DEVINFO: 2790 dev = (dev_t)arg; 2791 instance = SDUNIT(dev); 2792 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2793 return (DDI_FAILURE); 2794 } 2795 *result = (void *) SD_DEVINFO(un); 2796 error = DDI_SUCCESS; 2797 break; 2798 case DDI_INFO_DEVT2INSTANCE: 2799 dev = (dev_t)arg; 2800 instance = SDUNIT(dev); 2801 *result = (void *)(uintptr_t)instance; 2802 error = DDI_SUCCESS; 2803 break; 2804 default: 2805 error = DDI_FAILURE; 2806 } 2807 return (error); 2808 } 2809 2810 /* 2811 * Function: sd_prop_op 2812 * 2813 * Description: This is the driver prop_op(9e) entry point function. 2814 * Return the number of blocks for the partition in question 2815 * or forward the request to the property facilities.
2816 * 2817 * Arguments: dev - device number 2818 * dip - pointer to device info structure 2819 * prop_op - property operator 2820 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2821 * name - pointer to property name 2822 * valuep - pointer or address of the user buffer 2823 * lengthp - property length 2824 * 2825 * Return Code: DDI_PROP_SUCCESS 2826 * DDI_PROP_NOT_FOUND 2827 * DDI_PROP_UNDEFINED 2828 * DDI_PROP_NO_MEMORY 2829 * DDI_PROP_BUF_TOO_SMALL 2830 */ 2831 2832 static int 2833 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2834 char *name, caddr_t valuep, int *lengthp) 2835 { 2836 struct sd_lun *un; 2837 2838 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2839 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2840 name, valuep, lengthp)); 2841 2842 return (cmlb_prop_op(un->un_cmlbhandle, 2843 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2844 SDPART(dev), (void *)SD_PATH_DIRECT)); 2845 } 2846 2847 /* 2848 * The following functions are for smart probing: 2849 * sd_scsi_probe_cache_init() 2850 * sd_scsi_probe_cache_fini() 2851 * sd_scsi_clear_probe_cache() 2852 * sd_scsi_probe_with_cache() 2853 */ 2854 2855 /* 2856 * Function: sd_scsi_probe_cache_init 2857 * 2858 * Description: Initializes the probe response cache mutex and head pointer. 2859 * 2860 * Context: Kernel thread context 2861 */ 2862 2863 static void 2864 sd_scsi_probe_cache_init(void) 2865 { 2866 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2867 sd_scsi_probe_cache_head = NULL; 2868 } 2869 2870 2871 /* 2872 * Function: sd_scsi_probe_cache_fini 2873 * 2874 * Description: Frees all resources associated with the probe response cache. 2875 * 2876 * Context: Kernel thread context 2877 */ 2878 2879 static void 2880 sd_scsi_probe_cache_fini(void) 2881 { 2882 struct sd_scsi_probe_cache *cp; 2883 struct sd_scsi_probe_cache *ncp; 2884 2885 /* Clean up our smart probing linked list */ 2886 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2887 ncp = cp->next; 2888 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2889 } 2890 sd_scsi_probe_cache_head = NULL; 2891 mutex_destroy(&sd_scsi_probe_cache_mutex); 2892 } 2893 2894 2895 /* 2896 * Function: sd_scsi_clear_probe_cache 2897 * 2898 * Description: This routine clears the probe response cache. This is 2899 * done when open() returns ENXIO so that when deferred 2900 * attach is attempted (possibly after a device has been 2901 * turned on) we will retry the probe. Since we don't know 2902 * which target we failed to open, we just clear the 2903 * entire cache. 2904 * 2905 * Context: Kernel thread context 2906 */ 2907 2908 static void 2909 sd_scsi_clear_probe_cache(void) 2910 { 2911 struct sd_scsi_probe_cache *cp; 2912 int i; 2913 2914 mutex_enter(&sd_scsi_probe_cache_mutex); 2915 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2916 /* 2917 * Reset all entries to SCSIPROBE_EXISTS. This will 2918 * force probing to be performed the next time 2919 * sd_scsi_probe_with_cache is called. 2920 */ 2921 for (i = 0; i < NTARGETS_WIDE; i++) { 2922 cp->cache[i] = SCSIPROBE_EXISTS; 2923 } 2924 } 2925 mutex_exit(&sd_scsi_probe_cache_mutex); 2926 } 2927 2928 2929 /* 2930 * Function: sd_scsi_probe_with_cache 2931 * 2932 * Description: This routine implements support for a scsi device probe 2933 * with cache. The driver maintains a cache of the target 2934 * responses to scsi probes. 
If we get no response from a 2935 * target during a probe inquiry, we remember that, and we 2936 * avoid additional calls to scsi_probe on non-zero LUNs 2937 * on the same target until the cache is cleared. By doing 2938 * so we avoid the 1/4 sec selection timeout for nonzero 2939 * LUNs. lun0 of a target is always probed. 2940 * 2941 * Arguments: devp - Pointer to a scsi_device(9S) structure 2942 * waitfn - indicates what the allocator routines should 2943 * do when resources are not available. This value 2944 * is passed on to scsi_probe() when that routine 2945 * is called. 2946 * 2947 * Return Code: SCSIPROBE_NORESP if a NORESP is in the probe response cache; 2948 * otherwise the value returned by scsi_probe(9F). 2949 * 2950 * Context: Kernel thread context 2951 */ 2952 2953 static int 2954 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2955 { 2956 struct sd_scsi_probe_cache *cp; 2957 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2958 int lun, tgt; 2959 2960 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2961 SCSI_ADDR_PROP_LUN, 0); 2962 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2963 SCSI_ADDR_PROP_TARGET, -1); 2964 2965 /* Make sure caching is enabled and the target is in range */ 2966 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2967 /* do it the old way (no cache) */ 2968 return (scsi_probe(devp, waitfn)); 2969 } 2970 2971 mutex_enter(&sd_scsi_probe_cache_mutex); 2972 2973 /* Find the cache for this scsi bus instance */ 2974 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2975 if (cp->pdip == pdip) { 2976 break; 2977 } 2978 } 2979 2980 /* If we can't find a cache for this pdip, create one */ 2981 if (cp == NULL) { 2982 int i; 2983 2984 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2985 KM_SLEEP); 2986 cp->pdip = pdip; 2987 cp->next = sd_scsi_probe_cache_head; 2988 sd_scsi_probe_cache_head = cp; 2989 for (i = 0; i < NTARGETS_WIDE; i++) { 2990 cp->cache[i] = SCSIPROBE_EXISTS; 2991 } 2992 } 2993 2994 mutex_exit(&sd_scsi_probe_cache_mutex); 2995 2996 /* Recompute the cache for this target if LUN zero */ 2997 if (lun == 0) { 2998 cp->cache[tgt] = SCSIPROBE_EXISTS; 2999 } 3000 3001 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 3002 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 3003 return (SCSIPROBE_NORESP); 3004 } 3005 3006 /* Do the actual probe; save & return the result */ 3007 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 3008 } 3009 3010 3011 /* 3012 * Function: sd_scsi_target_lun_init 3013 * 3014 * Description: Initializes the attached lun chain mutex and head pointer.
3015 * 3016 * Context: Kernel thread context 3017 */ 3018 3019 static void 3020 sd_scsi_target_lun_init(void) 3021 { 3022 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL); 3023 sd_scsi_target_lun_head = NULL; 3024 } 3025 3026 3027 /* 3028 * Function: sd_scsi_target_lun_fini 3029 * 3030 * Description: Frees all resources associated with the attached lun 3031 * chain. 3032 * 3033 * Context: Kernel thread context 3034 */ 3035 3036 static void 3037 sd_scsi_target_lun_fini(void) 3038 { 3039 struct sd_scsi_hba_tgt_lun *cp; 3040 struct sd_scsi_hba_tgt_lun *ncp; 3041 3042 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) { 3043 ncp = cp->next; 3044 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun)); 3045 } 3046 sd_scsi_target_lun_head = NULL; 3047 mutex_destroy(&sd_scsi_target_lun_mutex); 3048 } 3049 3050 3051 /* 3052 * Function: sd_scsi_get_target_lun_count 3053 * 3054 * Description: This routine will check in the attached lun chain to see 3055 * how many luns are attached on the required SCSI controller 3056 * and target. Currently, some capabilities like tagged 3057 * queueing are supported per target by the HBA, so all luns in a 3058 * target have the same capabilities. Based on this assumption, 3059 * sd should only set these capabilities once per target. This 3060 * function is called when sd needs to decide how many luns are 3061 * already attached on a target. 3062 * 3063 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 3064 * controller device. 3065 * target - The target ID on the controller's SCSI bus. 3066 * 3067 * Return Code: The number of luns attached on the required target and 3068 * controller. 3069 * -1 if target ID is not in parallel SCSI scope or the given 3070 * dip is not in the chain. 3071 * 3072 * Context: Kernel thread context 3073 */ 3074 3075 static int 3076 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 3077 { 3078 struct sd_scsi_hba_tgt_lun *cp; 3079 3080 if ((target < 0) || (target >= NTARGETS_WIDE)) { 3081 return (-1); 3082 } 3083 3084 mutex_enter(&sd_scsi_target_lun_mutex); 3085 3086 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 3087 if (cp->pdip == dip) { 3088 break; 3089 } 3090 } 3091 3092 mutex_exit(&sd_scsi_target_lun_mutex); 3093 3094 if (cp == NULL) { 3095 return (-1); 3096 } 3097 3098 return (cp->nlun[target]); 3099 } 3100 3101 3102 /* 3103 * Function: sd_scsi_update_lun_on_target 3104 * 3105 * Description: This routine is used to update the attached lun chain when a 3106 * lun is attached or detached on a target. 3107 * 3108 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 3109 * controller device. 3110 * target - The target ID on the controller's SCSI bus. 3111 * flag - Indicates whether the lun is attached or detached.
3112 * 3113 * Context: Kernel thread context 3114 */ 3115 3116 static void 3117 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 3118 { 3119 struct sd_scsi_hba_tgt_lun *cp; 3120 3121 mutex_enter(&sd_scsi_target_lun_mutex); 3122 3123 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 3124 if (cp->pdip == dip) { 3125 break; 3126 } 3127 } 3128 3129 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 3130 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 3131 KM_SLEEP); 3132 cp->pdip = dip; 3133 cp->next = sd_scsi_target_lun_head; 3134 sd_scsi_target_lun_head = cp; 3135 } 3136 3137 mutex_exit(&sd_scsi_target_lun_mutex); 3138 3139 if (cp != NULL) { 3140 if (flag == SD_SCSI_LUN_ATTACH) { 3141 cp->nlun[target]++; 3142 } else { 3143 cp->nlun[target]--; 3144 } 3145 } 3146 } 3147 3148 3149 /* 3150 * Function: sd_spin_up_unit 3151 * 3152 * Description: Issues the following commands to spin up the device: 3153 * START STOP UNIT and INQUIRY. 3154 * 3155 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3156 * structure for this target. 3157 * 3158 * Return Code: 0 - success 3159 * EIO - failure 3160 * EACCES - reservation conflict 3161 * 3162 * Context: Kernel thread context 3163 */ 3164 3165 static int 3166 sd_spin_up_unit(sd_ssc_t *ssc) 3167 { 3168 size_t resid = 0; 3169 int has_conflict = FALSE; 3170 uchar_t *bufaddr; 3171 int status; 3172 struct sd_lun *un; 3173 3174 ASSERT(ssc != NULL); 3175 un = ssc->ssc_un; 3176 ASSERT(un != NULL); 3177 3178 /* 3179 * Send a throwaway START UNIT command. 3180 * 3181 * If we fail on this, we don't care presently what precisely 3182 * is wrong. EMC's arrays will also fail this with a check 3183 * condition (0x2/0x4/0x3) if the device is "inactive," but 3184 * we don't want to fail the attach because it may become 3185 * "active" later. 3186 * We don't know whether the power condition is supported at 3187 * this stage, so use the START STOP bit. 3188 */ 3189 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 3190 SD_TARGET_START, SD_PATH_DIRECT); 3191 3192 if (status != 0) { 3193 if (status == EACCES) 3194 has_conflict = TRUE; 3195 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3196 } 3197 3198 /* 3199 * Send another INQUIRY command to the target. This is necessary for 3200 * non-removable media direct access devices because their INQUIRY data 3201 * may not be fully qualified until they are spun up (perhaps via the 3202 * START command above). (Note: This seems to be needed for some 3203 * legacy devices only.) The INQUIRY command should succeed even if a 3204 * Reservation Conflict is present. 3205 */ 3206 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 3207 3208 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid) 3209 != 0) { 3210 kmem_free(bufaddr, SUN_INQSIZE); 3211 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 3212 return (EIO); 3213 } 3214 3215 /* 3216 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 3217 * Note that this routine does not return a failure here even if the 3218 * INQUIRY command did not return any data. This is a legacy behavior. 3219 */ 3220 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 3221 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 3222 } 3223 3224 kmem_free(bufaddr, SUN_INQSIZE); 3225 3226 /* If we hit a reservation conflict above, tell the caller.
*/ 3227 if (has_conflict == TRUE) { 3228 return (EACCES); 3229 } 3230 3231 return (0); 3232 } 3233 3234 #ifdef _LP64 3235 /* 3236 * Function: sd_enable_descr_sense 3237 * 3238 * Description: This routine attempts to select descriptor sense format 3239 * using the Control mode page. Devices that support 64 bit 3240 * LBAs (for >2TB luns) should also implement descriptor 3241 * sense data, so we call this function whenever we see 3242 * a lun larger than 2TB. If for some reason the device 3243 * supports 64 bit LBAs but doesn't support descriptor sense, 3244 * presumably the mode select will fail. Everything will 3245 * continue to work normally, except that we will not get 3246 * complete sense data for commands that fail with an LBA 3247 * larger than 32 bits. 3248 * 3249 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3250 * structure for this target. 3251 * 3252 * Context: Kernel thread context only 3253 */ 3254 3255 static void 3256 sd_enable_descr_sense(sd_ssc_t *ssc) 3257 { 3258 uchar_t *header; 3259 struct mode_control_scsi3 *ctrl_bufp; 3260 size_t buflen; 3261 size_t bd_len; 3262 int status; 3263 struct sd_lun *un; 3264 3265 ASSERT(ssc != NULL); 3266 un = ssc->ssc_un; 3267 ASSERT(un != NULL); 3268 3269 /* 3270 * Read MODE SENSE page 0xA, Control Mode Page 3271 */ 3272 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 3273 sizeof (struct mode_control_scsi3); 3274 header = kmem_zalloc(buflen, KM_SLEEP); 3275 3276 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 3277 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT); 3278 3279 if (status != 0) { 3280 SD_ERROR(SD_LOG_COMMON, un, 3281 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 3282 goto eds_exit; 3283 } 3284 3285 /* 3286 * Determine size of Block Descriptors in order to locate 3287 * the mode page data. ATAPI devices return 0, SCSI devices 3288 * should return MODE_BLK_DESC_LENGTH. 3289 */ 3290 bd_len = ((struct mode_header *)header)->bdesc_length; 3291 3292 /* Clear the mode data length field for MODE SELECT */ 3293 ((struct mode_header *)header)->length = 0; 3294 3295 ctrl_bufp = (struct mode_control_scsi3 *) 3296 (header + MODE_HEADER_LENGTH + bd_len); 3297 3298 /* 3299 * If the page length is smaller than the expected value, 3300 * the target device doesn't support D_SENSE. Bail out here. 3301 */ 3302 if (ctrl_bufp->mode_page.length < 3303 sizeof (struct mode_control_scsi3) - 2) { 3304 SD_ERROR(SD_LOG_COMMON, un, 3305 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3306 goto eds_exit; 3307 } 3308 3309 /* 3310 * Clear PS bit for MODE SELECT 3311 */ 3312 ctrl_bufp->mode_page.ps = 0; 3313 3314 /* 3315 * Set D_SENSE to enable descriptor sense format.
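 *
 * For reference, a sketch of the relevant byte (per SPC-3, Control
 * mode page, page code 0x0A); struct mode_control_scsi3 exposes
 * D_SENSE as a bitfield, so the read-modify-write here changes only
 * this one bit and preserves the rest of the page exactly as it was
 * returned by MODE SENSE:
 *
 *	byte 2:  TST(7:5)  TMF_ONLY(4)  D_SENSE(2)  GLTSD(1)  RLEC(0)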
*/ 3317 ctrl_bufp->d_sense = 1; 3318 3319 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3320 3321 /* 3322 * Use MODE SELECT to commit the change to the D_SENSE bit 3323 */ 3324 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 3325 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT); 3326 3327 if (status != 0) { 3328 SD_INFO(SD_LOG_COMMON, un, 3329 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3330 } else { 3331 kmem_free(header, buflen); 3332 return; 3333 } 3334 3335 eds_exit: 3336 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3337 kmem_free(header, buflen); 3338 } 3339 3340 /* 3341 * Function: sd_reenable_dsense_task 3342 * 3343 * Description: Re-enable descriptor sense after device or bus reset 3344 * 3345 * Context: Executes in a taskq() thread context 3346 */ 3347 static void 3348 sd_reenable_dsense_task(void *arg) 3349 { 3350 struct sd_lun *un = arg; 3351 sd_ssc_t *ssc; 3352 3353 ASSERT(un != NULL); 3354 3355 ssc = sd_ssc_init(un); 3356 sd_enable_descr_sense(ssc); 3357 sd_ssc_fini(ssc); 3358 } 3359 #endif /* _LP64 */ 3360 3361 /* 3362 * Function: sd_set_mmc_caps 3363 * 3364 * Description: This routine determines whether the device is MMC compliant 3365 * and whether it supports CDDA, via a mode sense of the CD/DVD 3366 * capabilities mode page (0x2A). It also checks whether the 3367 * device is a writable DVD-RAM device. 3368 * 3369 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3370 * structure for this target. 3371 * 3372 * Context: Kernel thread context only 3373 */ 3374 3375 static void 3376 sd_set_mmc_caps(sd_ssc_t *ssc) 3377 { 3378 struct mode_header_grp2 *sense_mhp; 3379 uchar_t *sense_page; 3380 caddr_t buf; 3381 int bd_len; 3382 int status; 3383 struct uscsi_cmd com; 3384 int rtn; 3385 uchar_t *out_data_rw, *out_data_hd; 3386 uchar_t *rqbuf_rw, *rqbuf_hd; 3387 uchar_t *out_data_gesn; 3388 int gesn_len; 3389 struct sd_lun *un; 3390 3391 ASSERT(ssc != NULL); 3392 un = ssc->ssc_un; 3393 ASSERT(un != NULL); 3394 3395 /* 3396 * The flags set in this function are: mmc compliant, dvdram 3397 * writable device, and cdda support. Initialize them to FALSE; 3398 * if a capability is detected, the corresponding flag is set to 3399 * TRUE. 3400 */ 3400 un->un_f_mmc_cap = FALSE; 3401 un->un_f_dvdram_writable_device = FALSE; 3402 un->un_f_cfg_cdda = FALSE; 3403 3404 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3405 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3406 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3407 3408 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3409 3410 if (status != 0) { 3411 /* command failed; just return */ 3412 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3413 return; 3414 } 3415 /* 3416 * If the mode sense request for the CDROM CAPABILITIES 3417 * page (0x2A) succeeds, the device is assumed to be MMC.
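 *
 * (A sketch of the 10-byte CDB that sd_send_scsi_MODE_SENSE() builds
 * for this probe when passed CDB_GROUP1; byte 2 carries PC = 0
 * (current values) in bits 7:6 and the page code 0x2A in bits 5:0:
 *
 *	0x5A 0x00 0x2A 0x00 0x00 0x00 0x00 <len_hi> <len_lo> 0x00
 *
 * A device that rejects it, e.g. with ILLEGAL REQUEST, is treated
 * as a plain direct-access device rather than MMC.)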
3418 */ 3419 un->un_f_mmc_cap = TRUE; 3420 3421 /* See if GET STATUS EVENT NOTIFICATION is supported */ 3422 if (un->un_f_mmc_gesn_polling) { 3423 gesn_len = SD_GESN_HEADER_LEN + SD_GESN_MEDIA_DATA_LEN; 3424 out_data_gesn = kmem_zalloc(gesn_len, KM_SLEEP); 3425 3426 rtn = sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(ssc, 3427 out_data_gesn, gesn_len, 1 << SD_GESN_MEDIA_CLASS); 3428 3429 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3430 3431 if ((rtn != 0) || !sd_gesn_media_data_valid(out_data_gesn)) { 3432 un->un_f_mmc_gesn_polling = FALSE; 3433 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3434 "sd_set_mmc_caps: gesn not supported " 3435 "%d %x %x %x %x\n", rtn, 3436 out_data_gesn[0], out_data_gesn[1], 3437 out_data_gesn[2], out_data_gesn[3]); 3438 } 3439 3440 kmem_free(out_data_gesn, gesn_len); 3441 } 3442 3443 /* Get to the page data */ 3444 sense_mhp = (struct mode_header_grp2 *)buf; 3445 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3446 sense_mhp->bdesc_length_lo; 3447 if (bd_len > MODE_BLK_DESC_LENGTH) { 3448 /* 3449 * We did not get back the expected block descriptor 3450 * length so we cannot determine if the device supports 3451 * CDDA. However, we still indicate the device is MMC 3452 * according to the successful response to the page 3453 * 0x2A mode sense request. 3454 */ 3455 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3456 "sd_set_mmc_caps: Mode Sense returned " 3457 "invalid block descriptor length\n"); 3458 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3459 return; 3460 } 3461 3462 /* See if read CDDA is supported */ 3463 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3464 bd_len); 3465 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3466 3467 /* See if writing DVD RAM is supported. */ 3468 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3469 if (un->un_f_dvdram_writable_device == TRUE) { 3470 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3471 return; 3472 } 3473 3474 /* 3475 * If the device presents DVD or CD capabilities in the mode 3476 * page, we can return here since a RRD will not have 3477 * these capabilities. 3478 */ 3479 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3480 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3481 return; 3482 } 3483 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3484 3485 /* 3486 * If un->un_f_dvdram_writable_device is still FALSE, 3487 * check for a Removable Rigid Disk (RRD). A RRD 3488 * device is identified by the features RANDOM_WRITABLE and 3489 * HARDWARE_DEFECT_MANAGEMENT. 3490 */ 3491 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3492 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3493 3494 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3495 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3496 RANDOM_WRITABLE, SD_PATH_STANDARD); 3497 3498 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3499 3500 if (rtn != 0) { 3501 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3502 kmem_free(rqbuf_rw, SENSE_LENGTH); 3503 return; 3504 } 3505 3506 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3507 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3508 3509 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3510 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3511 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3512 3513 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3514 3515 if (rtn == 0) { 3516 /* 3517 * We have good information, check for random writable 3518 * and hardware defect features. 
3519 */ 3520 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3521 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3522 un->un_f_dvdram_writable_device = TRUE; 3523 } 3524 } 3525 3526 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3527 kmem_free(rqbuf_rw, SENSE_LENGTH); 3528 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3529 kmem_free(rqbuf_hd, SENSE_LENGTH); 3530 } 3531 3532 /* 3533 * Function: sd_check_for_writable_cd 3534 * 3535 * Description: This routine determines if the media in the device is 3536 * writable or not. It uses the get configuration command (0x46) 3537 * to determine if the media is writable 3538 * 3539 * Arguments: un - driver soft state (unit) structure 3540 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3541 * chain and the normal command waitq, or 3542 * SD_PATH_DIRECT_PRIORITY to use the USCSI 3543 * "direct" chain and bypass the normal command 3544 * waitq. 3545 * 3546 * Context: Never called at interrupt context. 3547 */ 3548 3549 static void 3550 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag) 3551 { 3552 struct uscsi_cmd com; 3553 uchar_t *out_data; 3554 uchar_t *rqbuf; 3555 int rtn; 3556 uchar_t *out_data_rw, *out_data_hd; 3557 uchar_t *rqbuf_rw, *rqbuf_hd; 3558 struct mode_header_grp2 *sense_mhp; 3559 uchar_t *sense_page; 3560 caddr_t buf; 3561 int bd_len; 3562 int status; 3563 struct sd_lun *un; 3564 3565 ASSERT(ssc != NULL); 3566 un = ssc->ssc_un; 3567 ASSERT(un != NULL); 3568 ASSERT(mutex_owned(SD_MUTEX(un))); 3569 3570 /* 3571 * Initialize the writable media to false, if configuration info. 3572 * tells us otherwise then only we will set it. 3573 */ 3574 un->un_f_mmc_writable_media = FALSE; 3575 mutex_exit(SD_MUTEX(un)); 3576 3577 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3578 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3579 3580 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH, 3581 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3582 3583 if (rtn != 0) 3584 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3585 3586 mutex_enter(SD_MUTEX(un)); 3587 if (rtn == 0) { 3588 /* 3589 * We have good information, check for writable DVD. 3590 */ 3591 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3592 un->un_f_mmc_writable_media = TRUE; 3593 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3594 kmem_free(rqbuf, SENSE_LENGTH); 3595 return; 3596 } 3597 } 3598 3599 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3600 kmem_free(rqbuf, SENSE_LENGTH); 3601 3602 /* 3603 * Determine if this is a RRD type device. 3604 */ 3605 mutex_exit(SD_MUTEX(un)); 3606 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3607 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3608 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3609 3610 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3611 3612 mutex_enter(SD_MUTEX(un)); 3613 if (status != 0) { 3614 /* command failed; just return */ 3615 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3616 return; 3617 } 3618 3619 /* Get to the page data */ 3620 sense_mhp = (struct mode_header_grp2 *)buf; 3621 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3622 if (bd_len > MODE_BLK_DESC_LENGTH) { 3623 /* 3624 * We did not get back the expected block descriptor length so 3625 * we cannot check the mode page. 
3626 */ 3627 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3628 "sd_check_for_writable_cd: Mode Sense returned " 3629 "invalid block descriptor length\n"); 3630 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3631 return; 3632 } 3633 3634 /* 3635 * If the device presents DVD or CD capabilities in the mode 3636 * page, we can return here since a RRD device will not have 3637 * these capabilities. 3638 */ 3639 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3640 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3641 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3642 return; 3643 } 3644 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3645 3646 /* 3647 * If un->un_f_mmc_writable_media is still FALSE, 3648 * check for RRD type media. A RRD device is identified 3649 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 3650 */ 3651 mutex_exit(SD_MUTEX(un)); 3652 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3653 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3654 3655 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3656 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3657 RANDOM_WRITABLE, path_flag); 3658 3659 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3660 if (rtn != 0) { 3661 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3662 kmem_free(rqbuf_rw, SENSE_LENGTH); 3663 mutex_enter(SD_MUTEX(un)); 3664 return; 3665 } 3666 3667 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3668 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3669 3670 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3671 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3672 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3673 3674 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3675 mutex_enter(SD_MUTEX(un)); 3676 if (rtn == 0) { 3677 /* 3678 * We have good information, check for random writable 3679 * and hardware defect features as current. 3680 */ 3681 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3682 (out_data_rw[10] & 0x1) && 3683 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3684 (out_data_hd[10] & 0x1)) { 3685 un->un_f_mmc_writable_media = TRUE; 3686 } 3687 } 3688 3689 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3690 kmem_free(rqbuf_rw, SENSE_LENGTH); 3691 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3692 kmem_free(rqbuf_hd, SENSE_LENGTH); 3693 } 3694 3695 /* 3696 * Function: sd_read_unit_properties 3697 * 3698 * Description: The following implements a property lookup mechanism. 3699 * Properties for particular disks (keyed on vendor, model 3700 * and rev numbers) are sought in the sd.conf file via 3701 * sd_process_sdconf_file(), and if not found there, are 3702 * looked for in a list hardcoded in this driver via 3703 * sd_process_sdconf_table() Once located the properties 3704 * are used to update the driver unit structure. 3705 * 3706 * Arguments: un - driver soft state (unit) structure 3707 */ 3708 3709 static void 3710 sd_read_unit_properties(struct sd_lun *un) 3711 { 3712 /* 3713 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3714 * the "sd-config-list" property (from the sd.conf file) or if 3715 * there was not a match for the inquiry vid/pid. If this event 3716 * occurs the static driver configuration table is searched for 3717 * a match. 
3718 */ 3719 ASSERT(un != NULL); 3720 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3721 sd_process_sdconf_table(un); 3722 } 3723 3724 /* check for LSI device */ 3725 sd_is_lsi(un); 3726 3727 3728 } 3729 3730 3731 /* 3732 * Function: sd_process_sdconf_file 3733 * 3734 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the 3735 * driver's config file (ie, sd.conf) and update the driver 3736 * soft state structure accordingly. 3737 * 3738 * Arguments: un - driver soft state (unit) structure 3739 * 3740 * Return Code: SD_SUCCESS - The properties were successfully set according 3741 * to the driver configuration file. 3742 * SD_FAILURE - The driver config list was not obtained or 3743 * there was no vid/pid match. This indicates that 3744 * the static config table should be used. 3745 * 3746 * The config file has a property, "sd-config-list". Currently we support 3747 * two kinds of formats. For both formats, the value of this property 3748 * is a list of duplets: 3749 * 3750 * sd-config-list= 3751 * <duplet>, 3752 * [,<duplet>]*; 3753 * 3754 * For the improved format, where 3755 * 3756 * <duplet>:= "<vid+pid>","<tunable-list>" 3757 * 3758 * and 3759 * 3760 * <tunable-list>:= <tunable> [, <tunable> ]*; 3761 * <tunable> = <name> : <value> 3762 * 3763 * The <vid+pid> is the string that is returned by the target device on a 3764 * SCSI inquiry command, the <tunable-list> contains one or more tunables 3765 * to apply to all target devices with the specified <vid+pid>. 3766 * 3767 * Each <tunable> is a "<name> : <value>" pair. 3768 * 3769 * For the old format, the structure of each duplet is as follows: 3770 * 3771 * <duplet>:= "<vid+pid>","<data-property-name_list>" 3772 * 3773 * The first entry of the duplet is the device ID string (the concatenated 3774 * vid & pid; not to be confused with a device_id). This is defined in 3775 * the same way as in the sd_disk_table. 3776 * 3777 * The second part of the duplet is a string that identifies a 3778 * data-property-name-list. The data-property-name-list is defined as 3779 * follows: 3780 * 3781 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3782 * 3783 * The syntax of <data-property-name> depends on the <version> field. 3784 * 3785 * If version = SD_CONF_VERSION_1 we have the following syntax: 3786 * 3787 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3788 * 3789 * where the prop0 value will be used to set prop0 if bit0 set in the 3790 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3791 * 3792 */ 3793 3794 static int 3795 sd_process_sdconf_file(struct sd_lun *un) 3796 { 3797 char **config_list = NULL; 3798 uint_t nelements; 3799 char *vidptr; 3800 int vidlen; 3801 char *dnlist_ptr; 3802 char *dataname_ptr; 3803 char *dataname_lasts; 3804 int *data_list = NULL; 3805 uint_t data_list_len; 3806 int rval = SD_FAILURE; 3807 int i; 3808 3809 ASSERT(un != NULL); 3810 3811 /* Obtain the configuration list associated with the .conf file */ 3812 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un), 3813 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list, 3814 &config_list, &nelements) != DDI_PROP_SUCCESS) { 3815 return (SD_FAILURE); 3816 } 3817 3818 /* 3819 * Compare vids in each duplet to the inquiry vid - if a match is 3820 * made, get the data value and update the soft state structure 3821 * accordingly. 3822 * 3823 * Each duplet should show as a pair of strings, return SD_FAILURE 3824 * otherwise. 
3825 */ 3826 if (nelements & 1) { 3827 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3828 "sd-config-list should show as pairs of strings.\n"); 3829 if (config_list) 3830 ddi_prop_free(config_list); 3831 return (SD_FAILURE); 3832 } 3833 3834 for (i = 0; i < nelements; i += 2) { 3835 /* 3836 * Note: The assumption here is that each vid entry is on 3837 * a unique line from its associated duplet. 3838 */ 3839 vidptr = config_list[i]; 3840 vidlen = (int)strlen(vidptr); 3841 if (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS) { 3842 continue; 3843 } 3844 3845 /* 3846 * dnlist contains 1 or more blank separated 3847 * data-property-name entries 3848 */ 3849 dnlist_ptr = config_list[i + 1]; 3850 3851 if (strchr(dnlist_ptr, ':') != NULL) { 3852 /* 3853 * Decode the improved format sd-config-list. 3854 */ 3855 sd_nvpair_str_decode(un, dnlist_ptr); 3856 } else { 3857 /* 3858 * The old format sd-config-list, loop through all 3859 * data-property-name entries in the 3860 * data-property-name-list 3861 * setting the properties for each. 3862 */ 3863 for (dataname_ptr = strtok_r(dnlist_ptr, " \t", 3864 &dataname_lasts); dataname_ptr != NULL; 3865 dataname_ptr = strtok_r(NULL, " \t", 3866 &dataname_lasts)) { 3867 int version; 3868 3869 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3870 "sd_process_sdconf_file: disk:%s, " 3871 "data:%s\n", vidptr, dataname_ptr); 3872 3873 /* Get the data list */ 3874 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3875 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3876 &data_list_len) != DDI_PROP_SUCCESS) { 3877 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3878 "sd_process_sdconf_file: data " 3879 "property (%s) has no value\n", 3880 dataname_ptr); 3881 continue; 3882 } 3883 3884 version = data_list[0]; 3885 3886 if (version == SD_CONF_VERSION_1) { 3887 sd_tunables values; 3888 3889 /* Set the properties */ 3890 if (sd_chk_vers1_data(un, data_list[1], 3891 &data_list[2], data_list_len, 3892 dataname_ptr) == SD_SUCCESS) { 3893 sd_get_tunables_from_conf(un, 3894 data_list[1], &data_list[2], 3895 &values); 3896 sd_set_vers1_properties(un, 3897 data_list[1], &values); 3898 rval = SD_SUCCESS; 3899 } else { 3900 rval = SD_FAILURE; 3901 } 3902 } else { 3903 scsi_log(SD_DEVINFO(un), sd_label, 3904 CE_WARN, "data property %s version " 3905 "0x%x is invalid.", 3906 dataname_ptr, version); 3907 rval = SD_FAILURE; 3908 } 3909 if (data_list) 3910 ddi_prop_free(data_list); 3911 } 3912 } 3913 } 3914 3915 /* free up the memory allocated by ddi_prop_lookup_string_array(). */ 3916 if (config_list) { 3917 ddi_prop_free(config_list); 3918 } 3919 3920 return (rval); 3921 } 3922 3923 /* 3924 * Function: sd_nvpair_str_decode() 3925 * 3926 * Description: Parse the improved format sd-config-list to get 3927 * each entry of tunable, which includes a name-value pair. 3928 * Then call sd_set_properties() to set the property. 
3929 * 3930 * Arguments: un - driver soft state (unit) structure 3931 * nvpair_str - the tunable list 3932 */ 3933 static void 3934 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3935 { 3936 char *nv, *name, *value, *token; 3937 char *nv_lasts, *v_lasts, *x_lasts; 3938 3939 for (nv = strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3940 nv = strtok_r(NULL, ",", &nv_lasts)) { 3941 token = strtok_r(nv, ":", &v_lasts); 3942 name = strtok_r(token, " \t", &x_lasts); 3943 token = strtok_r(NULL, ":", &v_lasts); 3944 value = strtok_r(token, " \t", &x_lasts); 3945 if (name == NULL || value == NULL) { 3946 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3947 "sd_nvpair_str_decode: " 3948 "name or value is not valid!\n"); 3949 } else { 3950 sd_set_properties(un, name, value); 3951 } 3952 } 3953 } 3954 3955 /* 3956 * Function: sd_set_properties() 3957 * 3958 * Description: Set device properties based on the improved 3959 * format sd-config-list. 3960 * 3961 * Arguments: un - driver soft state (unit) structure 3962 * name - supported tunable name 3963 * value - tunable value 3964 */ 3965 static void 3966 sd_set_properties(struct sd_lun *un, char *name, char *value) 3967 { 3968 char *endptr = NULL; 3969 long val = 0; 3970 3971 if (strcasecmp(name, "cache-nonvolatile") == 0) { 3972 if (strcasecmp(value, "true") == 0) { 3973 un->un_f_suppress_cache_flush = TRUE; 3974 } else if (strcasecmp(value, "false") == 0) { 3975 un->un_f_suppress_cache_flush = FALSE; 3976 } else { 3977 goto value_invalid; 3978 } 3979 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3980 "suppress_cache_flush flag set to %d\n", 3981 un->un_f_suppress_cache_flush); 3982 return; 3983 } 3984 3985 if (strcasecmp(name, "controller-type") == 0) { 3986 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3987 un->un_ctype = val; 3988 } else { 3989 goto value_invalid; 3990 } 3991 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3992 "ctype set to %d\n", un->un_ctype); 3993 return; 3994 } 3995 3996 if (strcasecmp(name, "delay-busy") == 0) { 3997 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3998 un->un_busy_timeout = drv_usectohz(val / 1000); 3999 } else { 4000 goto value_invalid; 4001 } 4002 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4003 "busy_timeout set to %d\n", un->un_busy_timeout); 4004 return; 4005 } 4006 4007 if (strcasecmp(name, "disksort") == 0) { 4008 if (strcasecmp(value, "true") == 0) { 4009 un->un_f_disksort_disabled = FALSE; 4010 } else if (strcasecmp(value, "false") == 0) { 4011 un->un_f_disksort_disabled = TRUE; 4012 } else { 4013 goto value_invalid; 4014 } 4015 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4016 "disksort disabled flag set to %d\n", 4017 un->un_f_disksort_disabled); 4018 return; 4019 } 4020 4021 if (strcasecmp(name, "power-condition") == 0) { 4022 if (strcasecmp(value, "true") == 0) { 4023 un->un_f_power_condition_disabled = FALSE; 4024 } else if (strcasecmp(value, "false") == 0) { 4025 un->un_f_power_condition_disabled = TRUE; 4026 } else { 4027 goto value_invalid; 4028 } 4029 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4030 "power condition disabled flag set to %d\n", 4031 un->un_f_power_condition_disabled); 4032 return; 4033 } 4034 4035 if (strcasecmp(name, "timeout-releasereservation") == 0) { 4036 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4037 un->un_reserve_release_time = val; 4038 } else { 4039 goto value_invalid; 4040 } 4041 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4042 "reservation release timeout set to %d\n", 4043 
un->un_reserve_release_time); 4044 return; 4045 } 4046 4047 if (strcasecmp(name, "reset-lun") == 0) { 4048 if (strcasecmp(value, "true") == 0) { 4049 un->un_f_lun_reset_enabled = TRUE; 4050 } else if (strcasecmp(value, "false") == 0) { 4051 un->un_f_lun_reset_enabled = FALSE; 4052 } else { 4053 goto value_invalid; 4054 } 4055 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4056 "lun reset enabled flag set to %d\n", 4057 un->un_f_lun_reset_enabled); 4058 return; 4059 } 4060 4061 if (strcasecmp(name, "retries-busy") == 0) { 4062 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4063 un->un_busy_retry_count = val; 4064 } else { 4065 goto value_invalid; 4066 } 4067 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4068 "busy retry count set to %d\n", un->un_busy_retry_count); 4069 return; 4070 } 4071 4072 if (strcasecmp(name, "retries-timeout") == 0) { 4073 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4074 un->un_retry_count = val; 4075 } else { 4076 goto value_invalid; 4077 } 4078 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4079 "timeout retry count set to %d\n", un->un_retry_count); 4080 return; 4081 } 4082 4083 if (strcasecmp(name, "retries-notready") == 0) { 4084 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4085 un->un_notready_retry_count = val; 4086 } else { 4087 goto value_invalid; 4088 } 4089 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4090 "notready retry count set to %d\n", 4091 un->un_notready_retry_count); 4092 return; 4093 } 4094 4095 if (strcasecmp(name, "retries-reset") == 0) { 4096 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4097 un->un_reset_retry_count = val; 4098 } else { 4099 goto value_invalid; 4100 } 4101 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4102 "reset retry count set to %d\n", 4103 un->un_reset_retry_count); 4104 return; 4105 } 4106 4107 if (strcasecmp(name, "throttle-max") == 0) { 4108 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4109 un->un_saved_throttle = un->un_throttle = val; 4110 } else { 4111 goto value_invalid; 4112 } 4113 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4114 "throttle set to %d\n", un->un_throttle); 4115 } 4116 4117 if (strcasecmp(name, "throttle-min") == 0) { 4118 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4119 un->un_min_throttle = val; 4120 } else { 4121 goto value_invalid; 4122 } 4123 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4124 "min throttle set to %d\n", un->un_min_throttle); 4125 } 4126 4127 if (strcasecmp(name, "rmw-type") == 0) { 4128 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4129 un->un_f_rmw_type = val; 4130 } else { 4131 goto value_invalid; 4132 } 4133 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4134 "RMW type set to %d\n", un->un_f_rmw_type); 4135 } 4136 4137 if (strcasecmp(name, "physical-block-size") == 0) { 4138 if (ddi_strtol(value, &endptr, 0, &val) == 0 && 4139 ISP2(val) && val >= un->un_tgt_blocksize && 4140 val >= un->un_sys_blocksize) { 4141 un->un_phy_blocksize = val; 4142 } else { 4143 goto value_invalid; 4144 } 4145 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4146 "physical block size set to %d\n", un->un_phy_blocksize); 4147 } 4148 4149 if (strcasecmp(name, "retries-victim") == 0) { 4150 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 4151 un->un_victim_retry_count = val; 4152 } else { 4153 goto value_invalid; 4154 } 4155 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4156 "victim retry count set to %d\n", 4157 un->un_victim_retry_count); 4158 return; 4159 } 4160 4161 /* 4162 * 
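 * (Note that the throttle-max, throttle-min, rmw-type and
 * physical-block-size branches above fall through rather than
 * return, so each of those calls also runs the cross-check below;
 * this keeps the throttle pair consistent no matter which of the
 * two throttle tunables arrived last.)
 *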
Validate the throttle values. 4163 * If any of the numbers are invalid, set everything to defaults. 4164 */ 4165 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4166 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4167 (un->un_min_throttle > un->un_throttle)) { 4168 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4169 un->un_min_throttle = sd_min_throttle; 4170 } 4171 4172 if (strcasecmp(name, "mmc-gesn-polling") == 0) { 4173 if (strcasecmp(value, "true") == 0) { 4174 un->un_f_mmc_gesn_polling = TRUE; 4175 } else if (strcasecmp(value, "false") == 0) { 4176 un->un_f_mmc_gesn_polling = FALSE; 4177 } else { 4178 goto value_invalid; 4179 } 4180 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4181 "mmc-gesn-polling set to %d\n", 4182 un->un_f_mmc_gesn_polling); 4183 } 4184 4185 return; 4186 4187 value_invalid: 4188 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 4189 "value of prop %s is invalid\n", name); 4190 } 4191 4192 /* 4193 * Function: sd_get_tunables_from_conf() 4194 * 4195 * 4196 * This function reads the data list from the sd.conf file and pulls 4197 * the values that can have numeric values as arguments and places 4198 * the values in the appropriate sd_tunables member. 4199 * Since the order of the data list members varies across platforms 4200 * This function reads them from the data list in a platform specific 4201 * order and places them into the correct sd_tunable member that is 4202 * consistent across all platforms. 4203 */ 4204 static void 4205 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 4206 sd_tunables *values) 4207 { 4208 int i; 4209 int mask; 4210 4211 bzero(values, sizeof (sd_tunables)); 4212 4213 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4214 4215 mask = 1 << i; 4216 if (mask > flags) { 4217 break; 4218 } 4219 4220 switch (mask & flags) { 4221 case 0: /* This mask bit not set in flags */ 4222 continue; 4223 case SD_CONF_BSET_THROTTLE: 4224 values->sdt_throttle = data_list[i]; 4225 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4226 "sd_get_tunables_from_conf: throttle = %d\n", 4227 values->sdt_throttle); 4228 break; 4229 case SD_CONF_BSET_CTYPE: 4230 values->sdt_ctype = data_list[i]; 4231 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4232 "sd_get_tunables_from_conf: ctype = %d\n", 4233 values->sdt_ctype); 4234 break; 4235 case SD_CONF_BSET_NRR_COUNT: 4236 values->sdt_not_rdy_retries = data_list[i]; 4237 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4238 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 4239 values->sdt_not_rdy_retries); 4240 break; 4241 case SD_CONF_BSET_BSY_RETRY_COUNT: 4242 values->sdt_busy_retries = data_list[i]; 4243 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4244 "sd_get_tunables_from_conf: busy_retries = %d\n", 4245 values->sdt_busy_retries); 4246 break; 4247 case SD_CONF_BSET_RST_RETRIES: 4248 values->sdt_reset_retries = data_list[i]; 4249 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4250 "sd_get_tunables_from_conf: reset_retries = %d\n", 4251 values->sdt_reset_retries); 4252 break; 4253 case SD_CONF_BSET_RSV_REL_TIME: 4254 values->sdt_reserv_rel_time = data_list[i]; 4255 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4256 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 4257 values->sdt_reserv_rel_time); 4258 break; 4259 case SD_CONF_BSET_MIN_THROTTLE: 4260 values->sdt_min_throttle = data_list[i]; 4261 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4262 "sd_get_tunables_from_conf: min_throttle = %d\n", 4263 values->sdt_min_throttle); 4264 break; 4265 case SD_CONF_BSET_DISKSORT_DISABLED: 4266 values->sdt_disk_sort_dis = data_list[i]; 4267 
SD_INFO(SD_LOG_ATTACH_DETACH, un, 4268 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 4269 values->sdt_disk_sort_dis); 4270 break; 4271 case SD_CONF_BSET_LUN_RESET_ENABLED: 4272 values->sdt_lun_reset_enable = data_list[i]; 4273 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4274 "sd_get_tunables_from_conf: lun_reset_enable = %d" 4275 "\n", values->sdt_lun_reset_enable); 4276 break; 4277 case SD_CONF_BSET_CACHE_IS_NV: 4278 values->sdt_suppress_cache_flush = data_list[i]; 4279 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4280 "sd_get_tunables_from_conf: " 4281 "suppress_cache_flush = %d" 4282 "\n", values->sdt_suppress_cache_flush); 4283 break; 4284 case SD_CONF_BSET_PC_DISABLED: 4285 values->sdt_power_condition_dis = data_list[i]; 4286 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4287 "sd_get_tunables_from_conf: power_condition_dis = " 4288 "%d\n", values->sdt_power_condition_dis); 4289 break; 4290 } 4291 } 4292 } 4293 4294 /* 4295 * Function: sd_process_sdconf_table 4296 * 4297 * Description: Search the static configuration table for a match on the 4298 * inquiry vid/pid and update the driver soft state structure 4299 * according to the table property values for the device. 4300 * 4301 * The form of a configuration table entry is: 4302 * <vid+pid>,<flags>,<property-data> 4303 * "SEAGATE ST42400N",1,0x40000, 4304 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 4305 * 4306 * Arguments: un - driver soft state (unit) structure 4307 */ 4308 4309 static void 4310 sd_process_sdconf_table(struct sd_lun *un) 4311 { 4312 char *id = NULL; 4313 int table_index; 4314 int idlen; 4315 4316 ASSERT(un != NULL); 4317 for (table_index = 0; table_index < sd_disk_table_size; 4318 table_index++) { 4319 id = sd_disk_table[table_index].device_id; 4320 idlen = strlen(id); 4321 4322 /* 4323 * The static configuration table currently does not 4324 * implement version 10 properties. Additionally, 4325 * multiple data-property-name entries are not 4326 * implemented in the static configuration table. 4327 */ 4328 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4329 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4330 "sd_process_sdconf_table: disk %s\n", id); 4331 sd_set_vers1_properties(un, 4332 sd_disk_table[table_index].flags, 4333 sd_disk_table[table_index].properties); 4334 break; 4335 } 4336 } 4337 } 4338 4339 4340 /* 4341 * Function: sd_sdconf_id_match 4342 * 4343 * Description: This local function implements a case-insensitive vid/pid 4344 * comparison as well as the boundary cases of wild card and 4345 * multiple blanks. 4346 * 4347 * Note: An implicit assumption made here is that the scsi 4348 * inquiry structure will always keep the vid, pid and 4349 * revision strings in consecutive sequence, so they can be 4350 * read as a single string. If this assumption is not the 4351 * case, a separate string, to be used for the check, needs 4352 * to be built with these strings concatenated.
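 *
 * For example, against hypothetical inquiry data of
 * "ACME    Rocket16        " (8-byte vid + 16-byte pid):
 *
 *	id = "ACME    Rocket16"	matches (prefix, case-insensitive)
 *	id = " ACME Rocket16 "	matches via sd_blank_cmp()
 *	id = "*Rocket16*"	matches via the PID substring scan
 *	id = "*ROCKET16*"	does NOT match (the wildcard scan
 *				uses bcmp(), which is case sensitive)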
4353 * 4354 * Arguments: un - driver soft state (unit) structure 4355 * id - table or config file vid/pid 4356 * idlen - length of the vid/pid (bytes) 4357 * 4358 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4359 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4360 */ 4361 4362 static int 4363 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 4364 { 4365 struct scsi_inquiry *sd_inq; 4366 int rval = SD_SUCCESS; 4367 4368 ASSERT(un != NULL); 4369 sd_inq = un->un_sd->sd_inq; 4370 ASSERT(id != NULL); 4371 4372 /* 4373 * We use the inq_vid as a pointer to a buffer containing the 4374 * vid and pid and use the entire vid/pid length of the table 4375 * entry for the comparison. This works because the inq_pid 4376 * data member follows inq_vid in the scsi_inquiry structure. 4377 */ 4378 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4379 /* 4380 * The user id string is compared to the inquiry vid/pid 4381 * using a case insensitive comparison and ignoring 4382 * multiple spaces. 4383 */ 4384 rval = sd_blank_cmp(un, id, idlen); 4385 if (rval != SD_SUCCESS) { 4386 /* 4387 * User id strings that start and end with a "*" 4388 * are a special case. These do not have a 4389 * specific vendor, and the product string can 4390 * appear anywhere in the 16 byte PID portion of 4391 * the inquiry data. This is a simple strstr() 4392 * type search for the user id in the inquiry data. 4393 */ 4394 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4395 char *pidptr = &id[1]; 4396 int i; 4397 int j; 4398 int pidstrlen = idlen - 2; 4399 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4400 pidstrlen; 4401 4402 if (j < 0) { 4403 return (SD_FAILURE); 4404 } 4405 for (i = 0; i < j; i++) { 4406 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4407 pidptr, pidstrlen) == 0) { 4408 rval = SD_SUCCESS; 4409 break; 4410 } 4411 } 4412 } 4413 } 4414 } 4415 return (rval); 4416 } 4417 4418 4419 /* 4420 * Function: sd_blank_cmp 4421 * 4422 * Description: If the id string starts and ends with a space, treat 4423 * multiple consecutive spaces as equivalent to a single 4424 * space. For example, this causes a sd_disk_table entry 4425 * of " NEC CDROM " to match a device's id string of 4426 * "NEC CDROM". 4427 * 4428 * Note: The success exit condition for this routine is if 4429 * the pointer to the table entry is '\0' and the cnt of 4430 * the inquiry length is zero. This will happen if the inquiry 4431 * string returned by the device is padded with spaces to be 4432 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4433 * SCSI spec states that the inquiry string is to be padded with 4434 * spaces. 4435 * 4436 * Arguments: un - driver soft state (unit) structure 4437 * id - table or config file vid/pid 4438 * idlen - length of the vid/pid (bytes) 4439 * 4440 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4441 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4442 */ 4443 4444 static int 4445 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4446 { 4447 char *p1; 4448 char *p2; 4449 int cnt; 4450 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4451 sizeof (SD_INQUIRY(un)->inq_pid); 4452 4453 ASSERT(un != NULL); 4454 p2 = un->un_sd->sd_inq->inq_vid; 4455 ASSERT(id != NULL); 4456 p1 = id; 4457 4458 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4459 /* 4460 * Note: string p1 is terminated by a NUL but string p2 4461 * isn't. The end of p2 is determined by cnt. 
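 *
 * E.g. for a table entry " NEC CDROM " compared against inquiry
 * data "NEC     CDROM           ", each pass of the loop below
 * skips a run of blanks on both sides and then compares one word:
 *
 *	p1: NEC CDROM\0
 *	p2: NEC.....CDROM.......   ('.' = blank; cnt counts down)
 *
 * ending with *p1 == '\0' and cnt == 0, i.e. SD_SUCCESS.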
4462 */ 4463 for (;;) { 4464 /* skip over any extra blanks in both strings */ 4465 while ((*p1 != '\0') && (*p1 == ' ')) { 4466 p1++; 4467 } 4468 while ((cnt != 0) && (*p2 == ' ')) { 4469 p2++; 4470 cnt--; 4471 } 4472 4473 /* compare the two strings */ 4474 if ((cnt == 0) || 4475 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4476 break; 4477 } 4478 while ((cnt > 0) && 4479 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4480 p1++; 4481 p2++; 4482 cnt--; 4483 } 4484 } 4485 } 4486 4487 /* return SD_SUCCESS if both strings match */ 4488 return (((*p1 == '\0') && (cnt == 0)) ? SD_SUCCESS : SD_FAILURE); 4489 } 4490 4491 4492 /* 4493 * Function: sd_chk_vers1_data 4494 * 4495 * Description: Verify the version 1 device properties provided by the 4496 * user via the configuration file 4497 * 4498 * Arguments: un - driver soft state (unit) structure 4499 * flags - integer mask indicating properties to be set 4500 * prop_list - integer list of property values 4501 * list_len - number of the elements 4502 * 4503 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 4504 * SD_FAILURE - Indicates the user provided data is invalid 4505 */ 4506 4507 static int 4508 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 4509 int list_len, char *dataname_ptr) 4510 { 4511 int i; 4512 int mask = 1; 4513 int index = 0; 4514 4515 ASSERT(un != NULL); 4516 4517 /* Check for a NULL property name and list */ 4518 if (dataname_ptr == NULL) { 4519 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4520 "sd_chk_vers1_data: NULL data property name."); 4521 return (SD_FAILURE); 4522 } 4523 if (prop_list == NULL) { 4524 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4525 "sd_chk_vers1_data: %s NULL data property list.", 4526 dataname_ptr); 4527 return (SD_FAILURE); 4528 } 4529 4530 /* Display a warning if undefined bits are set in the flags */ 4531 if (flags & ~SD_CONF_BIT_MASK) { 4532 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4533 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 4534 "Properties not set.", 4535 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 4536 return (SD_FAILURE); 4537 } 4538 4539 /* 4540 * Verify the length of the list by identifying the highest bit set 4541 * in the flags and validating that the property list has a length 4542 * up to the index of this bit. 4543 */ 4544 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4545 if (flags & mask) { 4546 index++; 4547 } 4548 mask = 1 << i; 4549 } 4550 if (list_len < (index + 2)) { 4551 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4552 "sd_chk_vers1_data: " 4553 "Data property list %s size is incorrect. " 4554 "Properties not set.", dataname_ptr); 4555 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 4556 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 4557 return (SD_FAILURE); 4558 } 4559 return (SD_SUCCESS); 4560 } 4561 4562 4563 /* 4564 * Function: sd_set_vers1_properties 4565 * 4566 * Description: Set version 1 device properties based on a property list 4567 * retrieved from the driver configuration file or static 4568 * configuration table. 
Version 1 properties have the format: 4569 * 4570 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 4571 * 4572 * where the prop0 value will be used to set prop0 if bit0 4573 * is set in the flags 4574 * 4575 * Arguments: un - driver soft state (unit) structure 4576 * flags - integer mask indicating properties to be set 4577 * prop_list - integer list of property values 4578 */ 4579 4580 static void 4581 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4582 { 4583 ASSERT(un != NULL); 4584 4585 /* 4586 * Set the flag to indicate cache is to be disabled. An attempt 4587 * to disable the cache via sd_cache_control() will be made 4588 * later during attach once the basic initialization is complete. 4589 */ 4590 if (flags & SD_CONF_BSET_NOCACHE) { 4591 un->un_f_opt_disable_cache = TRUE; 4592 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4593 "sd_set_vers1_properties: caching disabled flag set\n"); 4594 } 4595 4596 /* CD-specific configuration parameters */ 4597 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4598 un->un_f_cfg_playmsf_bcd = TRUE; 4599 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4600 "sd_set_vers1_properties: playmsf_bcd set\n"); 4601 } 4602 if (flags & SD_CONF_BSET_READSUB_BCD) { 4603 un->un_f_cfg_readsub_bcd = TRUE; 4604 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4605 "sd_set_vers1_properties: readsub_bcd set\n"); 4606 } 4607 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4608 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4609 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4610 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4611 } 4612 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4613 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4614 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4615 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4616 } 4617 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4618 un->un_f_cfg_no_read_header = TRUE; 4619 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4620 "sd_set_vers1_properties: no_read_header set\n"); 4621 } 4622 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4623 un->un_f_cfg_read_cd_xd4 = TRUE; 4624 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4625 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4626 } 4627 4628 /* Support for devices which do not have valid/unique serial numbers */ 4629 if (flags & SD_CONF_BSET_FAB_DEVID) { 4630 un->un_f_opt_fab_devid = TRUE; 4631 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4632 "sd_set_vers1_properties: fab_devid bit set\n"); 4633 } 4634 4635 /* Support for user throttle configuration */ 4636 if (flags & SD_CONF_BSET_THROTTLE) { 4637 ASSERT(prop_list != NULL); 4638 un->un_saved_throttle = un->un_throttle = 4639 prop_list->sdt_throttle; 4640 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4641 "sd_set_vers1_properties: throttle set to %d\n", 4642 prop_list->sdt_throttle); 4643 } 4644 4645 /* Set the per disk retry count according to the conf file or table. 
*/ 4646 if (flags & SD_CONF_BSET_NRR_COUNT) { 4647 ASSERT(prop_list != NULL); 4648 if (prop_list->sdt_not_rdy_retries) { 4649 un->un_notready_retry_count = 4650 prop_list->sdt_not_rdy_retries; 4651 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4652 "sd_set_vers1_properties: not ready retry count" 4653 " set to %d\n", un->un_notready_retry_count); 4654 } 4655 } 4656 4657 /* The controller type is reported for generic disk driver ioctls */ 4658 if (flags & SD_CONF_BSET_CTYPE) { 4659 ASSERT(prop_list != NULL); 4660 switch (prop_list->sdt_ctype) { 4661 case CTYPE_CDROM: 4662 un->un_ctype = prop_list->sdt_ctype; 4663 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4664 "sd_set_vers1_properties: ctype set to " 4665 "CTYPE_CDROM\n"); 4666 break; 4667 case CTYPE_CCS: 4668 un->un_ctype = prop_list->sdt_ctype; 4669 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4670 "sd_set_vers1_properties: ctype set to " 4671 "CTYPE_CCS\n"); 4672 break; 4673 case CTYPE_ROD: /* RW optical */ 4674 un->un_ctype = prop_list->sdt_ctype; 4675 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4676 "sd_set_vers1_properties: ctype set to " 4677 "CTYPE_ROD\n"); 4678 break; 4679 default: 4680 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4681 "sd_set_vers1_properties: Could not set " 4682 "invalid ctype value (%d)", 4683 prop_list->sdt_ctype); 4684 } 4685 } 4686 4687 /* Purple failover timeout */ 4688 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4689 ASSERT(prop_list != NULL); 4690 un->un_busy_retry_count = 4691 prop_list->sdt_busy_retries; 4692 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4693 "sd_set_vers1_properties: " 4694 "busy retry count set to %d\n", 4695 un->un_busy_retry_count); 4696 } 4697 4698 /* Purple reset retry count */ 4699 if (flags & SD_CONF_BSET_RST_RETRIES) { 4700 ASSERT(prop_list != NULL); 4701 un->un_reset_retry_count = 4702 prop_list->sdt_reset_retries; 4703 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4704 "sd_set_vers1_properties: " 4705 "reset retry count set to %d\n", 4706 un->un_reset_retry_count); 4707 } 4708 4709 /* Purple reservation release timeout */ 4710 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4711 ASSERT(prop_list != NULL); 4712 un->un_reserve_release_time = 4713 prop_list->sdt_reserv_rel_time; 4714 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4715 "sd_set_vers1_properties: " 4716 "reservation release timeout set to %d\n", 4717 un->un_reserve_release_time); 4718 } 4719 4720 /* 4721 * Driver flag telling the driver to verify that no commands are pending 4722 * for a device before issuing a Test Unit Ready. This is a workaround 4723 * for a firmware bug in some Seagate eliteI drives. 4724 */ 4725 if (flags & SD_CONF_BSET_TUR_CHECK) { 4726 un->un_f_cfg_tur_check = TRUE; 4727 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4728 "sd_set_vers1_properties: tur queue check set\n"); 4729 } 4730 4731 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4732 un->un_min_throttle = prop_list->sdt_min_throttle; 4733 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4734 "sd_set_vers1_properties: min throttle set to %d\n", 4735 un->un_min_throttle); 4736 } 4737 4738 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4739 un->un_f_disksort_disabled = 4740 (prop_list->sdt_disk_sort_dis != 0) ? 4741 TRUE : FALSE; 4742 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4743 "sd_set_vers1_properties: disksort disabled " 4744 "flag set to %d\n", 4745 prop_list->sdt_disk_sort_dis); 4746 } 4747 4748 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4749 un->un_f_lun_reset_enabled = 4750 (prop_list->sdt_lun_reset_enable != 0) ? 
4751 TRUE : FALSE; 4752 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4753 "sd_set_vers1_properties: lun reset enabled " 4754 "flag set to %d\n", 4755 prop_list->sdt_lun_reset_enable); 4756 } 4757 4758 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4759 un->un_f_suppress_cache_flush = 4760 (prop_list->sdt_suppress_cache_flush != 0) ? 4761 TRUE : FALSE; 4762 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4763 "sd_set_vers1_properties: suppress_cache_flush " 4764 "flag set to %d\n", 4765 prop_list->sdt_suppress_cache_flush); 4766 } 4767 4768 if (flags & SD_CONF_BSET_PC_DISABLED) { 4769 un->un_f_power_condition_disabled = 4770 (prop_list->sdt_power_condition_dis != 0) ? 4771 TRUE : FALSE; 4772 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4773 "sd_set_vers1_properties: power_condition_disabled " 4774 "flag set to %d\n", 4775 prop_list->sdt_power_condition_dis); 4776 } 4777 4778 /* 4779 * Validate the throttle values. 4780 * If any of the numbers are invalid, set everything to defaults. 4781 */ 4782 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4783 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4784 (un->un_min_throttle > un->un_throttle)) { 4785 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4786 un->un_min_throttle = sd_min_throttle; 4787 } 4788 } 4789 4790 /* 4791 * Function: sd_is_lsi() 4792 * 4793 * Description: Check for lsi devices, step through the static device 4794 * table to match vid/pid. 4795 * 4796 * Args: un - ptr to sd_lun 4797 * 4798 * Notes: When creating new LSI property, need to add the new LSI property 4799 * to this function. 4800 */ 4801 static void 4802 sd_is_lsi(struct sd_lun *un) 4803 { 4804 char *id = NULL; 4805 int table_index; 4806 int idlen; 4807 void *prop; 4808 4809 ASSERT(un != NULL); 4810 for (table_index = 0; table_index < sd_disk_table_size; 4811 table_index++) { 4812 id = sd_disk_table[table_index].device_id; 4813 idlen = strlen(id); 4814 if (idlen == 0) { 4815 continue; 4816 } 4817 4818 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4819 prop = sd_disk_table[table_index].properties; 4820 if (prop == &lsi_properties || 4821 prop == &lsi_oem_properties || 4822 prop == &lsi_properties_scsi || 4823 prop == &symbios_properties) { 4824 un->un_f_cfg_is_lsi = TRUE; 4825 } 4826 break; 4827 } 4828 } 4829 } 4830 4831 /* 4832 * Function: sd_get_physical_geometry 4833 * 4834 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4835 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4836 * target, and use this information to initialize the physical 4837 * geometry cache specified by pgeom_p. 4838 * 4839 * MODE SENSE is an optional command, so failure in this case 4840 * does not necessarily denote an error. We want to use the 4841 * MODE SENSE commands to derive the physical geometry of the 4842 * device, but if either command fails, the logical geometry is 4843 * used as the fallback for disk label geometry in cmlb. 4844 * 4845 * This requires that un->un_blockcount and un->un_tgt_blocksize 4846 * have already been initialized for the current target and 4847 * that the current values be passed as args so that we don't 4848 * end up ever trying to use -1 as a valid value. This could 4849 * happen if either value is reset while we're not holding 4850 * the mutex. 4851 * 4852 * Arguments: un - driver soft state (unit) structure 4853 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4854 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4855 * to use the USCSI "direct" chain and bypass the normal 4856 * command waitq. 
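 *
 * As a sketch, the two pages contribute the following raw values,
 * which are cross-checked against the READ CAPACITY based
 * 'capacity' argument before being cached in pgeom_p:
 *
 *	page 3 (Format Device):		sectors/track, bytes/sector,
 *					interleave
 *	page 4 (Rigid Disk Geometry):	heads, cylinders, rpm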
4857 * 4858 * Context: Kernel thread only (can sleep). 4859 */ 4860 4861 static int 4862 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4863 diskaddr_t capacity, int lbasize, int path_flag) 4864 { 4865 struct mode_format *page3p; 4866 struct mode_geometry *page4p; 4867 struct mode_header *headerp; 4868 int sector_size; 4869 int nsect; 4870 int nhead; 4871 int ncyl; 4872 int intrlv; 4873 int spc; 4874 diskaddr_t modesense_capacity; 4875 int rpm; 4876 int bd_len; 4877 int mode_header_length; 4878 uchar_t *p3bufp; 4879 uchar_t *p4bufp; 4880 int cdbsize; 4881 int ret = EIO; 4882 sd_ssc_t *ssc; 4883 int status; 4884 4885 ASSERT(un != NULL); 4886 4887 if (lbasize == 0) { 4888 if (ISCD(un)) { 4889 lbasize = 2048; 4890 } else { 4891 lbasize = un->un_sys_blocksize; 4892 } 4893 } 4894 pgeom_p->g_secsize = (unsigned short)lbasize; 4895 4896 /* 4897 * If the unit is a cd/dvd drive MODE SENSE page three 4898 * and MODE SENSE page four are reserved (see SBC spec 4899 * and MMC spec). To prevent soft errors just return 4900 * using the default LBA size. 4901 * 4902 * Since SATA MODE SENSE function (sata_txlt_mode_sense()) does not 4903 * implement support for mode pages 3 and 4 return here to prevent 4904 * illegal requests on SATA drives. 4905 * 4906 * These pages are also reserved in SBC-2 and later. We assume SBC-2 4907 * or later for a direct-attached block device if the SCSI version is 4908 * at least SPC-3. 4909 */ 4910 4911 if (ISCD(un) || 4912 un->un_interconnect_type == SD_INTERCONNECT_SATA || 4913 (un->un_ctype == CTYPE_CCS && SD_INQUIRY(un)->inq_ansi >= 5)) 4914 return (ret); 4915 4916 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4917 4918 /* 4919 * Retrieve MODE SENSE page 3 - Format Device Page 4920 */ 4921 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4922 ssc = sd_ssc_init(un); 4923 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 4924 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 4925 if (status != 0) { 4926 SD_ERROR(SD_LOG_COMMON, un, 4927 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4928 goto page3_exit; 4929 } 4930 4931 /* 4932 * Determine size of Block Descriptors in order to locate the mode 4933 * page data. ATAPI devices return 0, SCSI devices should return 4934 * MODE_BLK_DESC_LENGTH. 4935 */ 4936 headerp = (struct mode_header *)p3bufp; 4937 if (un->un_f_cfg_is_atapi == TRUE) { 4938 struct mode_header_grp2 *mhp = 4939 (struct mode_header_grp2 *)headerp; 4940 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4941 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4942 } else { 4943 mode_header_length = MODE_HEADER_LENGTH; 4944 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4945 } 4946 4947 if (bd_len > MODE_BLK_DESC_LENGTH) { 4948 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4949 "sd_get_physical_geometry: received unexpected bd_len " 4950 "of %d, page3\n", bd_len); 4951 status = EIO; 4952 goto page3_exit; 4953 } 4954 4955 page3p = (struct mode_format *) 4956 ((caddr_t)headerp + mode_header_length + bd_len); 4957 4958 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4959 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4960 "sd_get_physical_geometry: mode sense pg3 code mismatch " 4961 "%d\n", page3p->mode_page.code); 4962 status = EIO; 4963 goto page3_exit; 4964 } 4965 4966 /* 4967 * Use this physical geometry data only if BOTH MODE SENSE commands 4968 * complete successfully; otherwise, revert to the logical geometry. 
4969 * So, we need to save everything in temporary variables. 4970 */ 4971 sector_size = BE_16(page3p->data_bytes_sect); 4972 4973 /* 4974 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4975 */ 4976 if (sector_size == 0) { 4977 sector_size = un->un_sys_blocksize; 4978 } else { 4979 sector_size &= ~(un->un_sys_blocksize - 1); 4980 } 4981 4982 nsect = BE_16(page3p->sect_track); 4983 intrlv = BE_16(page3p->interleave); 4984 4985 SD_INFO(SD_LOG_COMMON, un, 4986 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4987 SD_INFO(SD_LOG_COMMON, un, 4988 " mode page: %d; nsect: %d; sector size: %d;\n", 4989 page3p->mode_page.code, nsect, sector_size); 4990 SD_INFO(SD_LOG_COMMON, un, 4991 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4992 BE_16(page3p->track_skew), 4993 BE_16(page3p->cylinder_skew)); 4994 4995 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4996 4997 /* 4998 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4999 */ 5000 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 5001 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 5002 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 5003 if (status != 0) { 5004 SD_ERROR(SD_LOG_COMMON, un, 5005 "sd_get_physical_geometry: mode sense page 4 failed\n"); 5006 goto page4_exit; 5007 } 5008 5009 /* 5010 * Determine size of Block Descriptors in order to locate the mode 5011 * page data. ATAPI devices return 0, SCSI devices should return 5012 * MODE_BLK_DESC_LENGTH. 5013 */ 5014 headerp = (struct mode_header *)p4bufp; 5015 if (un->un_f_cfg_is_atapi == TRUE) { 5016 struct mode_header_grp2 *mhp = 5017 (struct mode_header_grp2 *)headerp; 5018 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 5019 } else { 5020 bd_len = ((struct mode_header *)headerp)->bdesc_length; 5021 } 5022 5023 if (bd_len > MODE_BLK_DESC_LENGTH) { 5024 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5025 "sd_get_physical_geometry: received unexpected bd_len of " 5026 "%d, page4\n", bd_len); 5027 status = EIO; 5028 goto page4_exit; 5029 } 5030 5031 page4p = (struct mode_geometry *) 5032 ((caddr_t)headerp + mode_header_length + bd_len); 5033 5034 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 5035 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 5036 "sd_get_physical_geometry: mode sense pg4 code mismatch " 5037 "%d\n", page4p->mode_page.code); 5038 status = EIO; 5039 goto page4_exit; 5040 } 5041 5042 /* 5043 * Stash the data now, after we know that both commands completed. 5044 */ 5045 5046 5047 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 5048 spc = nhead * nsect; 5049 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 5050 rpm = BE_16(page4p->rpm); 5051 5052 modesense_capacity = spc * ncyl; 5053 5054 SD_INFO(SD_LOG_COMMON, un, 5055 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 5056 SD_INFO(SD_LOG_COMMON, un, 5057 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 5058 SD_INFO(SD_LOG_COMMON, un, 5059 " computed capacity(h*s*c): %d;\n", modesense_capacity); 5060 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 5061 (void *)pgeom_p, capacity); 5062 5063 /* 5064 * Compensate if the drive's geometry is not rectangular, i.e., 5065 * the product of C * H * S returned by MODE SENSE >= that returned 5066 * by read capacity. This is an idiosyncrasy of the original x86 5067 * disk subsystem. 

/*
 * Function: sd_update_block_info
 *
 * Description: Store the new target sector size and capacity in the
 *		softstate, and update the error kstats accordingly.
 *
 * Arguments:	un: unit struct.
 *		lbasize: new target sector size
 *		capacity: new target capacity, ie. block count
 *
 * Context: Kernel thread context
 */

static void
sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
{
	if (lbasize != 0) {
		un->un_tgt_blocksize = lbasize;
		un->un_f_tgt_blocksize_is_valid = TRUE;
		if (!un->un_f_has_removable_media) {
			un->un_sys_blocksize = lbasize;
		}
	}

	if (capacity != 0) {
		un->un_blockcount = capacity;
		un->un_f_blockcount_is_valid = TRUE;

		/*
		 * The capacity has changed so update the errstats.
		 */
		if (un->un_errstats != NULL) {
			struct sd_errstats *stp;

			capacity *= un->un_sys_blocksize;
			stp = (struct sd_errstats *)un->un_errstats->ks_data;
			if (stp->sd_capacity.value.ui64 < capacity)
				stp->sd_capacity.value.ui64 = capacity;
		}
	}
}

/*
 * Parses the SCSI Block Limits VPD page (0xB0). It's legal to pass NULL for
 * vpd_pg, in which case all the block limits will be reset to the defaults.
 */
static void
sd_parse_blk_limits_vpd(struct sd_lun *un, uchar_t *vpd_pg)
{
	sd_blk_limits_t *lim = &un->un_blk_lim;
	unsigned pg_len;

	if (vpd_pg != NULL)
		pg_len = BE_IN16(&vpd_pg[2]);
	else
		pg_len = 0;

	/* Block Limits VPD can be 16 bytes or 64 bytes long - support both */
	if (pg_len >= 0x10) {
		lim->lim_opt_xfer_len_gran = BE_IN16(&vpd_pg[6]);
		lim->lim_max_xfer_len = BE_IN32(&vpd_pg[8]);
		lim->lim_opt_xfer_len = BE_IN32(&vpd_pg[12]);

		/* Zero means not reported, so use "unlimited" */
		if (lim->lim_max_xfer_len == 0)
			lim->lim_max_xfer_len = UINT32_MAX;
		if (lim->lim_opt_xfer_len == 0)
			lim->lim_opt_xfer_len = UINT32_MAX;
	} else {
		lim->lim_opt_xfer_len_gran = 0;
		lim->lim_max_xfer_len = UINT32_MAX;
		lim->lim_opt_xfer_len = UINT32_MAX;
	}
	if (pg_len >= 0x3c) {
		lim->lim_max_pfetch_len = BE_IN32(&vpd_pg[16]);
		/*
		 * A zero in either of the following two fields indicates lack
		 * of UNMAP support.
		 */
		lim->lim_max_unmap_lba_cnt = BE_IN32(&vpd_pg[20]);
		lim->lim_max_unmap_descr_cnt = BE_IN32(&vpd_pg[24]);
		lim->lim_opt_unmap_gran = BE_IN32(&vpd_pg[28]);
		if ((vpd_pg[32] >> 7) == 1) {
			lim->lim_unmap_gran_align =
			    ((vpd_pg[32] & 0x7f) << 24) | (vpd_pg[33] << 16) |
			    (vpd_pg[34] << 8) | vpd_pg[35];
		} else {
			lim->lim_unmap_gran_align = 0;
		}
		lim->lim_max_write_same_len = BE_IN64(&vpd_pg[36]);
	} else {
		lim->lim_max_pfetch_len = UINT32_MAX;
		lim->lim_max_unmap_lba_cnt = UINT32_MAX;
		lim->lim_max_unmap_descr_cnt = SD_UNMAP_MAX_DESCR;
		lim->lim_opt_unmap_gran = 0;
		lim->lim_unmap_gran_align = 0;
		lim->lim_max_write_same_len = UINT64_MAX;
	}
}
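
/*
 * Illustrative sketch, not part of the driver: byte 32 of the Block
 * Limits page carries a "valid" flag in bit 7, with the remaining 31
 * bits (spanning bytes 32-35) holding the unmap granularity
 * alignment, which is how sd_parse_blk_limits_vpd() above decodes
 * it. Restated in isolation (hypothetical name, excluded from
 * compilation):
 */
#if 0	/* example only */
static uint32_t
sd_example_unmap_gran_align(const uchar_t *pg)
{
	if ((pg[32] >> 7) == 0)
		return (0);	/* alignment field not valid */
	return (((uint32_t)(pg[32] & 0x7f) << 24) | (pg[33] << 16) |
	    (pg[34] << 8) | pg[35]);
}
#endif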

/*
 * Collects VPD page B0 data if available (block limits). If the data is
 * not available or querying the device failed, we revert to the defaults.
 */
static void
sd_setup_blk_limits(sd_ssc_t *ssc)
{
	struct sd_lun	*un = ssc->ssc_un;
	uchar_t		*inqB0 = NULL;
	size_t		inqB0_resid = 0;
	int		rval;

	if (un->un_vpd_page_mask & SD_VPD_BLK_LIMITS_PG) {
		inqB0 = kmem_zalloc(MAX_INQUIRY_SIZE, KM_SLEEP);
		rval = sd_send_scsi_INQUIRY(ssc, inqB0, MAX_INQUIRY_SIZE, 0x01,
		    0xB0, &inqB0_resid);
		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			kmem_free(inqB0, MAX_INQUIRY_SIZE);
			inqB0 = NULL;
		}
	}
	/* passing NULL inqB0 will reset to defaults */
	sd_parse_blk_limits_vpd(ssc->ssc_un, inqB0);
	if (inqB0)
		kmem_free(inqB0, MAX_INQUIRY_SIZE);
}
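
/*
 * Illustrative sketch, not part of the driver: VPD page fields are
 * big-endian, so sd_parse_blk_limits_vpd() above pulls them out with
 * BE_IN16/BE_IN32/BE_IN64 at fixed offsets; e.g. the page length is
 * the 16-bit value at bytes 2-3. Spelled out by hand (hypothetical
 * name, excluded from compilation):
 */
#if 0	/* example only */
static unsigned
sd_example_vpd_pg_len(const uchar_t *vpd_pg)
{
	return ((vpd_pg[2] << 8) | vpd_pg[3]);	/* == BE_IN16(&vpd_pg[2]) */
}
#endif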

/*
 * Function: sd_register_devid
 *
 * Description: This routine will obtain the device id information from the
 *		target, obtain the serial number, and register the device
 *		id with the ddi framework.
 *
 * Arguments:	devi - the system's dev_info_t for the device.
 *		un - driver soft state (unit) structure
 *		reservation_flag - indicates if a reservation conflict
 *		occurred during attach
 *
 * Context: Kernel Thread
 */
static void
sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag)
{
	int		rval = 0;
	uchar_t		*inq80 = NULL;
	size_t		inq80_len = MAX_INQUIRY_SIZE;
	size_t		inq80_resid = 0;
	uchar_t		*inq83 = NULL;
	size_t		inq83_len = MAX_INQUIRY_SIZE;
	size_t		inq83_resid = 0;
	int		dlen, len;
	char		*sn;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT((SD_DEVINFO(un)) == devi);

	/*
	 * We check the availability of the World Wide Name (0x83) and Unit
	 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
	 * un_vpd_page_mask from them, we decide which way to get the WWN. If
	 * 0x83 is available, that is the best choice. Our next choice is
	 * 0x80. If neither are available, we munge the devid from the device
	 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
	 * to fabricate a devid for non-Sun qualified disks.
	 */
	if (sd_check_vpd_page_support(ssc) == 0) {
		/* collect page 80 data if available */
		if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {

			mutex_exit(SD_MUTEX(un));
			inq80 = kmem_zalloc(inq80_len, KM_SLEEP);

			rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len,
			    0x01, 0x80, &inq80_resid);

			if (rval != 0) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				kmem_free(inq80, inq80_len);
				inq80 = NULL;
				inq80_len = 0;
			} else if (ddi_prop_exists(
			    DDI_DEV_T_NONE, SD_DEVINFO(un),
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    INQUIRY_SERIAL_NO) == 0) {
				/*
				 * If we don't already have a serial number
				 * property, do quick verify of data returned
				 * and define property.
				 */
				dlen = inq80_len - inq80_resid;
				len = (size_t)inq80[3];
				if ((dlen >= 4) && ((len + 4) <= dlen)) {
					/*
					 * Ensure sn termination, skip leading
					 * blanks, and create property
					 * 'inquiry-serial-no'.
					 */
					sn = (char *)&inq80[4];
					sn[len] = 0;
					while (*sn && (*sn == ' '))
						sn++;
					if (*sn) {
						(void) ddi_prop_update_string(
						    DDI_DEV_T_NONE,
						    SD_DEVINFO(un),
						    INQUIRY_SERIAL_NO, sn);
					}
				}
			}
			mutex_enter(SD_MUTEX(un));
		}

		/* collect page 83 data if available */
		if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
			mutex_exit(SD_MUTEX(un));
			inq83 = kmem_zalloc(inq83_len, KM_SLEEP);

			rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len,
			    0x01, 0x83, &inq83_resid);

			if (rval != 0) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				kmem_free(inq83, inq83_len);
				inq83 = NULL;
				inq83_len = 0;
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * If the transport has already registered a devid for this target
	 * then that takes precedence over the driver's determination
	 * of the devid.
	 *
	 * NOTE: The reason this check is done here instead of at the beginning
	 * of the function is to allow the code above to create the
	 * 'inquiry-serial-no' property.
	 */
	if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
		ASSERT(un->un_devid);
		un->un_f_devid_transport_defined = TRUE;
		goto cleanup; /* use devid registered by the transport */
	}

	/*
	 * This is the case of antiquated Sun disk drives that have the
	 * FAB_DEVID property set in the disk_table. These drives
	 * manage the devids by storing them in the last 2 available
	 * sectors on the drive and have them fabricated by the ddi layer
	 * by calling ddi_devid_init and passing the DEVID_FAB flag.
	 */
	if (un->un_f_opt_fab_devid == TRUE) {
		/*
		 * Depending on EINVAL isn't reliable, since a reserved disk
		 * may result in invalid geometry, so check to make sure a
		 * reservation conflict did not occur during attach.
		 */
		if ((sd_get_devid(ssc) == EINVAL) &&
		    (reservation_flag != SD_TARGET_IS_RESERVED)) {
			/*
			 * The devid is invalid AND there is no reservation
			 * conflict. Fabricate a new devid.
			 */
			(void) sd_create_devid(ssc);
		}

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: Devid Fabricated\n");
		}
		goto cleanup;
	}

	/* encode best devid possible based on data available */
	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
	    (char *)ddi_driver_name(SD_DEVINFO(un)),
	    (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
	    inq80, inq80_len - inq80_resid, inq83, inq83_len -
	    inq83_resid, &un->un_devid) == DDI_SUCCESS) {

		/* devid successfully encoded, register devid */
		(void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);

	} else {
		/*
		 * Unable to encode a devid based on data available.
		 * This is not a Sun qualified disk. Older Sun disk
		 * drives that have the SD_FAB_DEVID property
		 * set in the disk_table and non-Sun qualified
		 * disks are treated in the same manner. These
		 * drives manage the devids by storing them in the
		 * last 2 available sectors on the drive and
		 * have them fabricated by the ddi layer by
		 * calling ddi_devid_init and passing the
		 * DEVID_FAB flag.
		 * Only create a fabricated devid if one does not
		 * already exist.
		 */
		if (sd_get_devid(ssc) == EINVAL) {
			(void) sd_create_devid(ssc);
		}
		un->un_f_opt_fab_devid = TRUE;

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: devid fabricated using "
			    "ddi framework\n");
		}
	}

cleanup:
	/* clean up resources */
	if (inq80 != NULL) {
		kmem_free(inq80, inq80_len);
	}
	if (inq83 != NULL) {
		kmem_free(inq83, inq83_len);
	}
}

/*
 * Function: sd_get_devid
 *
 * Description: This routine will return 0 if a valid device id has been
 *		obtained from the target and stored in the soft state. If a
 *		valid device id has not been previously read and stored, a
 *		read attempt will be made.
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Return Code: 0 if we successfully get the device id
 *
 * Context: Kernel Thread
 */

static int
sd_get_devid(sd_ssc_t *ssc)
{
	struct dk_devid		*dkdevid;
	ddi_devid_t		tmpid;
	uint_t			*ip;
	size_t			sz;
	diskaddr_t		blk;
	int			status;
	int			chksum;
	int			i;
	size_t			buffer_size;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
	    un);

	if (un->un_devid != NULL) {
		return (0);
	}

	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (EINVAL);
	}

	/*
	 * Read and verify the device id, stored in the reserved cylinders
	 * at the end of the disk. The backup label is on the odd sectors
	 * of the last track of the last cylinder. The device id is on the
	 * track of the next-to-last cylinder.
	 */
	mutex_enter(SD_MUTEX(un));
	buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid));
	mutex_exit(SD_MUTEX(un));
	dkdevid = kmem_alloc(buffer_size, KM_SLEEP);
	status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk,
	    SD_PATH_DIRECT);

	if (status != 0) {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto error;
	}

	/* Validate the revision */
	if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
		status = EINVAL;
		goto error;
	}

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Compare the checksums */
	if (DKD_GETCHKSUM(dkdevid) != chksum) {
		status = EINVAL;
		goto error;
	}

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
		status = EINVAL;
		goto error;
	}

	/*
	 * Store the device id in the driver soft state
	 */
	sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
	tmpid = kmem_alloc(sz, KM_SLEEP);

	mutex_enter(SD_MUTEX(un));

	un->un_devid = tmpid;
	bcopy(&dkdevid->dkd_devid, un->un_devid, sz);

	kmem_free(dkdevid, buffer_size);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un);

	return (status);
error:
	mutex_enter(SD_MUTEX(un));
	kmem_free(dkdevid, buffer_size);
	return (status);
}
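
/*
 * Illustrative sketch, not part of the driver: the devid sector
 * checksum used above (and again in sd_write_deviceid() below) is a
 * plain XOR of every 32-bit word in the sector except the last,
 * which holds the checksum itself. In isolation (hypothetical name,
 * excluded from compilation):
 */
#if 0	/* example only */
static uint_t
sd_example_dkdevid_chksum(const uint_t *sector)
{
	uint_t chksum = 0;
	int i;

	for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int)); i++)
		chksum ^= sector[i];
	return (chksum);
}
#endif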

/*
 * Function: sd_create_devid
 *
 * Description: This routine will fabricate the device id and write it
 *		to the disk.
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Return Code: value of the fabricated device id
 *
 * Context: Kernel Thread
 */

static ddi_devid_t
sd_create_devid(sd_ssc_t *ssc)
{
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/* Fabricate the devid */
	if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
	    == DDI_FAILURE) {
		return (NULL);
	}

	/* Write the devid to disk */
	if (sd_write_deviceid(ssc) != 0) {
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	return (un->un_devid);
}

/*
 * Function: sd_write_deviceid
 *
 * Description: This routine will write the device id to the disk
 *		reserved sector.
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Return Code: EINVAL
 *		value returned by sd_send_scsi_cmd
 *
 * Context: Kernel Thread
 */

static int
sd_write_deviceid(sd_ssc_t *ssc)
{
	struct dk_devid		*dkdevid;
	uchar_t			*buf;
	diskaddr_t		blk;
	uint_t			*ip, chksum;
	int			status;
	int			i;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (-1);
	}

	/* Allocate the buffer */
	buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
	dkdevid = (struct dk_devid *)buf;

	/* Fill in the revision */
	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	mutex_enter(SD_MUTEX(un));
	bcopy(un->un_devid, &dkdevid->dkd_devid,
	    ddi_devid_sizeof(un->un_devid));
	mutex_exit(SD_MUTEX(un));

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Fill in the checksum */
	DKD_FORMCHKSUM(chksum, dkdevid);

	/* Write the reserved sector */
	status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk,
	    SD_PATH_DIRECT);
	if (status != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	kmem_free(buf, un->un_sys_blocksize);

	mutex_enter(SD_MUTEX(un));
	return (status);
}

/*
 * Function: sd_check_vpd_page_support
 *
 * Description: This routine sends an inquiry command with the EVPD bit set
 *		and a page code of 0x00 to the device. It is used to determine
 *		which vital product pages are available to find the devid. We
 *		are looking for pages 0x83, 0x80, or 0xB1. If we return -1,
 *		the device does not support that command.
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Return Code: 0 - success
 *		1 - check condition
 *		-1 - the device does not support VPD pages
 *
 * Context: This routine can sleep.
 */

static int
sd_check_vpd_page_support(sd_ssc_t *ssc)
{
	uchar_t	*page_list	= NULL;
	uchar_t	page_length	= 0xff;	/* Use max possible length */
	uchar_t	evpd		= 0x01;	/* Set the EVPD bit */
	uchar_t	page_code	= 0x00;	/* Supported VPD Pages */
	int	rval		= 0;
	int	counter;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));

	/*
	 * We'll set the page length to the maximum to save figuring it out
	 * with an additional call.
	 */
	page_list = kmem_zalloc(page_length, KM_SLEEP);

	rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd,
	    page_code, NULL);

	if (rval != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Now we must validate that the device accepted the command, as some
	 * drives do not support it. If the drive does support it, we will
	 * return 0, and the supported pages will be in un_vpd_page_mask. If
	 * not, we return -1.
	 */
	if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) {
		/* Loop to find one of the 2 pages we need */
		counter = 4;  /* Supported pages start at byte 4, with 0x00 */

		/*
		 * Pages are returned in ascending order, and 0x83 is what we
		 * are hoping for.
		 */
		while ((page_list[counter] <= 0xB1) &&
		    (counter <= (page_list[VPD_PAGE_LENGTH] +
		    VPD_HEAD_OFFSET))) {
			/*
			 * Add 3 because page_list[3] is the number of
			 * pages minus 3
			 */

			switch (page_list[counter]) {
			case 0x00:
				un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG;
				break;
			case 0x80:
				un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG;
				break;
			case 0x81:
				un->un_vpd_page_mask |= SD_VPD_OPERATING_PG;
				break;
			case 0x82:
				un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG;
				break;
			case 0x83:
				un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG;
				break;
			case 0x86:
				un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG;
				break;
			case 0xB0:
				un->un_vpd_page_mask |= SD_VPD_BLK_LIMITS_PG;
				break;
			case 0xB1:
				un->un_vpd_page_mask |= SD_VPD_DEV_CHARACTER_PG;
				break;
			}
			counter++;
		}

	} else {
		rval = -1;

		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_check_vpd_page_support: This drive does not implement "
		    "VPD pages.\n");
	}

	kmem_free(page_list, page_length);

	return (rval);
}
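
/*
 * Illustrative sketch, not part of the driver: the Supported VPD
 * Pages response walked above is a 4-byte header (byte 3 holds the
 * page list length) followed by the supported page codes in
 * ascending order. A minimal membership test over that layout
 * (hypothetical name, excluded from compilation):
 */
#if 0	/* example only */
static int
sd_example_vpd_page_listed(const uchar_t *page_list, uchar_t code)
{
	int i;

	for (i = 0; i < page_list[3]; i++) {
		if (page_list[4 + i] == code)
			return (1);
	}
	return (0);
}
#endif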

/*
 * Function: sd_setup_pm
 *
 * Description: Initialize Power Management on the device
 *
 * Context: Kernel Thread
 */

static void
sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi)
{
	uint_t		log_page_size;
	uchar_t		*log_page_data;
	int		rval = 0;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/*
	 * Since we are called from attach, holding a mutex for
	 * un is unnecessary. Because some of the routines called
	 * from here require SD_MUTEX to not be held, assert this
	 * right up front.
	 */
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/*
	 * Since the sd device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "pm-hardware-state", "needs-suspend-resume");

	/*
	 * This complies with the new power management framework
	 * for certain desktop machines. Create the pm_components
	 * property as a string array property.
	 * If un_f_pm_supported is TRUE, that means the HBA to which
	 * the disk is attached has set the "pm-capable" property and
	 * the value of this property is greater than 0.
	 */
	if (un->un_f_pm_supported) {
		/*
		 * Not all devices have a motor, so try it first. Some
		 * devices may return ILLEGAL REQUEST, and some will hang.
		 * The following START_STOP_UNIT is used to check if the
		 * target device has a motor.
		 */
		un->un_f_start_stop_supported = TRUE;

		if (un->un_f_power_condition_supported) {
			rval = sd_send_scsi_START_STOP_UNIT(ssc,
			    SD_POWER_CONDITION, SD_TARGET_ACTIVE,
			    SD_PATH_DIRECT);
			if (rval != 0) {
				un->un_f_power_condition_supported = FALSE;
			}
		}
		if (!un->un_f_power_condition_supported) {
			rval = sd_send_scsi_START_STOP_UNIT(ssc,
			    SD_START_STOP, SD_TARGET_START, SD_PATH_DIRECT);
		}
		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			un->un_f_start_stop_supported = FALSE;
		}

		/*
		 * Create pm properties anyway, otherwise the parent can't
		 * go to sleep.
		 */
		un->un_f_pm_is_enabled = TRUE;
		(void) sd_create_pm_components(devi, un);

		/*
		 * If it claims that log sense is supported, check it out.
		 */
		if (un->un_f_log_sense_supported) {
			rval = sd_log_page_supported(ssc,
			    START_STOP_CYCLE_PAGE);
			if (rval == 1) {
				/* Page found, use it. */
				un->un_start_stop_cycle_page =
				    START_STOP_CYCLE_PAGE;
			} else {
				/*
				 * Page not found or log sense is not
				 * supported.
				 * Notice we do not check the old style
				 * START_STOP_CYCLE_VU_PAGE because this
				 * code path does not apply to old disks.
				 */
				un->un_f_log_sense_supported = FALSE;
				un->un_f_pm_log_sense_smart = FALSE;
			}
		}

		return;
	}

	/*
	 * For a disk whose attached HBA has not set the "pm-capable"
	 * property, check whether it supports power management.
	 */
	if (!un->un_f_log_sense_supported) {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
		return;
	}

	rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);

#ifdef SDDEBUG
	if (sd_force_pm_supported) {
		/* Force a successful result */
		rval = 1;
	}
#endif

	/*
	 * If the start-stop cycle counter log page is not supported
	 * or if the pm-capable property is set to be false (0),
	 * then we should not create the pm_components property.
	 */
	if (rval == -1) {
		/*
		 * Error.
		 * Reading log sense failed, most likely this is
		 * an older drive that does not support log sense.
		 * If this fails, auto-pm is not supported.
		 */
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;

	} else if (rval == 0) {
		/*
		 * Page not found.
		 * The start stop cycle counter is implemented as page
		 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For
		 * newer disks it is implemented as START_STOP_CYCLE_PAGE
		 * (0xE).
		 */
		if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE)
		    == 1) {
			/*
			 * Page found, use this one.
			 */
			un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
			un->un_f_pm_is_enabled = TRUE;
		} else {
			/*
			 * Error or page not found.
			 * auto-pm is not supported for this device.
			 */
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}
	} else {
		/*
		 * Page found, use it.
		 */
		un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
		un->un_f_pm_is_enabled = TRUE;
	}

	if (un->un_f_pm_is_enabled == TRUE) {
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
		    log_page_size, un->un_start_stop_cycle_page,
		    0x01, 0, SD_PATH_DIRECT);

		if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

#ifdef SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif

		/*
		 * If the LOG SENSE for the start/stop cycle counter page
		 * succeeds, then power management is supported and we can
		 * enable auto-pm.
		 */
		if (rval == 0) {
			(void) sd_create_pm_components(devi, un);
		} else {
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}

		kmem_free(log_page_data, log_page_size);
	}
}

/*
 * Function: sd_create_pm_components
 *
 * Description: Initialize PM property.
 *
 * Context: Kernel thread context
 */

static void
sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
{
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (un->un_f_power_condition_supported) {
		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
		    "pm-components", sd_pwr_pc.pm_comp, 5)
		    != DDI_PROP_SUCCESS) {
			un->un_power_level = SD_SPINDLE_ACTIVE;
			un->un_f_pm_is_enabled = FALSE;
			return;
		}
	} else {
		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
		    "pm-components", sd_pwr_ss.pm_comp, 3)
		    != DDI_PROP_SUCCESS) {
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
			return;
		}
	}
	/*
	 * When components are initially created they are idle,
	 * power up any non-removables.
	 * Note: the return value of pm_raise_power can't be used
	 * for determining if PM should be enabled for this device.
	 * Even if you check the return values and remove this
	 * property created above, the PM framework will not honor the
	 * change after the first call to pm_raise_power. Hence,
	 * removal of that property does not help if pm_raise_power
	 * fails. In the case of removable media, the start/stop
	 * will fail if the media is not present.
	 */
	if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
	    SD_PM_STATE_ACTIVE(un)) == DDI_SUCCESS)) {
		mutex_enter(SD_MUTEX(un));
		un->un_power_level = SD_PM_STATE_ACTIVE(un);
		mutex_enter(&un->un_pm_mutex);
		/* Set to on and not busy. */
		un->un_pm_count = 0;
	} else {
		mutex_enter(SD_MUTEX(un));
		un->un_power_level = SD_PM_STATE_STOPPED(un);
		mutex_enter(&un->un_pm_mutex);
		/* Set to off. */
		un->un_pm_count = -1;
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));
}

/*
 * Function: sd_ddi_suspend
 *
 * Description: Performs system power-down operations. This includes
 *		setting the drive state to indicate it is suspended so
 *		that no new commands will be accepted. Also, wait for
 *		all commands that are in transport or queued to a timer
 *		for retry to complete. All timeout threads are cancelled.
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_suspend(dev_info_t *devi)
{
	struct sd_lun	*un;
	clock_t		wait_cmds_complete;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");

	mutex_enter(SD_MUTEX(un));

	/* Return success if the device is already suspended. */
	if (un->un_state == SD_STATE_SUSPENDED) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device already suspended, exiting\n");
		return (DDI_SUCCESS);
	}

	/* Return failure if the device is being used by HA */
	if (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in use by HA, exiting\n");
		return (DDI_FAILURE);
	}

	/*
	 * Return failure if the device is in a resource wait
	 * or power changing state.
	 */
	if ((un->un_state == SD_STATE_RWAIT) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in resource wait state, exiting\n");
		return (DDI_FAILURE);
	}

	un->un_save_state = un->un_last_state;
	New_state(un, SD_STATE_SUSPENDED);

	/*
	 * Wait for all commands that are in transport or queued to a timer
	 * for retry to complete.
	 *
	 * While waiting, no new commands will be accepted or sent because of
	 * the new state we set above.
	 *
	 * Wait until the current operation has completed. If we are in the
	 * resource wait state (with an intr outstanding) then we need to
	 * wait until the intr completes and starts the next cmd. We want to
	 * wait for SD_WAIT_CMDS_COMPLETE seconds before failing the
	 * DDI_SUSPEND.
	 */
	wait_cmds_complete = ddi_get_lbolt() +
	    (sd_wait_cmds_complete * drv_usectohz(1000000));

	while (un->un_ncmds_in_transport != 0) {
		/*
		 * Fail if commands do not finish in the specified time.
		 */
		if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
		    wait_cmds_complete) == -1) {
			/*
			 * Undo the state changes made above. Everything
			 * must go back to its original value.
			 */
			Restore_state(un);
			un->un_last_state = un->un_save_state;
			/* Wake up any threads that might be waiting. */
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_ERROR(SD_LOG_IO_PM, un,
			    "sd_ddi_suspend: failed due to outstanding "
			    "cmds\n");
			SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
			return (DDI_FAILURE);
		}
	}

	/*
	 * Cancel SCSI watch thread and timeouts, if any are active
	 */

	if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
		opaque_t temp_token = un->un_swr_token;
		mutex_exit(SD_MUTEX(un));
		scsi_watch_suspend(temp_token);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_timeid;
		un->un_pm_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	if (un->un_rmw_msg_timeid != NULL) {
		timeout_id_t temp_id = un->un_rmw_msg_timeid;
		un->un_rmw_msg_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_retry_timeid != NULL) {
		timeout_id_t temp_id = un->un_retry_timeid;
		un->un_retry_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));

		if (un->un_retry_bp != NULL) {
			un->un_retry_bp->av_forw = un->un_waitq_headp;
			un->un_waitq_headp = un->un_retry_bp;
			if (un->un_waitq_tailp == NULL) {
				un->un_waitq_tailp = un->un_retry_bp;
			}
			un->un_retry_bp = NULL;
			un->un_retry_statp = NULL;
		}
	}

	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Remove callbacks for insert and remove events
		 */
		if (un->un_insert_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_insert_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_insert_event = NULL;
		}

		if (un->un_remove_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_remove_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_remove_event = NULL;
		}
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");

	return (DDI_SUCCESS);
}
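
/*
 * Illustrative sketch, not part of the driver: cv_timedwait() above
 * takes an absolute deadline in lbolt ticks, so the timeout is
 * computed once, before the wait loop, as "now + N seconds".
 * Restated in isolation (hypothetical name, excluded from
 * compilation):
 */
#if 0	/* example only */
static clock_t
sd_example_deadline(int seconds)
{
	/* drv_usectohz() converts microseconds to clock ticks */
	return (ddi_get_lbolt() + seconds * drv_usectohz(1000000));
}
#endif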

/*
 * Function: sd_ddi_resume
 *
 * Description: Performs system power-up operations.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_resume(dev_info_t *devi)
{
	struct sd_lun	*un;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");

	mutex_enter(SD_MUTEX(un));
	Restore_state(un);

	/*
	 * Restore the state which was saved to give the
	 * right state in un_last_state
	 */
	un->un_last_state = un->un_save_state;
	/*
	 * Note: throttle comes back at full.
	 * Also note: this MUST be done before calling pm_raise_power
	 * otherwise the system can get hung in biowait. The scenario where
	 * this'll happen is under cpr suspend. Writing of the system
	 * state goes through sddump, which writes 0 to un_throttle. If
	 * writing the system state then fails, for example if the partition
	 * is too small, then cpr attempts a resume. If throttle isn't
	 * restored from the saved value until after calling pm_raise_power,
	 * then cmds sent in sdpower are not transported and sd_send_scsi_cmd
	 * hangs in biowait.
	 */
	un->un_throttle = un->un_saved_throttle;

	/*
	 * The chance of failure is very rare as the only command done in
	 * the power entry point is the START command when you transition
	 * from 0->1 or unknown->1. Put it to SPINDLE ON state irrespective
	 * of the state at which suspend was done. Ignore the return value
	 * as the resume should not be failed. In the case of removable
	 * media the media need not be inserted and hence there is a chance
	 * that raise power will fail with media not present.
	 */
	if (un->un_f_attach_spinup) {
		mutex_exit(SD_MUTEX(un));
		(void) pm_raise_power(SD_DEVINFO(un), 0,
		    SD_PM_STATE_ACTIVE(un));
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Don't broadcast to the suspend cv and therefore possibly
	 * start I/O until after power has been restored.
	 */
	cv_broadcast(&un->un_suspend_cv);
	cv_broadcast(&un->un_state_cv);

	/* restart thread */
	if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
		scsi_watch_resume(un->un_swr_token);
	}

#if (defined(__fibre))
	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Add callbacks for insert and remove events
		 */
		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
			sd_init_event_callbacks(un);
		}
	}
#endif

	/*
	 * Transport any pending commands to the target.
	 *
	 * If this is a low-activity device, commands in queue will have to
	 * wait until new commands come in, which may take a while. Also, we
	 * specifically don't check un_ncmds_in_transport because we know that
	 * there really are no commands in progress after the unit was
	 * suspended and we could have reached the throttle level, been
	 * suspended, and have no new commands coming in for a while. Highly
	 * unlikely, but so is the low-activity disk scenario.
	 */
	ddi_xbuf_dispatch(un->un_xbuf_attr);

	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");

	return (DDI_SUCCESS);
}

/*
 * Function: sd_pm_state_change
 *
 * Description: Change the driver power state.
 *		Someone else is required to actually change the driver
 *		power level.
 *
 * Arguments:	un - driver soft state (unit) structure
 *		level - the power level that is changed to
 *		flag - to decide how to change the power state
 *
 * Return Code: DDI_SUCCESS
 *
 * Context: Kernel thread context
 */
static int
sd_pm_state_change(struct sd_lun *un, int level, int flag)
{
	ASSERT(un != NULL);
	SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: entry\n");

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	if (flag == SD_PM_STATE_ROLLBACK || SD_PM_IS_IO_CAPABLE(un, level)) {
		un->un_power_level = level;
		ASSERT(!mutex_owned(&un->un_pm_mutex));
		mutex_enter(&un->un_pm_mutex);
		if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
			un->un_pm_count++;
			ASSERT(un->un_pm_count == 0);
		}
		mutex_exit(&un->un_pm_mutex);
	} else {
		/*
		 * Exit if power management is not enabled for this device,
		 * or if the device is being used by HA.
		 */
		if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
		    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_POWER, un,
			    "sd_pm_state_change: exiting\n");
			return (DDI_FAILURE);
		}

		SD_INFO(SD_LOG_POWER, un, "sd_pm_state_change: "
		    "un_ncmds_in_driver=%ld\n", un->un_ncmds_in_driver);

		/*
		 * See if the device is not busy, i.e.:
		 *    - we have no commands in the driver for this device
		 *    - not waiting for resources
		 */
		if ((un->un_ncmds_in_driver == 0) &&
		    (un->un_state != SD_STATE_RWAIT)) {
			/*
			 * The device is not busy, so it is OK to go to low
			 * power state. Indicate low power, but rely on someone
			 * else to actually change it.
			 */
			mutex_enter(&un->un_pm_mutex);
			un->un_pm_count = -1;
			mutex_exit(&un->un_pm_mutex);
			un->un_power_level = level;
		}
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: exit\n");

	return (DDI_SUCCESS);
}

/*
 * Function: sd_pm_idletimeout_handler
 *
 * Description: A timer routine that's active only while a device is busy.
 *		The purpose is to extend slightly the pm framework's busy
 *		view of the device to prevent busy/idle thrashing for
 *		back-to-back commands. Do this by comparing the current time
 *		to the time at which the last command completed and when the
 *		difference is greater than sd_pm_idletime, call
 *		pm_idle_component. In addition to indicating idle to the pm
 *		framework, update the chain type to again use the internal pm
 *		layers of the driver.
 *
 * Arguments:	arg - driver soft state (unit) structure
 *
 * Context: Executes in a timeout(9F) thread context
 */

static void
sd_pm_idletimeout_handler(void *arg)
{
	const hrtime_t idletime = sd_pm_idletime * NANOSEC;
	struct sd_lun *un = arg;

	/*
	 * Grab both mutexes, in the proper order, since we're accessing
	 * both PM and softstate variables.
	 */
	mutex_enter(SD_MUTEX(un));
	mutex_enter(&un->un_pm_mutex);
	/* if timeout id is NULL, we are being canceled via untimeout */
	if (un->un_pm_idle_timeid == NULL) {
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
		return;
	}
	if (((gethrtime() - un->un_pm_idle_time) > idletime) &&
	    (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
		/*
		 * Update the chain types.
		 * This takes effect on the next new command received.
		 */
		if (un->un_f_non_devbsize_supported) {
			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
		} else {
			un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
		}
		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_idletimeout_handler: idling device\n");
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		un->un_pm_idle_timeid = NULL;
	} else {
		un->un_pm_idle_timeid =
		    timeout(sd_pm_idletimeout_handler, un,
		    (drv_usectohz((clock_t)300000))); /* 300 ms. */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));
}
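
/*
 * Illustrative sketch, not part of the driver: the idle test above
 * compares a saved command-completion timestamp against the current
 * gethrtime() value, with the threshold expressed in nanoseconds.
 * In isolation (hypothetical name, excluded from compilation):
 */
#if 0	/* example only */
static boolean_t
sd_example_is_idle(hrtime_t last_done, int idle_secs)
{
	return ((gethrtime() - last_done) > (hrtime_t)idle_secs * NANOSEC);
}
#endif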

/*
 * Function: sd_pm_timeout_handler
 *
 * Description: Callback to tell framework we are idle.
 *
 * Context: timeout(9f) thread context.
 */

static void
sd_pm_timeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	(void) pm_idle_component(SD_DEVINFO(un), 0);
	mutex_enter(&un->un_pm_mutex);
	un->un_pm_timeid = NULL;
	mutex_exit(&un->un_pm_mutex);
}

/*
 * Function: sdpower
 *
 * Description: PM entry point.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdpower(dev_info_t *devi, int component, int level)
{
	struct sd_lun	*un;
	int		instance;
	int		rval = DDI_SUCCESS;
	uint_t		i, log_page_size, maxcycles, ncycles;
	uchar_t		*log_page_data;
	int		log_sense_page;
	int		medium_present;
	time_t		intvlp;
	struct pm_trans_data	sd_pm_tran_data;
	uchar_t		save_state = SD_STATE_NORMAL;
	int		sval;
	uchar_t		state_before_pm;
	sd_ssc_t	*ssc;
	int		last_power_level = SD_SPINDLE_UNINIT;

	instance = ddi_get_instance(devi);

	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    !SD_PM_IS_LEVEL_VALID(un, level) || component != 0) {
		return (DDI_FAILURE);
	}

	ssc = sd_ssc_init(un);

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);

	mutex_enter(SD_MUTEX(un));

	SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * If un_ncmds_in_driver is non-zero it indicates commands are
	 * already being processed in the driver. At the same time somebody
	 * is requesting to go to a lower power level that can't perform
	 * I/O, which can't happen, therefore we need to return failure.
	 */
	if ((!SD_PM_IS_IO_CAPABLE(un, level)) &&
	    (un->un_ncmds_in_driver != 0)) {
		mutex_exit(SD_MUTEX(un));

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device has queued cmds.\n");

		goto sdpower_failed;
	}

	/*
	 * If the device is OFFLINE, that means the disk is completely dead;
	 * in our case we would have to turn the disk on or off by sending
	 * commands, which would fail anyway, so return here.
	 *
	 * Power changes to a device that's OFFLINE or SUSPENDED
	 * are not allowed.
	 */
	if ((un->un_state == SD_STATE_OFFLINE) ||
	    (un->un_state == SD_STATE_SUSPENDED)) {
		mutex_exit(SD_MUTEX(un));

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device is off-line.\n");

		goto sdpower_failed;
	}

	/*
	 * Change the device's state to indicate its power level
	 * is being changed. Do this to prevent a power off in the
	 * middle of commands, which is especially bad on devices
	 * that are really powered off instead of just spun down.
	 */
	state_before_pm = un->un_state;
	un->un_state = SD_STATE_PM_CHANGING;

	mutex_exit(SD_MUTEX(un));

	/*
	 * If the log sense command is not supported, bypass the
	 * following checking; otherwise, check the log sense
	 * information for this device.
	 */
	if (SD_PM_STOP_MOTOR_NEEDED(un, level) &&
	    un->un_f_log_sense_supported) {
		/*
		 * Get the log sense information to understand whether the
		 * power cycle counts have gone beyond the threshold.
		 */
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		mutex_enter(SD_MUTEX(un));
		log_sense_page = un->un_start_stop_cycle_page;
		mutex_exit(SD_MUTEX(un));

		rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);

		if (rval != 0) {
			if (rval == EIO)
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			else
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

#ifdef SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Log Sense Failed\n");

			kmem_free(log_page_data, log_page_size);
			/* Cannot support power management on those drives */

			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, Log Sense Failed.\n");

			goto sdpower_failed;
		}

		/*
		 * From the page data - Convert the essential information to
		 * pm_trans_data
		 */
		maxcycles =
		    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
		    (log_page_data[0x1E] << 8)  | log_page_data[0x1F];

		ncycles =
		    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
		    (log_page_data[0x26] << 8)  | log_page_data[0x27];

		if (un->un_f_pm_log_sense_smart) {
			sd_pm_tran_data.un.smart_count.allowed = maxcycles;
			sd_pm_tran_data.un.smart_count.consumed = ncycles;
			sd_pm_tran_data.un.smart_count.flag = 0;
			sd_pm_tran_data.format = DC_SMART_FORMAT;
		} else {
			sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
			sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
			for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
				sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
				    log_page_data[8+i];
			}
			sd_pm_tran_data.un.scsi_cycles.flag = 0;
			sd_pm_tran_data.format = DC_SCSI_FORMAT;
		}

		kmem_free(log_page_data, log_page_size);

		/*
		 * Call pm_trans_check routine to get the Ok from
		 * the global policy
		 */
		rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
#ifdef SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 1;
		}
#endif
		switch (rval) {
		case 0:
			/*
			 * Not OK to power cycle, or an error occurred in the
			 * parameters passed. The advised time before a power
			 * cycle should be considered has been returned in
			 * intvlp. Based on that intvlp parameter we are
			 * supposed to pretend we are busy so that the pm
			 * framework will never call our power entry point.
			 * Because of that, install a timeout handler and
			 * wait for the recommended time to elapse so that
			 * power management can be effective again.
			 *
			 * To effect this behavior, call pm_busy_component to
			 * indicate to the framework this device is busy.
			 * By not adjusting un_pm_count the rest of PM in
			 * the driver will function normally, and independent
			 * of this but because the framework is told the device
			 * is busy it won't attempt powering down until it gets
			 * a matching idle. The timeout handler sends this.
			 * Note: sd_pm_entry can't be called here to do this
			 * because sdpower may have been called as a result
			 * of a call to pm_raise_power from within sd_pm_entry.
			 *
			 * If a timeout handler is already active then
			 * don't install another.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid == NULL) {
				un->un_pm_timeid =
				    timeout(sd_pm_timeout_handler,
				    un, intvlp * drv_usectohz(1000000));
				mutex_exit(&un->un_pm_mutex);
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));

			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
			    "trans check Failed, not ok to power cycle.\n");

			goto sdpower_failed;
		case -1:
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, trans check command Failed.\n");

			goto sdpower_failed;
		}
	}

	if (!SD_PM_IS_IO_CAPABLE(un, level)) {
		/*
		 * Save the last state... if the STOP FAILS we need it
		 * for restoring
		 */
		mutex_enter(SD_MUTEX(un));
		save_state = un->un_last_state;
		last_power_level = un->un_power_level;
		/*
		 * There must not be any cmds getting processed
		 * in the driver when we get here. Power to the
		 * device is potentially going off.
		 */
		ASSERT(un->un_ncmds_in_driver == 0);
		mutex_exit(SD_MUTEX(un));

		/*
		 * For now PM suspend the device completely before the
		 * spindle is turned off
		 */
		if ((rval = sd_pm_state_change(un, level, SD_PM_STATE_CHANGE))
		    == DDI_FAILURE) {
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			un->un_power_level = last_power_level;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, PM suspend Failed.\n");

			goto sdpower_failed;
		}
	}
6917 */ 6918 6919 medium_present = TRUE; 6920 6921 /* 6922 * When powering up, issue a TUR in case the device is at unit 6923 * attention. Don't do retries. Bypass the PM layer, otherwise 6924 * a deadlock on un_pm_busy_cv will occur. 6925 */ 6926 if (SD_PM_IS_IO_CAPABLE(un, level)) { 6927 sval = sd_send_scsi_TEST_UNIT_READY(ssc, 6928 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 6929 if (sval != 0) 6930 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6931 } 6932 6933 if (un->un_f_power_condition_supported) { 6934 char *pm_condition_name[] = {"STOPPED", "STANDBY", 6935 "IDLE", "ACTIVE"}; 6936 SD_TRACE(SD_LOG_IO_PM, un, 6937 "sdpower: sending \'%s\' power condition", 6938 pm_condition_name[level]); 6939 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION, 6940 sd_pl2pc[level], SD_PATH_DIRECT); 6941 } else { 6942 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 6943 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 6944 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 6945 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : 6946 SD_TARGET_STOP), SD_PATH_DIRECT); 6947 } 6948 if (sval != 0) { 6949 if (sval == EIO) 6950 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6951 else 6952 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6953 } 6954 6955 /* Command failed, check for media present. */ 6956 if ((sval == ENXIO) && un->un_f_has_removable_media) { 6957 medium_present = FALSE; 6958 } 6959 6960 /* 6961 * The conditions of interest here are: 6962 * if a spindle off with media present fails, 6963 * then restore the state and return an error. 6964 * else if a spindle on fails, 6965 * then return an error (there's no state to restore). 6966 * In all other cases we setup for the new state 6967 * and return success. 6968 */ 6969 if (!SD_PM_IS_IO_CAPABLE(un, level)) { 6970 if ((medium_present == TRUE) && (sval != 0)) { 6971 /* The stop command from above failed */ 6972 rval = DDI_FAILURE; 6973 /* 6974 * The stop command failed, and we have media 6975 * present. Put the level back by calling the 6976 * sd_pm_resume() and set the state back to 6977 * it's previous value. 6978 */ 6979 (void) sd_pm_state_change(un, last_power_level, 6980 SD_PM_STATE_ROLLBACK); 6981 mutex_enter(SD_MUTEX(un)); 6982 un->un_last_state = save_state; 6983 mutex_exit(SD_MUTEX(un)); 6984 } else if (un->un_f_monitor_media_state) { 6985 /* 6986 * The stop command from above succeeded. 6987 * Terminate watch thread in case of removable media 6988 * devices going into low power state. This is as per 6989 * the requirements of pm framework, otherwise commands 6990 * will be generated for the device (through watch 6991 * thread), even when the device is in low power state. 6992 */ 6993 mutex_enter(SD_MUTEX(un)); 6994 un->un_f_watcht_stopped = FALSE; 6995 if (un->un_swr_token != NULL) { 6996 opaque_t temp_token = un->un_swr_token; 6997 un->un_f_watcht_stopped = TRUE; 6998 un->un_swr_token = NULL; 6999 mutex_exit(SD_MUTEX(un)); 7000 (void) scsi_watch_request_terminate(temp_token, 7001 SCSI_WATCH_TERMINATE_ALL_WAIT); 7002 } else { 7003 mutex_exit(SD_MUTEX(un)); 7004 } 7005 } 7006 } else { 7007 /* 7008 * The level requested is I/O capable. 7009 * Legacy behavior: return success on a failed spinup 7010 * if there is no media in the drive. 7011 * Do this by looking at medium_present here. 
7012 */ 7013 if ((sval != 0) && medium_present) { 7014 /* The start command from above failed */ 7015 rval = DDI_FAILURE; 7016 } else { 7017 /* 7018 * The start command from above succeeded 7019 * PM resume the devices now that we have 7020 * started the disks 7021 */ 7022 (void) sd_pm_state_change(un, level, 7023 SD_PM_STATE_CHANGE); 7024 7025 /* 7026 * Resume the watch thread since it was suspended 7027 * when the device went into low power mode. 7028 */ 7029 if (un->un_f_monitor_media_state) { 7030 mutex_enter(SD_MUTEX(un)); 7031 if (un->un_f_watcht_stopped == TRUE) { 7032 opaque_t temp_token; 7033 7034 un->un_f_watcht_stopped = FALSE; 7035 mutex_exit(SD_MUTEX(un)); 7036 temp_token = 7037 sd_watch_request_submit(un); 7038 mutex_enter(SD_MUTEX(un)); 7039 un->un_swr_token = temp_token; 7040 } 7041 mutex_exit(SD_MUTEX(un)); 7042 } 7043 } 7044 } 7045 7046 /* 7047 * On exit put the state back to its original value 7048 * and broadcast to anyone waiting for the power 7049 * change completion. 7050 */ 7051 mutex_enter(SD_MUTEX(un)); 7052 un->un_state = state_before_pm; 7053 cv_broadcast(&un->un_suspend_cv); 7054 mutex_exit(SD_MUTEX(un)); 7055 7056 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 7057 7058 sd_ssc_fini(ssc); 7059 return (rval); 7060 7061 sdpower_failed: 7062 7063 sd_ssc_fini(ssc); 7064 return (DDI_FAILURE); 7065 } 7066 7067 7068 7069 /* 7070 * Function: sdattach 7071 * 7072 * Description: Driver's attach(9e) entry point function. 7073 * 7074 * Arguments: devi - opaque device info handle 7075 * cmd - attach type 7076 * 7077 * Return Code: DDI_SUCCESS 7078 * DDI_FAILURE 7079 * 7080 * Context: Kernel thread context 7081 */ 7082 7083 static int 7084 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 7085 { 7086 switch (cmd) { 7087 case DDI_ATTACH: 7088 return (sd_unit_attach(devi)); 7089 case DDI_RESUME: 7090 return (sd_ddi_resume(devi)); 7091 default: 7092 break; 7093 } 7094 return (DDI_FAILURE); 7095 } 7096 7097 7098 /* 7099 * Function: sddetach 7100 * 7101 * Description: Driver's detach(9E) entry point function. 7102 * 7103 * Arguments: devi - opaque device info handle 7104 * cmd - detach type 7105 * 7106 * Return Code: DDI_SUCCESS 7107 * DDI_FAILURE 7108 * 7109 * Context: Kernel thread context 7110 */ 7111 7112 static int 7113 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 7114 { 7115 switch (cmd) { 7116 case DDI_DETACH: 7117 return (sd_unit_detach(devi)); 7118 case DDI_SUSPEND: 7119 return (sd_ddi_suspend(devi)); 7120 default: 7121 break; 7122 } 7123 return (DDI_FAILURE); 7124 } 7125 7126 7127 /* 7128 * Function: sd_sync_with_callback 7129 * 7130 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 7131 * state while the callback routine is active. 7132 * 7133 * Arguments: un: softstate structure for the instance 7134 * 7135 * Context: Kernel thread context 7136 */ 7137 7138 static void 7139 sd_sync_with_callback(struct sd_lun *un) 7140 { 7141 ASSERT(un != NULL); 7142 7143 mutex_enter(SD_MUTEX(un)); 7144 7145 ASSERT(un->un_in_callback >= 0); 7146 7147 while (un->un_in_callback > 0) { 7148 mutex_exit(SD_MUTEX(un)); 7149 delay(2); 7150 mutex_enter(SD_MUTEX(un)); 7151 } 7152 7153 mutex_exit(SD_MUTEX(un)); 7154 } 7155 7156 /* 7157 * Function: sd_unit_attach 7158 * 7159 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 7160 * the soft state structure for the device and performs 7161 * all necessary structure and device initializations. 7162 * 7163 * Arguments: devi: the system's dev_info_t for the device. 
7164	 *
7165	 * Return Code: DDI_SUCCESS if attach is successful.
7166	 * DDI_FAILURE if any part of the attach fails.
7167	 *
7168	 * Context: Called at attach(9e) time for the DDI_ATTACH flag.
7169	 * Kernel thread context only. Can sleep.
7170	 */
7171
7172	static int
7173	sd_unit_attach(dev_info_t *devi)
7174	{
7175	struct scsi_device *devp;
7176	struct sd_lun *un;
7177	char *variantp;
7178	char name_str[48];
7179	int reservation_flag = SD_TARGET_IS_UNRESERVED;
7180	int instance;
7181	int rval;
7182	int wc_enabled;
7183	int wc_changeable;
7184	int tgt;
7185	uint64_t capacity;
7186	uint_t lbasize = 0;
7187	dev_info_t *pdip = ddi_get_parent(devi);
7188	int offbyone = 0;
7189	int geom_label_valid = 0;
7190	sd_ssc_t *ssc;
7191	int status;
7192	struct sd_fm_internal *sfip = NULL;
7193	int max_xfer_size;
7194
7195	/*
7196	 * Retrieve the target driver's private data area. This was set
7197	 * up by the HBA.
7198	 */
7199	devp = ddi_get_driver_private(devi);
7200
7201	/*
7202	 * Retrieve the target ID of the device.
7203	 */
7204	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
7205	    SCSI_ADDR_PROP_TARGET, -1);
7206
7207	/*
7208	 * Since we have no idea what state things were left in by the last
7209	 * user of the device, set up some 'default' settings, i.e. turn 'em
7210	 * off. The scsi_ifsetcap calls force re-negotiations with the drive.
7211	 * Do this before the scsi_probe, which sends an inquiry.
7212	 * This is a fix for bug (4430280).
7213	 * Of special importance is wide-xfer. The drive could have been left
7214	 * in wide transfer mode by the last driver to communicate with it;
7215	 * this includes us. If that's the case, and if the following is not
7216	 * set up properly or we don't re-negotiate with the drive prior to
7217	 * transferring data to/from the drive, it causes bus parity errors,
7218	 * data overruns, and unexpected interrupts. This first occurred when
7219	 * the fix for bug (4378686) was made.
7220	 */
7221	(void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
7222	(void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
7223	(void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);
7224
7225	/*
7226	 * Currently, scsi_ifsetcap sets the tagged-qing capability for all
7227	 * LUNs on a target. Setting it per lun instance actually sets the
7228	 * capability of this target, which affects those luns already
7229	 * attached on the same target. So during attach, we can disable
7230	 * this capability only when no other lun has been attached on this
7231	 * target. By doing this, we assume a target has the same tagged-qing
7232	 * capability for every lun. The condition can be removed when the
7233	 * HBA is changed to support a per-lun tagged-qing capability.
7234	 */
7235	if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
7236		(void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
7237	}
7238
7239	/*
7240	 * Use scsi_probe() to issue an INQUIRY command to the device.
7241	 * This call will allocate and fill in the scsi_inquiry structure
7242	 * and point the sd_inq member of the scsi_device structure to it.
7243	 * If the attach succeeds, then this memory will not be de-allocated
7244	 * (via scsi_unprobe()) until the instance is detached.
7245	 */
7246	if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
7247		goto probe_failed;
7248	}
7249
7250	/*
7251	 * Check the device type as specified in the inquiry data and
7252	 * claim it if it is of a type that we support (the standard codes
	 * are listed for reference below).
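 *
 * For reference (a sketch; the authoritative definitions live in
 * sys/scsi/generic/inquiry.h), the inq_dtype values handled here are:
 *
 *	DTYPE_DIRECT	0x00	direct-access block device (disk)
 *	DTYPE_RODIRECT	0x05	read-only direct access (CD/DVD)
 *	DTYPE_OPTICAL	0x07	optical memory device
 *	DTYPE_NOTPRESENT	LUN not present; attach is failed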
7253 */ 7254 switch (devp->sd_inq->inq_dtype) { 7255 case DTYPE_DIRECT: 7256 break; 7257 case DTYPE_RODIRECT: 7258 break; 7259 case DTYPE_OPTICAL: 7260 break; 7261 case DTYPE_NOTPRESENT: 7262 default: 7263 /* Unsupported device type; fail the attach. */ 7264 goto probe_failed; 7265 } 7266 7267 /* 7268 * Allocate the soft state structure for this unit. 7269 * 7270 * We rely upon this memory being set to all zeroes by 7271 * ddi_soft_state_zalloc(). We assume that any member of the 7272 * soft state structure that is not explicitly initialized by 7273 * this routine will have a value of zero. 7274 */ 7275 instance = ddi_get_instance(devp->sd_dev); 7276 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 7277 goto probe_failed; 7278 } 7279 7280 /* 7281 * Retrieve a pointer to the newly-allocated soft state. 7282 * 7283 * This should NEVER fail if the ddi_soft_state_zalloc() call above 7284 * was successful, unless something has gone horribly wrong and the 7285 * ddi's soft state internals are corrupt (in which case it is 7286 * probably better to halt here than just fail the attach....) 7287 */ 7288 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 7289 panic("sd_unit_attach: NULL soft state on instance:0x%x", 7290 instance); 7291 /*NOTREACHED*/ 7292 } 7293 7294 /* 7295 * Link the back ptr of the driver soft state to the scsi_device 7296 * struct for this lun. 7297 * Save a pointer to the softstate in the driver-private area of 7298 * the scsi_device struct. 7299 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 7300 * we first set un->un_sd below. 7301 */ 7302 un->un_sd = devp; 7303 devp->sd_private = (opaque_t)un; 7304 7305 /* 7306 * The following must be after devp is stored in the soft state struct. 7307 */ 7308 #ifdef SDDEBUG 7309 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7310 "%s_unit_attach: un:0x%p instance:%d\n", 7311 ddi_driver_name(devi), un, instance); 7312 #endif 7313 7314 /* 7315 * Set up the device type and node type (for the minor nodes). 7316 * By default we assume that the device can at least support the 7317 * Common Command Set. Call it a CD-ROM if it reports itself 7318 * as a RODIRECT device. 7319 */ 7320 switch (devp->sd_inq->inq_dtype) { 7321 case DTYPE_RODIRECT: 7322 un->un_node_type = DDI_NT_CD_CHAN; 7323 un->un_ctype = CTYPE_CDROM; 7324 break; 7325 case DTYPE_OPTICAL: 7326 un->un_node_type = DDI_NT_BLOCK_CHAN; 7327 un->un_ctype = CTYPE_ROD; 7328 break; 7329 default: 7330 un->un_node_type = DDI_NT_BLOCK_CHAN; 7331 un->un_ctype = CTYPE_CCS; 7332 break; 7333 } 7334 7335 /* 7336 * Try to read the interconnect type from the HBA. 7337 * 7338 * Note: This driver is currently compiled as two binaries, a parallel 7339 * scsi version (sd) and a fibre channel version (ssd). All functional 7340 * differences are determined at compile time. In the future a single 7341 * binary will be provided and the interconnect type will be used to 7342 * differentiate between fibre and parallel scsi behaviors. At that time 7343 * it will be necessary for all fibre channel HBAs to support this 7344 * property. 
7345	 *
7346	 * Set un_f_is_fibre to TRUE (default to fibre).
7347	 */
7348	un->un_f_is_fibre = TRUE;
7349	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
7350	case INTERCONNECT_SSA:
7351		un->un_interconnect_type = SD_INTERCONNECT_SSA;
7352		SD_INFO(SD_LOG_ATTACH_DETACH, un,
7353		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
7354		break;
7355	case INTERCONNECT_PARALLEL:
7356		un->un_f_is_fibre = FALSE;
7357		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
7358		SD_INFO(SD_LOG_ATTACH_DETACH, un,
7359		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
7360		break;
7361	case INTERCONNECT_SAS:
7362		un->un_f_is_fibre = FALSE;
7363		un->un_interconnect_type = SD_INTERCONNECT_SAS;
7364		un->un_node_type = DDI_NT_BLOCK_SAS;
7365		SD_INFO(SD_LOG_ATTACH_DETACH, un,
7366		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un);
7367		break;
7368	case INTERCONNECT_SATA:
7369		un->un_f_is_fibre = FALSE;
7370		un->un_interconnect_type = SD_INTERCONNECT_SATA;
7371		SD_INFO(SD_LOG_ATTACH_DETACH, un,
7372		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
7373		break;
7374	case INTERCONNECT_FIBRE:
7375		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
7376		SD_INFO(SD_LOG_ATTACH_DETACH, un,
7377		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
7378		break;
7379	case INTERCONNECT_FABRIC:
7380		un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
7381		un->un_node_type = DDI_NT_BLOCK_FABRIC;
7382		SD_INFO(SD_LOG_ATTACH_DETACH, un,
7383		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
7384		break;
7385	default:
7386	#ifdef SD_DEFAULT_INTERCONNECT_TYPE
7387	/*
7388	 * The HBA does not support the "interconnect-type" property
7389	 * (or did not provide a recognized type).
7390	 *
7391	 * Note: This will be obsoleted when a single fibre channel
7392	 * and parallel scsi driver is delivered. In the meantime the
7393	 * interconnect type will be set to the platform default. If that
7394	 * type is not parallel SCSI, it means that we should be
7395	 * assuming "ssd" semantics. However, here this also means that
7396	 * the FC HBA is not supporting the "interconnect-type" property
7397	 * as we expect it to, so log this occurrence.
7398	 */
7399	un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
7400	if (!SD_IS_PARALLEL_SCSI(un)) {
7401		SD_INFO(SD_LOG_ATTACH_DETACH, un,
7402		    "sd_unit_attach: un:0x%p Assuming "
7403		    "INTERCONNECT_FIBRE\n", un);
7404	} else {
7405		SD_INFO(SD_LOG_ATTACH_DETACH, un,
7406		    "sd_unit_attach: un:0x%p Assuming "
7407		    "INTERCONNECT_PARALLEL\n", un);
7408		un->un_f_is_fibre = FALSE;
7409	}
7410	#else
7411	/*
7412	 * Note: This branch will be taken when a single fibre
7413	 * channel and parallel scsi driver is delivered. The default
7414	 * will be to assume that if a device does not support the
7415	 * "interconnect-type" property it is a parallel SCSI HBA and
7416	 * we will set the interconnect type for parallel scsi.
7417	 */
7418	un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
7419	un->un_f_is_fibre = FALSE;
7420	#endif
7421	break;
7422	}
7423
7424	if (un->un_f_is_fibre == TRUE) {
7425		if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
7426		    SCSI_VERSION_3) {
7427			switch (un->un_interconnect_type) {
7428			case SD_INTERCONNECT_FIBRE:
7429			case SD_INTERCONNECT_SSA:
7430				un->un_node_type = DDI_NT_BLOCK_WWN;
7431				break;
7432			default:
7433				break;
7434			}
7435		}
7436	}
7437
7438	/*
7439	 * Initialize the Request Sense command for the target
7440	 */
7441	if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
7442		goto alloc_rqs_failed;
7443	}
7444
7445	/*
7446	 * Set un_retry_count with SD_RETRY_COUNT; this is ok for SPARC
7447	 * with separate binaries for sd and ssd.
7448	 *
7449	 * x86 has one binary, and un_retry_count is set based on connection
7450	 * type. The hardcoded values will go away when SPARC uses one binary
7451	 * for sd and ssd. These hardcoded values need to match
7452	 * SD_RETRY_COUNT in sddef.h.
7453	 * The value used is based on interconnect type:
7454	 * fibre = 3, parallel = 5.
7455	 */
7456	#if defined(__x86)
7457	un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
7458	#else
7459	un->un_retry_count = SD_RETRY_COUNT;
7460	#endif
7461
7462	/*
7463	 * Set the per disk retry count to the default number of retries
7464	 * for disks and CDROMs. This value can be overridden by the
7465	 * disk property list or an entry in sd.conf.
7466	 */
7467	un->un_notready_retry_count =
7468	    ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
7469	    : DISK_NOT_READY_RETRY_COUNT(un);
7470
7471	/*
7472	 * Set the busy retry count to the default value of un_retry_count.
7473	 * This can be overridden by entries in sd.conf or the device
7474	 * config table.
7475	 */
7476	un->un_busy_retry_count = un->un_retry_count;
7477
7478	/*
7479	 * Init the reset threshold for retries. This number determines
7480	 * how many retries must be performed before a reset can be issued
7481	 * (for certain error conditions). This can be overridden by entries
7482	 * in sd.conf or the device config table.
7483	 */
7484	un->un_reset_retry_count = (un->un_retry_count / 2);
7485
7486	/*
7487	 * Set the victim_retry_count to twice the default un_retry_count.
7488	 */
7489	un->un_victim_retry_count = (2 * un->un_retry_count);
7490
7491	/*
7492	 * Set the reservation release timeout to the default value of
7493	 * 5 seconds. This can be overridden by entries in ssd.conf or the
7494	 * device config table.
7495	 */
7496	un->un_reserve_release_time = 5;
7497
7498	/*
7499	 * Set up the default maximum transfer size. Note that this may
7500	 * get updated later in the attach, when setting up default wide
7501	 * operations for disks.
7502	 */
7503	#if defined(__x86)
7504	un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
7505	un->un_partial_dma_supported = 1;
7506	#else
7507	un->un_max_xfer_size = (uint_t)maxphys;
7508	#endif
7509
7510	/*
7511	 * Get the "allow bus device reset" property (defaults to "enabled"
7512	 * if the property was not defined). This is to disable bus resets
7513	 * for certain kinds of error recovery (an example override appears
7514	 * below). Note: In the future when a run-time fibre check is
7515	 * available the soft state flag should default to enabled.
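 *
 * For example, a driver.conf(5) override disabling it would look like
 * this (hypothetical entry, shown for illustration only):
 *
 *	allow-bus-device-reset=0;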
7516 */ 7517 if (un->un_f_is_fibre == TRUE) { 7518 un->un_f_allow_bus_device_reset = TRUE; 7519 } else { 7520 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7521 "allow-bus-device-reset", 1) != 0) { 7522 un->un_f_allow_bus_device_reset = TRUE; 7523 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7524 "sd_unit_attach: un:0x%p Bus device reset " 7525 "enabled\n", un); 7526 } else { 7527 un->un_f_allow_bus_device_reset = FALSE; 7528 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7529 "sd_unit_attach: un:0x%p Bus device reset " 7530 "disabled\n", un); 7531 } 7532 } 7533 7534 /* 7535 * Check if this is an ATAPI device. ATAPI devices use Group 1 7536 * Read/Write commands and Group 2 Mode Sense/Select commands. 7537 * 7538 * Note: The "obsolete" way of doing this is to check for the "atapi" 7539 * property. The new "variant" property with a value of "atapi" has been 7540 * introduced so that future 'variants' of standard SCSI behavior (like 7541 * atapi) could be specified by the underlying HBA drivers by supplying 7542 * a new value for the "variant" property, instead of having to define a 7543 * new property. 7544 */ 7545 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7546 un->un_f_cfg_is_atapi = TRUE; 7547 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7548 "sd_unit_attach: un:0x%p Atapi device\n", un); 7549 } 7550 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7551 &variantp) == DDI_PROP_SUCCESS) { 7552 if (strcmp(variantp, "atapi") == 0) { 7553 un->un_f_cfg_is_atapi = TRUE; 7554 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7555 "sd_unit_attach: un:0x%p Atapi device\n", un); 7556 } 7557 ddi_prop_free(variantp); 7558 } 7559 7560 un->un_cmd_timeout = SD_IO_TIME; 7561 7562 un->un_busy_timeout = SD_BSY_TIMEOUT; 7563 7564 /* Info on current states, statuses, etc. (Updated frequently) */ 7565 un->un_state = SD_STATE_NORMAL; 7566 un->un_last_state = SD_STATE_NORMAL; 7567 7568 /* Control & status info for command throttling */ 7569 un->un_throttle = sd_max_throttle; 7570 un->un_saved_throttle = sd_max_throttle; 7571 un->un_min_throttle = sd_min_throttle; 7572 7573 if (un->un_f_is_fibre == TRUE) { 7574 un->un_f_use_adaptive_throttle = TRUE; 7575 } else { 7576 un->un_f_use_adaptive_throttle = FALSE; 7577 } 7578 7579 /* Removable media support. */ 7580 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7581 un->un_mediastate = DKIO_NONE; 7582 un->un_specified_mediastate = DKIO_NONE; 7583 7584 /* CVs for suspend/resume (PM or DR) */ 7585 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7586 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7587 7588 /* Power management support. */ 7589 un->un_power_level = SD_SPINDLE_UNINIT; 7590 7591 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL); 7592 un->un_f_wcc_inprog = 0; 7593 7594 /* 7595 * The conf file entry and softstate variable is a forceful override, 7596 * meaning a non-zero value must be entered to change the default. 7597 */ 7598 un->un_f_disksort_disabled = FALSE; 7599 un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT; 7600 un->un_f_enable_rmw = FALSE; 7601 7602 /* 7603 * GET EVENT STATUS NOTIFICATION media polling enabled by default, but 7604 * can be overridden via [s]sd-config-list "mmc-gesn-polling" property. 7605 */ 7606 un->un_f_mmc_gesn_polling = TRUE; 7607 7608 /* 7609 * physical sector size defaults to DEV_BSIZE currently. We can 7610 * override this value via the driver configuration file so we must 7611 * set it before calling sd_read_unit_properties(). 
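 *
 * As an illustration of such an override (a sketch only; the vendor
 * and product strings are hypothetical, and the tunable string is
 * parsed from the [s]sd-config-list property by
 * sd_read_unit_properties() below):
 *
 *	sd-config-list = "ACME  ExampleDisk", "physical-block-size:4096";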
7612	 */
7613	un->un_phy_blocksize = DEV_BSIZE;
7614
7615	/*
7616	 * Retrieve the properties from the static driver table or the driver
7617	 * configuration file (.conf) for this unit and update the soft state
7618	 * for the device as needed for the indicated properties.
7619	 * Note: the property configuration needs to occur here as some of the
7620	 * following routines may have dependencies on soft state flags set
7621	 * as part of the driver property configuration.
7622	 */
7623	sd_read_unit_properties(un);
7624	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7625	    "sd_unit_attach: un:0x%p property configuration complete.\n", un);
7626
7627	/*
7628	 * A device is treated as hotpluggable only if it has the
7629	 * "hotpluggable" property; otherwise it is regarded as
7630	 * non-hotpluggable.
7631	 */
7632	if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
7633	    -1) != -1) {
7634		un->un_f_is_hotpluggable = TRUE;
7635	}
7636
7637	/*
7638	 * Set the unit's attributes (flags) according to "hotpluggable" and
7639	 * the RMB bit in the INQUIRY data.
7640	 */
7641	sd_set_unit_attributes(un, devi);
7642
7643	/*
7644	 * By default, we mark the capacity, lbasize, and geometry
7645	 * as invalid. Only if we successfully read a valid capacity
7646	 * will we update the un_blockcount and un_tgt_blocksize with the
7647	 * valid values (the geometry will be validated later).
7648	 */
7649	un->un_f_blockcount_is_valid = FALSE;
7650	un->un_f_tgt_blocksize_is_valid = FALSE;
7651
7652	/*
7653	 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
7654	 * otherwise.
7655	 */
7656	un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
7657	un->un_blockcount = 0;
7658
7659	/*
7660	 * Set up the per-instance info needed to determine the correct
7661	 * CDBs and other info for issuing commands to the target.
7662	 */
7663	sd_init_cdb_limits(un);
7664
7665	/*
7666	 * Set up the IO chains to use, based upon the target type.
7667	 */
7668	if (un->un_f_non_devbsize_supported) {
7669		un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
7670	} else {
7671		un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
7672	}
7673	un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
7674	un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
7675	un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;
7676
7677	un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
7678	    sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
7679	    ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
7680	ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);
7681
7682
7683	if (ISCD(un)) {
7684		un->un_additional_codes = sd_additional_codes;
7685	} else {
7686		un->un_additional_codes = NULL;
7687	}
7688
7689	/*
7690	 * Create the kstats here so they can be available for attach-time
7691	 * routines that send commands to the unit (either polled or via
7692	 * sd_send_scsi_cmd).
7693	 *
7694	 * Note: This is a critical sequence that needs to be maintained:
7695	 * 1) Instantiate the kstats here, before any routines using the
7696	 * iopath (i.e. sd_send_scsi_cmd).
7697	 * 2) Instantiate and initialize the partition stats
7698	 * (sd_set_pstats).
7699	 * 3) Initialize the error stats (sd_set_errstats), following
7700	 * sd_validate_geometry(), sd_register_devid(),
7701	 * and sd_cache_control().
7702 */ 7703 7704 un->un_stats = kstat_create(sd_label, instance, 7705 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7706 if (un->un_stats != NULL) { 7707 un->un_stats->ks_lock = SD_MUTEX(un); 7708 kstat_install(un->un_stats); 7709 } 7710 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7711 "sd_unit_attach: un:0x%p un_stats created\n", un); 7712 7713 un->un_unmapstats_ks = kstat_create(sd_label, instance, "unmapstats", 7714 "misc", KSTAT_TYPE_NAMED, sizeof (*un->un_unmapstats) / 7715 sizeof (kstat_named_t), 0); 7716 if (un->un_unmapstats_ks) { 7717 un->un_unmapstats = un->un_unmapstats_ks->ks_data; 7718 7719 kstat_named_init(&un->un_unmapstats->us_cmds, 7720 "commands", KSTAT_DATA_UINT64); 7721 kstat_named_init(&un->un_unmapstats->us_errs, 7722 "errors", KSTAT_DATA_UINT64); 7723 kstat_named_init(&un->un_unmapstats->us_extents, 7724 "extents", KSTAT_DATA_UINT64); 7725 kstat_named_init(&un->un_unmapstats->us_bytes, 7726 "bytes", KSTAT_DATA_UINT64); 7727 7728 kstat_install(un->un_unmapstats_ks); 7729 } else { 7730 cmn_err(CE_NOTE, "!Cannot create unmap kstats for disk %d", 7731 instance); 7732 } 7733 7734 sd_create_errstats(un, instance); 7735 if (un->un_errstats == NULL) { 7736 goto create_errstats_failed; 7737 } 7738 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7739 "sd_unit_attach: un:0x%p errstats created\n", un); 7740 7741 /* 7742 * The following if/else code was relocated here from below as part 7743 * of the fix for bug (4430280). However with the default setup added 7744 * on entry to this routine, it's no longer absolutely necessary for 7745 * this to be before the call to sd_spin_up_unit. 7746 */ 7747 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7748 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7749 (devp->sd_inq->inq_ansi == 5)) && 7750 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7751 7752 /* 7753 * If tagged queueing is supported by the target 7754 * and by the host adapter then we will enable it 7755 */ 7756 un->un_tagflags = 0; 7757 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7758 (un->un_f_arq_enabled == TRUE)) { 7759 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7760 1, 1) == 1) { 7761 un->un_tagflags = FLAG_STAG; 7762 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7763 "sd_unit_attach: un:0x%p tag queueing " 7764 "enabled\n", un); 7765 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7766 "untagged-qing", 0) == 1) { 7767 un->un_f_opt_queueing = TRUE; 7768 un->un_saved_throttle = un->un_throttle = 7769 min(un->un_throttle, 3); 7770 } else { 7771 un->un_f_opt_queueing = FALSE; 7772 un->un_saved_throttle = un->un_throttle = 1; 7773 } 7774 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7775 == 1) && (un->un_f_arq_enabled == TRUE)) { 7776 /* The Host Adapter supports internal queueing. 
*/ 7777 un->un_f_opt_queueing = TRUE; 7778 un->un_saved_throttle = un->un_throttle = 7779 min(un->un_throttle, 3); 7780 } else { 7781 un->un_f_opt_queueing = FALSE; 7782 un->un_saved_throttle = un->un_throttle = 1; 7783 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7784 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7785 } 7786 7787 /* 7788 * Enable large transfers for SATA/SAS drives 7789 */ 7790 if (SD_IS_SERIAL(un)) { 7791 un->un_max_xfer_size = 7792 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7793 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7794 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7795 "sd_unit_attach: un:0x%p max transfer " 7796 "size=0x%x\n", un, un->un_max_xfer_size); 7797 7798 } 7799 7800 /* Setup or tear down default wide operations for disks */ 7801 7802 /* 7803 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7804 * and "ssd_max_xfer_size" to exist simultaneously on the same 7805 * system and be set to different values. In the future this 7806 * code may need to be updated when the ssd module is 7807 * obsoleted and removed from the system. (4299588) 7808 */ 7809 if (SD_IS_PARALLEL_SCSI(un) && 7810 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7811 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7812 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7813 1, 1) == 1) { 7814 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7815 "sd_unit_attach: un:0x%p Wide Transfer " 7816 "enabled\n", un); 7817 } 7818 7819 /* 7820 * If tagged queuing has also been enabled, then 7821 * enable large xfers 7822 */ 7823 if (un->un_saved_throttle == sd_max_throttle) { 7824 un->un_max_xfer_size = 7825 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7826 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7827 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7828 "sd_unit_attach: un:0x%p max transfer " 7829 "size=0x%x\n", un, un->un_max_xfer_size); 7830 } 7831 } else { 7832 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7833 0, 1) == 1) { 7834 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7835 "sd_unit_attach: un:0x%p " 7836 "Wide Transfer disabled\n", un); 7837 } 7838 } 7839 } else { 7840 un->un_tagflags = FLAG_STAG; 7841 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7842 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7843 } 7844 7845 /* 7846 * If this target supports LUN reset, try to enable it. 7847 */ 7848 if (un->un_f_lun_reset_enabled) { 7849 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 7850 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7851 "un:0x%p lun_reset capability set\n", un); 7852 } else { 7853 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 7854 "un:0x%p lun-reset capability not set\n", un); 7855 } 7856 } 7857 7858 /* 7859 * Adjust the maximum transfer size. This is to fix 7860 * the problem of partial DMA support on SPARC. Some 7861 * HBA driver, like aac, has very small dma_attr_maxxfer 7862 * size, which requires partial DMA support on SPARC. 7863 * In the future the SPARC pci nexus driver may solve 7864 * the problem instead of this fix. 
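 *
 * For instance (illustrative numbers only): if an HBA reports a
 * "dma-max" capability of 0x10000 (64KB) while un_max_xfer_size
 * defaulted to maxphys, the code below clamps un_max_xfer_size down
 * to 64KB and turns on partial DMA, so that sddump() and large
 * transfers are still broken up into pieces the HBA can map.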
7865	 */
7866	max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1);
7867	if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) {
7868		/* We need DMA partial even on sparc to ensure sddump() works */
7869		un->un_max_xfer_size = max_xfer_size;
7870		if (un->un_partial_dma_supported == 0)
7871			un->un_partial_dma_supported = 1;
7872	}
7873	if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
7874	    DDI_PROP_DONTPASS, "buf_break", 0) == 1) {
7875		if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr,
7876		    un->un_max_xfer_size) == 1) {
7877			un->un_buf_breakup_supported = 1;
7878			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7879			    "un:0x%p Buf breakup enabled\n", un);
7880		}
7881	}
7882
7883	/*
7884	 * Set the PKT_DMA_PARTIAL flag.
7885	 */
7886	if (un->un_partial_dma_supported == 1) {
7887		un->un_pkt_flags = PKT_DMA_PARTIAL;
7888	} else {
7889		un->un_pkt_flags = 0;
7890	}
7891
7892	/* Initialize sd_ssc_t for internal uscsi commands */
7893	ssc = sd_ssc_init(un);
7894	scsi_fm_init(devp);
7895
7896	/*
7897	 * Allocate memory for the SCSI FMA state.
7898	 */
7899	un->un_fm_private =
7900	    kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP);
7901	sfip = (struct sd_fm_internal *)un->un_fm_private;
7902	sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd;
7903	sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo;
7904	sfip->fm_ssc.ssc_un = un;
7905
7906	if (ISCD(un) ||
7907	    un->un_f_has_removable_media ||
7908	    devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) {
7909		/*
7910		 * We don't touch CD-ROMs or DDI_FM_NOT_CAPABLE devices;
7911		 * their logging is unchanged.
7912		 */
7913		sfip->fm_log_level = SD_FM_LOG_NSUP;
7914	} else {
7915		/*
7916		 * If we get here, this is a non-CDROM, FM-capable
7917		 * device, and it will not keep the old scsi_log behavior
7918		 * it had before in /var/adm/messages. Instead, the property
7919		 * "fm-scsi-log" controls whether the FM telemetry will
7920		 * be logged in /var/adm/messages.
7921		 */
7922		int fm_scsi_log;
7923		fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
7924		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0);
7925
7926		if (fm_scsi_log)
7927			sfip->fm_log_level = SD_FM_LOG_EREPORT;
7928		else
7929			sfip->fm_log_level = SD_FM_LOG_SILENT;
7930	}
7931
7932	/*
7933	 * At this point in the attach, we have enough info in the
7934	 * soft state to be able to issue commands to the target.
7935	 *
7936	 * All command paths used below MUST issue their commands as
7937	 * SD_PATH_DIRECT. This is important as intermediate layers
7938	 * are not all initialized yet (such as PM).
7939	 */
7940
7941	/*
7942	 * Send a TEST UNIT READY command to the device. This should clear
7943	 * any outstanding UNIT ATTENTION that may be present.
7944	 *
7945	 * Note: Don't check for success; just track whether there is a
7946	 * reservation. This is a throwaway command to clear any unit
7947	 * attentions.
7948	 *
7949	 * Note: This MUST be the first command issued to the target during
7950	 * attach to ensure power on UNIT ATTENTIONS are cleared.
7951	 * Pass in the flag SD_DONT_RETRY_TUR to prevent the long delays
7952	 * associated with attempts at spinning up a device with no media.
7953	 */
7954	status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
7955	if (status != 0) {
7956		if (status == EACCES)
7957			reservation_flag = SD_TARGET_IS_RESERVED;
7958		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
7959	}
7960
7961	/*
7962	 * If the device is NOT a removable media device, attempt to spin
7963	 * it up (using the START_STOP_UNIT command) and read its capacity
	 * (using the READ CAPACITY command).
Note, however, that either
7964	 * of these could fail and in some cases we would continue with
7965	 * the attach despite the failure (see below).
7966	 */
7967	if (un->un_f_descr_format_supported) {
7968
7969	switch (sd_spin_up_unit(ssc)) {
7970	case 0:
7971	/*
7972	 * Spin-up was successful; now try to read the
7973	 * capacity. If successful then save the results
7974	 * and mark the capacity & lbasize as valid.
7975	 */
7976	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7977	    "sd_unit_attach: un:0x%p spin-up successful\n", un);
7978
7979	status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
7980	    &lbasize, SD_PATH_DIRECT);
7981
7982	switch (status) {
7983	case 0: {
7984	if (capacity > DK_MAX_BLOCKS) {
7985	#ifdef _LP64
7986	if ((capacity + 1) >
7987	    SD_GROUP1_MAX_ADDRESS) {
7988	/*
7989	 * Enable descriptor format
7990	 * sense data so that we can
7991	 * get 64 bit sense data
7992	 * fields.
7993	 */
7994	sd_enable_descr_sense(ssc);
7995	}
7996	#else
7997	/* 32-bit kernels can't handle this */
7998	scsi_log(SD_DEVINFO(un),
7999	    sd_label, CE_WARN,
8000	    "disk has %llu blocks, which "
8001	    "is too large for a 32-bit "
8002	    "kernel", capacity);
8003
8004	#if defined(__x86)
8005	/*
8006	 * A 1TB disk was treated as (1T - 512)B
8007	 * in the past, so it might have a
8008	 * valid VTOC and Solaris partitions;
8009	 * we have to allow it to continue to
8010	 * work.
8011	 */
8012	if (capacity - 1 > DK_MAX_BLOCKS)
8013	#endif
8014	goto spinup_failed;
8015	#endif
8016	}
8017
8018	/*
8019	 * It is not necessary here to check whether the
8020	 * capacity of the device is bigger than
8021	 * what the max HBA CDB can support, because
8022	 * sd_send_scsi_READ_CAPACITY retrieves the
8023	 * capacity by sending a USCSI command, which
8024	 * is itself constrained by the max HBA CDB.
8025	 * In fact, sd_send_scsi_READ_CAPACITY returns
8026	 * EINVAL when a bigger CDB than the required
8027	 * CDB length is used; that situation is
8028	 * handled in "case EINVAL" below.
8029	 */
8030
8031	/*
8032	 * The following relies on
8033	 * sd_send_scsi_READ_CAPACITY never
8034	 * returning 0 for capacity and/or lbasize.
8035	 */
8036	sd_update_block_info(un, lbasize, capacity);
8037
8038	SD_INFO(SD_LOG_ATTACH_DETACH, un,
8039	    "sd_unit_attach: un:0x%p capacity = %ld "
8040	    "blocks; lbasize= %ld.\n", un,
8041	    un->un_blockcount, un->un_tgt_blocksize);
8042
8043	break;
8044	}
8045	case EINVAL:
8046	/*
8047	 * In the case where the max-cdb-length property
8048	 * is smaller than the required CDB length for
8049	 * a SCSI device, a target driver can fail to
8050	 * attach to that device.
8051	 */
8052	scsi_log(SD_DEVINFO(un),
8053	    sd_label, CE_WARN,
8054	    "disk capacity is too large "
8055	    "for current cdb length");
8056	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8057
8058	goto spinup_failed;
8059	case EACCES:
8060	/*
8061	 * Should never get here if the spin-up
8062	 * succeeded, but code it in anyway.
8063	 * From here, just continue with the attach...
8064	 */
8065	SD_INFO(SD_LOG_ATTACH_DETACH, un,
8066	    "sd_unit_attach: un:0x%p "
8067	    "sd_send_scsi_READ_CAPACITY "
8068	    "returned reservation conflict\n", un);
8069	reservation_flag = SD_TARGET_IS_RESERVED;
8070	sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8071	break;
8072	default:
8073	/*
8074	 * Likewise, should never get here if the
8075	 * spin-up succeeded. Just continue with
8076	 * the attach...
8077	 */
8078	if (status == EIO)
8079		sd_ssc_assessment(ssc,
8080		    SD_FMT_STATUS_CHECK);
8081	else
8082		sd_ssc_assessment(ssc,
8083		    SD_FMT_IGNORE);
8084	break;
8085	}
8086	break;
8087	case EACCES:
8088	/*
8089	 * Device is reserved by another host.
In this case 8090 * we could not spin it up or read the capacity, but 8091 * we continue with the attach anyway. 8092 */ 8093 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8094 "sd_unit_attach: un:0x%p spin-up reservation " 8095 "conflict.\n", un); 8096 reservation_flag = SD_TARGET_IS_RESERVED; 8097 break; 8098 default: 8099 /* Fail the attach if the spin-up failed. */ 8100 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8101 "sd_unit_attach: un:0x%p spin-up failed.", un); 8102 goto spinup_failed; 8103 } 8104 8105 } 8106 8107 /* 8108 * Check to see if this is a MMC drive 8109 */ 8110 if (ISCD(un)) { 8111 sd_set_mmc_caps(ssc); 8112 } 8113 8114 /* 8115 * Add a zero-length attribute to tell the world we support 8116 * kernel ioctls (for layered drivers) 8117 */ 8118 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8119 DDI_KERNEL_IOCTL, NULL, 0); 8120 8121 /* 8122 * Add a boolean property to tell the world we support 8123 * the B_FAILFAST flag (for layered drivers) 8124 */ 8125 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8126 "ddi-failfast-supported", NULL, 0); 8127 8128 /* 8129 * Initialize power management 8130 */ 8131 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 8132 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 8133 sd_setup_pm(ssc, devi); 8134 if (un->un_f_pm_is_enabled == FALSE) { 8135 /* 8136 * For performance, point to a jump table that does 8137 * not include pm. 8138 * The direct and priority chains don't change with PM. 8139 * 8140 * Note: this is currently done based on individual device 8141 * capabilities. When an interface for determining system 8142 * power enabled state becomes available, or when additional 8143 * layers are added to the command chain, these values will 8144 * have to be re-evaluated for correctness. 8145 */ 8146 if (un->un_f_non_devbsize_supported) { 8147 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 8148 } else { 8149 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 8150 } 8151 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8152 } 8153 8154 /* 8155 * This property is set to 0 by HA software to avoid retries 8156 * on a reserved disk. (The preferred property name is 8157 * "retry-on-reservation-conflict") (1189689) 8158 * 8159 * Note: The use of a global here can have unintended consequences. A 8160 * per instance variable is preferable to match the capabilities of 8161 * different underlying hba's (4402600) 8162 */ 8163 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 8164 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 8165 sd_retry_on_reservation_conflict); 8166 if (sd_retry_on_reservation_conflict != 0) { 8167 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 8168 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 8169 sd_retry_on_reservation_conflict); 8170 } 8171 8172 /* Set up options for QFULL handling. */ 8173 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8174 "qfull-retries", -1)) != -1) { 8175 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 8176 rval, 1); 8177 } 8178 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8179 "qfull-retry-interval", -1)) != -1) { 8180 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 8181 rval, 1); 8182 } 8183 8184 /* 8185 * This just prints a message that announces the existence of the 8186 * device. The message is always printed in the system logfile, but 8187 * only appears on the console if the system is booted with the 8188 * -v (verbose) argument. 
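 *
 * (Tying back to the QFULL setup just above: hypothetical
 * driver.conf entries, shown for illustration only, might read
 *	qfull-retries=20;
 *	qfull-retry-interval=100;
 * and are handed through to the HBA via scsi_ifsetcap().)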
8189 */ 8190 ddi_report_dev(devi); 8191 8192 un->un_mediastate = DKIO_NONE; 8193 8194 /* 8195 * Check Block Device Characteristics VPD. 8196 */ 8197 sd_check_bdc_vpd(ssc); 8198 8199 /* 8200 * Check whether the drive is in emulation mode. 8201 */ 8202 sd_check_emulation_mode(ssc); 8203 8204 cmlb_alloc_handle(&un->un_cmlbhandle); 8205 8206 #if defined(__x86) 8207 /* 8208 * On x86, compensate for off-by-1 legacy error 8209 */ 8210 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 8211 (lbasize == un->un_sys_blocksize)) 8212 offbyone = CMLB_OFF_BY_ONE; 8213 #endif 8214 8215 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 8216 VOID2BOOLEAN(un->un_f_has_removable_media != 0), 8217 VOID2BOOLEAN(un->un_f_is_hotpluggable != 0), 8218 un->un_node_type, offbyone, un->un_cmlbhandle, 8219 (void *)SD_PATH_DIRECT) != 0) { 8220 goto cmlb_attach_failed; 8221 } 8222 8223 8224 /* 8225 * Read and validate the device's geometry (ie, disk label) 8226 * A new unformatted drive will not have a valid geometry, but 8227 * the driver needs to successfully attach to this device so 8228 * the drive can be formatted via ioctls. 8229 */ 8230 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 8231 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 8232 8233 mutex_enter(SD_MUTEX(un)); 8234 8235 /* 8236 * Read and initialize the devid for the unit. 8237 */ 8238 if (un->un_f_devid_supported) { 8239 sd_register_devid(ssc, devi, reservation_flag); 8240 } 8241 mutex_exit(SD_MUTEX(un)); 8242 8243 #if (defined(__fibre)) 8244 /* 8245 * Register callbacks for fibre only. You can't do this solely 8246 * on the basis of the devid_type because this is hba specific. 8247 * We need to query our hba capabilities to find out whether to 8248 * register or not. 8249 */ 8250 if (un->un_f_is_fibre) { 8251 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 8252 sd_init_event_callbacks(un); 8253 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8254 "sd_unit_attach: un:0x%p event callbacks inserted", 8255 un); 8256 } 8257 } 8258 #endif 8259 8260 if (un->un_f_opt_disable_cache == TRUE) { 8261 /* 8262 * Disable both read cache and write cache. This is 8263 * the historic behavior of the keywords in the config file. 8264 */ 8265 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 8266 0) { 8267 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8268 "sd_unit_attach: un:0x%p Could not disable " 8269 "caching", un); 8270 goto devid_failed; 8271 } 8272 } 8273 8274 /* 8275 * Check the value of the WCE bit and if it's allowed to be changed, 8276 * set un_f_write_cache_enabled and un_f_cache_mode_changeable 8277 * accordingly. 
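 *
 * (Sketch of what the two helpers below do, assuming the usual
 * implementation elsewhere in this file: sd_get_write_cache_enabled()
 * issues a MODE SENSE for the caching mode page (0x08) and reports
 * the current WCE bit, while sd_get_write_cache_changeable() asks for
 * the changeable-values version of the same page to learn whether
 * WCE can be toggled at all.)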
8278 */ 8279 (void) sd_get_write_cache_enabled(ssc, &wc_enabled); 8280 sd_get_write_cache_changeable(ssc, &wc_changeable); 8281 mutex_enter(SD_MUTEX(un)); 8282 un->un_f_write_cache_enabled = (wc_enabled != 0); 8283 un->un_f_cache_mode_changeable = (wc_changeable != 0); 8284 mutex_exit(SD_MUTEX(un)); 8285 8286 if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR && 8287 un->un_tgt_blocksize != DEV_BSIZE) || 8288 un->un_f_enable_rmw) { 8289 if (!(un->un_wm_cache)) { 8290 (void) snprintf(name_str, sizeof (name_str), 8291 "%s%d_cache", 8292 ddi_driver_name(SD_DEVINFO(un)), 8293 ddi_get_instance(SD_DEVINFO(un))); 8294 un->un_wm_cache = kmem_cache_create( 8295 name_str, sizeof (struct sd_w_map), 8296 8, sd_wm_cache_constructor, 8297 sd_wm_cache_destructor, NULL, 8298 (void *)un, NULL, 0); 8299 if (!(un->un_wm_cache)) { 8300 goto wm_cache_failed; 8301 } 8302 } 8303 } 8304 8305 /* 8306 * Check the value of the NV_SUP bit and set 8307 * un_f_suppress_cache_flush accordingly. 8308 */ 8309 sd_get_nv_sup(ssc); 8310 8311 /* 8312 * Find out what type of reservation this disk supports. 8313 */ 8314 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL); 8315 8316 switch (status) { 8317 case 0: 8318 /* 8319 * SCSI-3 reservations are supported. 8320 */ 8321 un->un_reservation_type = SD_SCSI3_RESERVATION; 8322 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8323 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 8324 break; 8325 case ENOTSUP: 8326 /* 8327 * The PERSISTENT RESERVE IN command would not be recognized by 8328 * a SCSI-2 device, so assume the reservation type is SCSI-2. 8329 */ 8330 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8331 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 8332 un->un_reservation_type = SD_SCSI2_RESERVATION; 8333 8334 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8335 break; 8336 default: 8337 /* 8338 * default to SCSI-3 reservations 8339 */ 8340 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8341 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 8342 un->un_reservation_type = SD_SCSI3_RESERVATION; 8343 8344 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8345 break; 8346 } 8347 8348 /* 8349 * Set the pstat and error stat values here, so data obtained during the 8350 * previous attach-time routines is available. 8351 * 8352 * Note: This is a critical sequence that needs to be maintained: 8353 * 1) Instantiate the kstats before any routines using the iopath 8354 * (i.e. sd_send_scsi_cmd). 8355 * 2) Initialize the error stats (sd_set_errstats) and partition 8356 * stats (sd_set_pstats)here, following 8357 * cmlb_validate_geometry(), sd_register_devid(), and 8358 * sd_cache_control(). 8359 */ 8360 8361 if (un->un_f_pkstats_enabled && geom_label_valid) { 8362 sd_set_pstats(un); 8363 SD_TRACE(SD_LOG_IO_PARTITION, un, 8364 "sd_unit_attach: un:0x%p pstats created and set\n", un); 8365 } 8366 8367 sd_set_errstats(un); 8368 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8369 "sd_unit_attach: un:0x%p errstats set\n", un); 8370 8371 sd_setup_blk_limits(ssc); 8372 8373 /* 8374 * After successfully attaching an instance, we record the information 8375 * of how many luns have been attached on the relative target and 8376 * controller for parallel SCSI. This information is used when sd tries 8377 * to set the tagged queuing capability in HBA. 
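 *
 * (Schematically: SD_SCSI_LUN_ATTACH below increments a per
 * <controller, target> lun count that sd keeps privately, and the
 * matching SD_SCSI_LUN_DETACH decrement is issued from
 * sd_unit_detach(); a sketch inferred from the calls visible in this
 * file.)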
8378 */ 8379 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8380 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH); 8381 } 8382 8383 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8384 "sd_unit_attach: un:0x%p exit success\n", un); 8385 8386 /* Uninitialize sd_ssc_t pointer */ 8387 sd_ssc_fini(ssc); 8388 8389 return (DDI_SUCCESS); 8390 8391 /* 8392 * An error occurred during the attach; clean up & return failure. 8393 */ 8394 wm_cache_failed: 8395 devid_failed: 8396 ddi_remove_minor_node(devi, NULL); 8397 8398 cmlb_attach_failed: 8399 /* 8400 * Cleanup from the scsi_ifsetcap() calls (437868) 8401 */ 8402 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8403 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8404 8405 /* 8406 * Refer to the comments of setting tagged-qing in the beginning of 8407 * sd_unit_attach. We can only disable tagged queuing when there is 8408 * no lun attached on the target. 8409 */ 8410 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 8411 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8412 } 8413 8414 if (un->un_f_is_fibre == FALSE) { 8415 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8416 } 8417 8418 spinup_failed: 8419 8420 /* Uninitialize sd_ssc_t pointer */ 8421 sd_ssc_fini(ssc); 8422 8423 mutex_enter(SD_MUTEX(un)); 8424 8425 /* Deallocate SCSI FMA memory spaces */ 8426 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8427 8428 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */ 8429 if (un->un_direct_priority_timeid != NULL) { 8430 timeout_id_t temp_id = un->un_direct_priority_timeid; 8431 un->un_direct_priority_timeid = NULL; 8432 mutex_exit(SD_MUTEX(un)); 8433 (void) untimeout(temp_id); 8434 mutex_enter(SD_MUTEX(un)); 8435 } 8436 8437 /* Cancel any pending start/stop timeouts */ 8438 if (un->un_startstop_timeid != NULL) { 8439 timeout_id_t temp_id = un->un_startstop_timeid; 8440 un->un_startstop_timeid = NULL; 8441 mutex_exit(SD_MUTEX(un)); 8442 (void) untimeout(temp_id); 8443 mutex_enter(SD_MUTEX(un)); 8444 } 8445 8446 /* Cancel any pending reset-throttle timeouts */ 8447 if (un->un_reset_throttle_timeid != NULL) { 8448 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8449 un->un_reset_throttle_timeid = NULL; 8450 mutex_exit(SD_MUTEX(un)); 8451 (void) untimeout(temp_id); 8452 mutex_enter(SD_MUTEX(un)); 8453 } 8454 8455 /* Cancel rmw warning message timeouts */ 8456 if (un->un_rmw_msg_timeid != NULL) { 8457 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8458 un->un_rmw_msg_timeid = NULL; 8459 mutex_exit(SD_MUTEX(un)); 8460 (void) untimeout(temp_id); 8461 mutex_enter(SD_MUTEX(un)); 8462 } 8463 8464 /* Cancel any pending retry timeouts */ 8465 if (un->un_retry_timeid != NULL) { 8466 timeout_id_t temp_id = un->un_retry_timeid; 8467 un->un_retry_timeid = NULL; 8468 mutex_exit(SD_MUTEX(un)); 8469 (void) untimeout(temp_id); 8470 mutex_enter(SD_MUTEX(un)); 8471 } 8472 8473 /* Cancel any pending delayed cv broadcast timeouts */ 8474 if (un->un_dcvb_timeid != NULL) { 8475 timeout_id_t temp_id = un->un_dcvb_timeid; 8476 un->un_dcvb_timeid = NULL; 8477 mutex_exit(SD_MUTEX(un)); 8478 (void) untimeout(temp_id); 8479 mutex_enter(SD_MUTEX(un)); 8480 } 8481 8482 mutex_exit(SD_MUTEX(un)); 8483 8484 /* There should not be any in-progress I/O so ASSERT this check */ 8485 ASSERT(un->un_ncmds_in_transport == 0); 8486 ASSERT(un->un_ncmds_in_driver == 0); 8487 8488 /* Do not free the softstate if the callback routine is active */ 8489 sd_sync_with_callback(un); 8490 8491 /* 8492 * Partition 
stats apparently are not used with removables. These would 8493 * not have been created during attach, so no need to clean them up... 8494 */ 8495 if (un->un_errstats != NULL) { 8496 kstat_delete(un->un_errstats); 8497 un->un_errstats = NULL; 8498 } 8499 8500 create_errstats_failed: 8501 8502 if (un->un_stats != NULL) { 8503 kstat_delete(un->un_stats); 8504 un->un_stats = NULL; 8505 } 8506 8507 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8508 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8509 8510 ddi_prop_remove_all(devi); 8511 cv_destroy(&un->un_state_cv); 8512 8513 sd_free_rqs(un); 8514 8515 alloc_rqs_failed: 8516 8517 devp->sd_private = NULL; 8518 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8519 8520 /* 8521 * Note: the man pages are unclear as to whether or not doing a 8522 * ddi_soft_state_free(sd_state, instance) is the right way to 8523 * clean up after the ddi_soft_state_zalloc() if the subsequent 8524 * ddi_get_soft_state() fails. The implication seems to be 8525 * that the get_soft_state cannot fail if the zalloc succeeds. 8526 */ 8527 #ifndef XPV_HVM_DRIVER 8528 ddi_soft_state_free(sd_state, instance); 8529 #endif /* !XPV_HVM_DRIVER */ 8530 8531 probe_failed: 8532 scsi_unprobe(devp); 8533 8534 return (DDI_FAILURE); 8535 } 8536 8537 8538 /* 8539 * Function: sd_unit_detach 8540 * 8541 * Description: Performs DDI_DETACH processing for sddetach(). 8542 * 8543 * Return Code: DDI_SUCCESS 8544 * DDI_FAILURE 8545 * 8546 * Context: Kernel thread context 8547 */ 8548 8549 static int 8550 sd_unit_detach(dev_info_t *devi) 8551 { 8552 struct scsi_device *devp; 8553 struct sd_lun *un; 8554 int i; 8555 int tgt; 8556 dev_t dev; 8557 dev_info_t *pdip = ddi_get_parent(devi); 8558 int instance = ddi_get_instance(devi); 8559 8560 /* 8561 * Fail the detach for any of the following: 8562 * - Unable to get the sd_lun struct for the instance 8563 * - There is pending I/O 8564 */ 8565 devp = ddi_get_driver_private(devi); 8566 if ((devp == NULL) || 8567 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8568 (un->un_ncmds_in_driver != 0)) { 8569 return (DDI_FAILURE); 8570 } 8571 8572 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8573 8574 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 8575 SCSI_ADDR_PROP_TARGET, -1); 8576 8577 dev = sd_make_device(SD_DEVINFO(un)); 8578 8579 #ifndef lint 8580 _NOTE(COMPETING_THREADS_NOW); 8581 #endif 8582 8583 mutex_enter(SD_MUTEX(un)); 8584 8585 /* 8586 * Fail the detach if there are any outstanding layered 8587 * opens on this device. 8588 */ 8589 for (i = 0; i < NDKMAP; i++) { 8590 if (un->un_ocmap.lyropen[i] != 0) { 8591 goto err_notclosed; 8592 } 8593 } 8594 8595 /* 8596 * Verify there are NO outstanding commands issued to this device. 8597 * ie, un_ncmds_in_transport == 0. 8598 * It's possible to have outstanding commands through the physio 8599 * code path, even though everything's closed. 8600 */ 8601 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8602 (un->un_direct_priority_timeid != NULL) || 8603 (un->un_state == SD_STATE_RWAIT)) { 8604 mutex_exit(SD_MUTEX(un)); 8605 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8606 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8607 goto err_stillbusy; 8608 } 8609 8610 /* 8611 * If we have the device reserved, release the reservation. 
8612 */ 8613 if ((un->un_resvd_status & SD_RESERVE) && 8614 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8615 mutex_exit(SD_MUTEX(un)); 8616 /* 8617 * Note: sd_reserve_release sends a command to the device 8618 * via the sd_ioctlcmd() path, and can sleep. 8619 */ 8620 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8621 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8622 "sd_dr_detach: Cannot release reservation \n"); 8623 } 8624 } else { 8625 mutex_exit(SD_MUTEX(un)); 8626 } 8627 8628 /* 8629 * Untimeout any reserve recover, throttle reset, restart unit 8630 * and delayed broadcast timeout threads. Protect the timeout pointer 8631 * from getting nulled by their callback functions. 8632 */ 8633 mutex_enter(SD_MUTEX(un)); 8634 if (un->un_resvd_timeid != NULL) { 8635 timeout_id_t temp_id = un->un_resvd_timeid; 8636 un->un_resvd_timeid = NULL; 8637 mutex_exit(SD_MUTEX(un)); 8638 (void) untimeout(temp_id); 8639 mutex_enter(SD_MUTEX(un)); 8640 } 8641 8642 if (un->un_reset_throttle_timeid != NULL) { 8643 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8644 un->un_reset_throttle_timeid = NULL; 8645 mutex_exit(SD_MUTEX(un)); 8646 (void) untimeout(temp_id); 8647 mutex_enter(SD_MUTEX(un)); 8648 } 8649 8650 if (un->un_startstop_timeid != NULL) { 8651 timeout_id_t temp_id = un->un_startstop_timeid; 8652 un->un_startstop_timeid = NULL; 8653 mutex_exit(SD_MUTEX(un)); 8654 (void) untimeout(temp_id); 8655 mutex_enter(SD_MUTEX(un)); 8656 } 8657 8658 if (un->un_rmw_msg_timeid != NULL) { 8659 timeout_id_t temp_id = un->un_rmw_msg_timeid; 8660 un->un_rmw_msg_timeid = NULL; 8661 mutex_exit(SD_MUTEX(un)); 8662 (void) untimeout(temp_id); 8663 mutex_enter(SD_MUTEX(un)); 8664 } 8665 8666 if (un->un_dcvb_timeid != NULL) { 8667 timeout_id_t temp_id = un->un_dcvb_timeid; 8668 un->un_dcvb_timeid = NULL; 8669 mutex_exit(SD_MUTEX(un)); 8670 (void) untimeout(temp_id); 8671 } else { 8672 mutex_exit(SD_MUTEX(un)); 8673 } 8674 8675 /* Remove any pending reservation reclaim requests for this device */ 8676 sd_rmv_resv_reclaim_req(dev); 8677 8678 mutex_enter(SD_MUTEX(un)); 8679 8680 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8681 if (un->un_direct_priority_timeid != NULL) { 8682 timeout_id_t temp_id = un->un_direct_priority_timeid; 8683 un->un_direct_priority_timeid = NULL; 8684 mutex_exit(SD_MUTEX(un)); 8685 (void) untimeout(temp_id); 8686 mutex_enter(SD_MUTEX(un)); 8687 } 8688 8689 /* Cancel any active multi-host disk watch thread requests */ 8690 if (un->un_mhd_token != NULL) { 8691 mutex_exit(SD_MUTEX(un)); 8692 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8693 if (scsi_watch_request_terminate(un->un_mhd_token, 8694 SCSI_WATCH_TERMINATE_NOWAIT)) { 8695 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8696 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8697 /* 8698 * Note: We are returning here after having removed 8699 * some driver timeouts above. This is consistent with 8700 * the legacy implementation but perhaps the watch 8701 * terminate call should be made with the wait flag set. 
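 *
 * (For reference, both termination modes appear in this file: the
 * detach path uses SCSI_WATCH_TERMINATE_NOWAIT, which can fail if
 * the watch callback is currently active, while sdpower uses
 * SCSI_WATCH_TERMINATE_ALL_WAIT to block until termination is
 * complete -- the "wait flag" alternative alluded to above.)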
8702	 */
8703	goto err_stillbusy;
8704	}
8705	mutex_enter(SD_MUTEX(un));
8706	un->un_mhd_token = NULL;
8707	}
8708
8709	if (un->un_swr_token != NULL) {
8710		mutex_exit(SD_MUTEX(un));
8711		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token));
8712		if (scsi_watch_request_terminate(un->un_swr_token,
8713		    SCSI_WATCH_TERMINATE_NOWAIT)) {
8714			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8715			    "sd_dr_detach: Cannot cancel swr watch request\n");
8716			/*
8717			 * Note: We are returning here after having removed
8718			 * some driver timeouts above. This is consistent with
8719			 * the legacy implementation but perhaps the watch
8720			 * terminate call should be made with the wait flag set.
8721			 */
8722			goto err_stillbusy;
8723		}
8724		mutex_enter(SD_MUTEX(un));
8725		un->un_swr_token = NULL;
8726	}
8727
8728	mutex_exit(SD_MUTEX(un));
8729
8730	/*
8731	 * Clear any scsi_reset_notifies. We clear the reset notifies
8732	 * if we have not registered one.
8733	 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX!
8734	 */
8735	(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
8736	    sd_mhd_reset_notify_cb, (caddr_t)un);
8737
8738	/*
8739	 * Protect the timeout pointers from getting nulled by
8740	 * their callback functions during the cancellation process.
8741	 * In such a scenario untimeout can be invoked with a null value.
8742	 */
8743	_NOTE(NO_COMPETING_THREADS_NOW);
8744
8745	mutex_enter(&un->un_pm_mutex);
8746	if (un->un_pm_idle_timeid != NULL) {
8747		timeout_id_t temp_id = un->un_pm_idle_timeid;
8748		un->un_pm_idle_timeid = NULL;
8749		mutex_exit(&un->un_pm_mutex);
8750
8751		/*
8752		 * Timeout is active; cancel it.
8753		 * Note that it'll never be active on a device
8754		 * that does not support PM; therefore we don't
8755		 * have to check before calling pm_idle_component.
8756		 */
8757		(void) untimeout(temp_id);
8758		(void) pm_idle_component(SD_DEVINFO(un), 0);
8759		mutex_enter(&un->un_pm_mutex);
8760	}
8761
8762	/*
8763	 * Check whether there is already a timeout scheduled for power
8764	 * management. If so, don't lower the power here; that's
8765	 * the timeout handler's job.
8766	 */
8767	if (un->un_pm_timeid != NULL) {
8768		timeout_id_t temp_id = un->un_pm_timeid;
8769		un->un_pm_timeid = NULL;
8770		mutex_exit(&un->un_pm_mutex);
8771		/*
8772		 * Timeout is active; cancel it.
8773		 * Note that it'll never be active on a device
8774		 * that does not support PM; therefore we don't
8775		 * have to check before calling pm_idle_component.
8776		 */
8777		(void) untimeout(temp_id);
8778		(void) pm_idle_component(SD_DEVINFO(un), 0);
8779
8780	} else {
8781		mutex_exit(&un->un_pm_mutex);
8782		if ((un->un_f_pm_is_enabled == TRUE) &&
8783		    (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un))
8784		    != DDI_SUCCESS)) {
8785			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8786			    "sd_dr_detach: Lower power request failed, ignoring.\n");
8787	/*
8788	 * Fix for bug: 4297749, item # 13
8789	 * The above test now includes a check to see if PM is
8790	 * supported by this device before calling
8791	 * pm_lower_power().
8792	 * Note, the following is not dead code. The call to
8793	 * pm_lower_power above will generate a callback into
8794	 * our sdpower routine which might result in a timeout
8795	 * handler getting activated. Therefore the following
8796	 * code is valid and necessary.
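 *
 * Schematically (a sketch of the sequence just described):
 *
 *	pm_lower_power(SD_DEVINFO(un), 0, ...)
 *	    -> callback into sdpower(..., lower level)
 *	        -> may install un_pm_timeid via timeout(9F)
 *	    -> hence the re-check and untimeout() below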
8797 */ 8798 mutex_enter(&un->un_pm_mutex); 8799 if (un->un_pm_timeid != NULL) { 8800 timeout_id_t temp_id = un->un_pm_timeid; 8801 un->un_pm_timeid = NULL; 8802 mutex_exit(&un->un_pm_mutex); 8803 (void) untimeout(temp_id); 8804 (void) pm_idle_component(SD_DEVINFO(un), 0); 8805 } else { 8806 mutex_exit(&un->un_pm_mutex); 8807 } 8808 } 8809 } 8810 8811 /* 8812 * Cleanup from the scsi_ifsetcap() calls (437868) 8813 * Relocated here from above to be after the call to 8814 * pm_lower_power, which was getting errors. 8815 */ 8816 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8817 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8818 8819 /* 8820 * Currently, tagged queuing is supported per target based by HBA. 8821 * Setting this per lun instance actually sets the capability of this 8822 * target in HBA, which affects those luns already attached on the 8823 * same target. So during detach, we can only disable this capability 8824 * only when this is the only lun left on this target. By doing 8825 * this, we assume a target has the same tagged queuing capability 8826 * for every lun. The condition can be removed when HBA is changed to 8827 * support per lun based tagged queuing capability. 8828 */ 8829 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8830 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8831 } 8832 8833 if (un->un_f_is_fibre == FALSE) { 8834 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8835 } 8836 8837 /* 8838 * Remove any event callbacks, fibre only 8839 */ 8840 if (un->un_f_is_fibre == TRUE) { 8841 if ((un->un_insert_event != NULL) && 8842 (ddi_remove_event_handler(un->un_insert_cb_id) != 8843 DDI_SUCCESS)) { 8844 /* 8845 * Note: We are returning here after having done 8846 * substantial cleanup above. This is consistent 8847 * with the legacy implementation but this may not 8848 * be the right thing to do. 8849 */ 8850 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8851 "sd_dr_detach: Cannot cancel insert event\n"); 8852 goto err_remove_event; 8853 } 8854 un->un_insert_event = NULL; 8855 8856 if ((un->un_remove_event != NULL) && 8857 (ddi_remove_event_handler(un->un_remove_cb_id) != 8858 DDI_SUCCESS)) { 8859 /* 8860 * Note: We are returning here after having done 8861 * substantial cleanup above. This is consistent 8862 * with the legacy implementation but this may not 8863 * be the right thing to do. 8864 */ 8865 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8866 "sd_dr_detach: Cannot cancel remove event\n"); 8867 goto err_remove_event; 8868 } 8869 un->un_remove_event = NULL; 8870 } 8871 8872 /* Do not free the softstate if the callback routine is active */ 8873 sd_sync_with_callback(un); 8874 8875 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 8876 cmlb_free_handle(&un->un_cmlbhandle); 8877 8878 /* 8879 * Clean up the soft state struct. 8880 * Cleanup is done in reverse order of allocs/inits. 8881 * At this point there should be no competing threads anymore. 8882 */ 8883 8884 scsi_fm_fini(devp); 8885 8886 /* 8887 * Deallocate memory for SCSI FMA. 8888 */ 8889 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8890 8891 /* 8892 * Unregister and free device id if it was not registered 8893 * by the transport. 8894 */ 8895 if (un->un_f_devid_transport_defined == FALSE) 8896 ddi_devid_unregister(devi); 8897 8898 /* 8899 * free the devid structure if allocated before (by ddi_devid_init() 8900 * or ddi_devid_get()). 
8901 */ 8902 if (un->un_devid) { 8903 ddi_devid_free(un->un_devid); 8904 un->un_devid = NULL; 8905 } 8906 8907 /* 8908 * Destroy wmap cache if it exists. 8909 */ 8910 if (un->un_wm_cache != NULL) { 8911 kmem_cache_destroy(un->un_wm_cache); 8912 un->un_wm_cache = NULL; 8913 } 8914 8915 /* 8916 * kstat cleanup is done in detach for all device types (4363169). 8917 * We do not want to fail detach if the device kstats are not deleted 8918 * since there is a confusion about the devo_refcnt for the device. 8919 * We just delete the kstats and let detach complete successfully. 8920 */ 8921 if (un->un_stats != NULL) { 8922 kstat_delete(un->un_stats); 8923 un->un_stats = NULL; 8924 } 8925 if (un->un_unmapstats != NULL) { 8926 kstat_delete(un->un_unmapstats_ks); 8927 un->un_unmapstats_ks = NULL; 8928 un->un_unmapstats = NULL; 8929 } 8930 if (un->un_errstats != NULL) { 8931 kstat_delete(un->un_errstats); 8932 un->un_errstats = NULL; 8933 } 8934 8935 /* Remove partition stats */ 8936 if (un->un_f_pkstats_enabled) { 8937 for (i = 0; i < NSDMAP; i++) { 8938 if (un->un_pstats[i] != NULL) { 8939 kstat_delete(un->un_pstats[i]); 8940 un->un_pstats[i] = NULL; 8941 } 8942 } 8943 } 8944 8945 /* Remove xbuf registration */ 8946 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8947 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8948 8949 /* Remove driver properties */ 8950 ddi_prop_remove_all(devi); 8951 8952 mutex_destroy(&un->un_pm_mutex); 8953 cv_destroy(&un->un_pm_busy_cv); 8954 8955 cv_destroy(&un->un_wcc_cv); 8956 8957 /* Removable media condvar. */ 8958 cv_destroy(&un->un_state_cv); 8959 8960 /* Suspend/resume condvar. */ 8961 cv_destroy(&un->un_suspend_cv); 8962 cv_destroy(&un->un_disk_busy_cv); 8963 8964 sd_free_rqs(un); 8965 8966 /* Free up soft state */ 8967 devp->sd_private = NULL; 8968 8969 bzero(un, sizeof (struct sd_lun)); 8970 8971 ddi_soft_state_free(sd_state, instance); 8972 8973 /* This frees up the INQUIRY data associated with the device. */ 8974 scsi_unprobe(devp); 8975 8976 /* 8977 * After successfully detaching an instance, we update the information 8978 * of how many luns have been attached in the relative target and 8979 * controller for parallel SCSI. This information is used when sd tries 8980 * to set the tagged queuing capability in HBA. 8981 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8982 * check if the device is parallel SCSI. However, we don't need to 8983 * check here because we've already checked during attach. No device 8984 * that is not parallel SCSI is in the chain. 8985 */ 8986 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8987 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8988 } 8989 8990 return (DDI_SUCCESS); 8991 8992 err_notclosed: 8993 mutex_exit(SD_MUTEX(un)); 8994 8995 err_stillbusy: 8996 _NOTE(NO_COMPETING_THREADS_NOW); 8997 8998 err_remove_event: 8999 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 9000 return (DDI_FAILURE); 9001 } 9002 9003 9004 /* 9005 * Function: sd_create_errstats 9006 * 9007 * Description: This routine instantiates the device error stats. 9008 * 9009 * Note: During attach the stats are instantiated first so they are 9010 * available for attach-time routines that utilize the driver 9011 * iopath to send commands to the device. The stats are initialized 9012 * separately so data obtained during some attach-time routines is 9013 * available. 
(4362483) 9014 * 9015 * Arguments: un - driver soft state (unit) structure 9016 * instance - driver instance 9017 * 9018 * Context: Kernel thread context 9019 */ 9020 9021 static void 9022 sd_create_errstats(struct sd_lun *un, int instance) 9023 { 9024 struct sd_errstats *stp; 9025 char kstatmodule_err[KSTAT_STRLEN]; 9026 char kstatname[KSTAT_STRLEN]; 9027 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 9028 9029 ASSERT(un != NULL); 9030 9031 if (un->un_errstats != NULL) { 9032 return; 9033 } 9034 9035 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 9036 "%serr", sd_label); 9037 (void) snprintf(kstatname, sizeof (kstatname), 9038 "%s%d,err", sd_label, instance); 9039 9040 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 9041 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 9042 9043 if (un->un_errstats == NULL) { 9044 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9045 "sd_create_errstats: Failed kstat_create\n"); 9046 return; 9047 } 9048 9049 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9050 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 9051 KSTAT_DATA_UINT32); 9052 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 9053 KSTAT_DATA_UINT32); 9054 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 9055 KSTAT_DATA_UINT32); 9056 kstat_named_init(&stp->sd_vid, "Vendor", 9057 KSTAT_DATA_CHAR); 9058 kstat_named_init(&stp->sd_pid, "Product", 9059 KSTAT_DATA_CHAR); 9060 kstat_named_init(&stp->sd_revision, "Revision", 9061 KSTAT_DATA_CHAR); 9062 kstat_named_init(&stp->sd_serial, "Serial No", 9063 KSTAT_DATA_CHAR); 9064 kstat_named_init(&stp->sd_capacity, "Size", 9065 KSTAT_DATA_ULONGLONG); 9066 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 9067 KSTAT_DATA_UINT32); 9068 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 9069 KSTAT_DATA_UINT32); 9070 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 9071 KSTAT_DATA_UINT32); 9072 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 9073 KSTAT_DATA_UINT32); 9074 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 9075 KSTAT_DATA_UINT32); 9076 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 9077 KSTAT_DATA_UINT32); 9078 9079 un->un_errstats->ks_private = un; 9080 un->un_errstats->ks_update = nulldev; 9081 9082 kstat_install(un->un_errstats); 9083 } 9084 9085 9086 /* 9087 * Function: sd_set_errstats 9088 * 9089 * Description: This routine sets the value of the vendor id, product id, 9090 * revision, serial number, and capacity device error stats. 9091 * 9092 * Note: During attach the stats are instantiated first so they are 9093 * available for attach-time routines that utilize the driver 9094 * iopath to send commands to the device. The stats are initialized 9095 * separately so data obtained during some attach-time routines is 9096 * available. 
(4362483)
9097 *
9098 * Arguments: un - driver soft state (unit) structure
9099 *
9100 * Context: Kernel thread context
9101 */
9102
9103 static void
9104 sd_set_errstats(struct sd_lun *un)
9105 {
9106 struct sd_errstats *stp;
9107 char *sn;
9108
9109 ASSERT(un != NULL);
9110 ASSERT(un->un_errstats != NULL);
9111 stp = (struct sd_errstats *)un->un_errstats->ks_data;
9112 ASSERT(stp != NULL);
9113 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
9114 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
9115 (void) strncpy(stp->sd_revision.value.c,
9116 un->un_sd->sd_inq->inq_revision, 4);
9117
9118 /*
9119 * All the errstats are persistent across detach/attach,
9120 * so reset all the errstats here in case of the hot
9121 * replacement of disk drives, except for unchanged
9122 * Sun-qualified drives.
9123 */
9124 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
9125 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
9126 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
9127 stp->sd_softerrs.value.ui32 = 0;
9128 stp->sd_harderrs.value.ui32 = 0;
9129 stp->sd_transerrs.value.ui32 = 0;
9130 stp->sd_rq_media_err.value.ui32 = 0;
9131 stp->sd_rq_ntrdy_err.value.ui32 = 0;
9132 stp->sd_rq_nodev_err.value.ui32 = 0;
9133 stp->sd_rq_recov_err.value.ui32 = 0;
9134 stp->sd_rq_illrq_err.value.ui32 = 0;
9135 stp->sd_rq_pfa_err.value.ui32 = 0;
9136 }
9137
9138 /*
9139 * Set the "Serial No" kstat for Sun qualified drives (indicated by
9140 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
9141 * (4376302))
9142 */
9143 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
9144 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
9145 sizeof (SD_INQUIRY(un)->inq_serial));
9146 } else {
9147 /*
9148 * Set the "Serial No" kstat for non-Sun qualified drives
9149 */
9150 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, SD_DEVINFO(un),
9151 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9152 INQUIRY_SERIAL_NO, &sn) == DDI_SUCCESS) {
9153 (void) strlcpy(stp->sd_serial.value.c, sn,
9154 sizeof (stp->sd_serial.value.c));
9155 ddi_prop_free(sn);
9156 }
9157 }
9158
9159 if (un->un_f_blockcount_is_valid != TRUE) {
9160 /*
9161 * Set capacity error stat to 0 for no media. This ensures
9162 * a valid capacity is displayed in response to 'iostat -E'
9163 * when no media is present in the device.
9164 */
9165 stp->sd_capacity.value.ui64 = 0;
9166 } else {
9167 /*
9168 * Multiply un_blockcount by un->un_sys_blocksize to get
9169 * capacity.
9170 *
9171 * Note: for non-512 blocksize devices "un_blockcount" has been
9172 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
9173 * (un_tgt_blocksize / un->un_sys_blocksize).
9174 */
9175 stp->sd_capacity.value.ui64 = (uint64_t)
9176 ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
9177 }
9178 }
9179
9180
9181 /*
9182 * Function: sd_set_pstats
9183 *
9184 * Description: This routine instantiates and initializes the partition
9185 * stats for each partition with more than zero blocks.
9186 * (4363169)
9187 *
9188 * Arguments: un - driver soft state (unit) structure
9189 *
9190 * Context: Kernel thread context
9191 */
9192
9193 static void
9194 sd_set_pstats(struct sd_lun *un)
9195 {
9196 char kstatname[KSTAT_STRLEN];
9197 int instance;
9198 int i;
9199 diskaddr_t nblks = 0;
9200 char *partname = NULL;
9201
9202 ASSERT(un != NULL);
9203
9204 instance = ddi_get_instance(SD_DEVINFO(un));
9205
9206 /* Note:x86: is this a VTOC8/VTOC16 difference?
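 * For example, with sd_label "sd", instance 0 and partition name "a",
 * the snprintf() below yields the kstat name "sd0,a"; tools such as
 * iostat consume these KSTAT_TYPE_IO kstats for per-partition counts.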
*/
9207 for (i = 0; i < NSDMAP; i++) {
9208
9209 if (cmlb_partinfo(un->un_cmlbhandle, i,
9210 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
9211 continue;
9212 mutex_enter(SD_MUTEX(un));
9213
9214 if ((un->un_pstats[i] == NULL) &&
9215 (nblks != 0)) {
9216
9217 (void) snprintf(kstatname, sizeof (kstatname),
9218 "%s%d,%s", sd_label, instance,
9219 partname);
9220
9221 un->un_pstats[i] = kstat_create(sd_label,
9222 instance, kstatname, "partition", KSTAT_TYPE_IO,
9223 1, KSTAT_FLAG_PERSISTENT);
9224 if (un->un_pstats[i] != NULL) {
9225 un->un_pstats[i]->ks_lock = SD_MUTEX(un);
9226 kstat_install(un->un_pstats[i]);
9227 }
9228 }
9229 mutex_exit(SD_MUTEX(un));
9230 }
9231 }
9232
9233
9234 #if (defined(__fibre))
9235 /*
9236 * Function: sd_init_event_callbacks
9237 *
9238 * Description: This routine initializes the insertion and removal event
9239 * callbacks. (fibre only)
9240 *
9241 * Arguments: un - driver soft state (unit) structure
9242 *
9243 * Context: Kernel thread context
9244 */
9245
9246 static void
9247 sd_init_event_callbacks(struct sd_lun *un)
9248 {
9249 ASSERT(un != NULL);
9250
9251 if ((un->un_insert_event == NULL) &&
9252 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
9253 &un->un_insert_event) == DDI_SUCCESS)) {
9254 /*
9255 * Add the callback for an insertion event
9256 */
9257 (void) ddi_add_event_handler(SD_DEVINFO(un),
9258 un->un_insert_event, sd_event_callback, (void *)un,
9259 &(un->un_insert_cb_id));
9260 }
9261
9262 if ((un->un_remove_event == NULL) &&
9263 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
9264 &un->un_remove_event) == DDI_SUCCESS)) {
9265 /*
9266 * Add the callback for a removal event
9267 */
9268 (void) ddi_add_event_handler(SD_DEVINFO(un),
9269 un->un_remove_event, sd_event_callback, (void *)un,
9270 &(un->un_remove_cb_id));
9271 }
9272 }
9273
9274
9275 /*
9276 * Function: sd_event_callback
9277 *
9278 * Description: This routine handles insert/remove events (photon). The
9279 * state is changed to OFFLINE which can be used to suppress
9280 * error msgs. (fibre only)
9281 *
9282 * Arguments: un - driver soft state (unit) structure
9283 *
9284 * Context: Callout thread context
9285 */
9286 /* ARGSUSED */
9287 static void
9288 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
9289 void *bus_impldata)
9290 {
9291 struct sd_lun *un = (struct sd_lun *)arg;
9292
9293 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
9294 if (event == un->un_insert_event) {
9295 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
9296 mutex_enter(SD_MUTEX(un));
9297 if (un->un_state == SD_STATE_OFFLINE) {
9298 if (un->un_last_state != SD_STATE_SUSPENDED) {
9299 un->un_state = un->un_last_state;
9300 } else {
9301 /*
9302 * We have gone through SUSPEND/RESUME while
9303 * we were offline. Restore the last state.
9304 */
9305 un->un_state = un->un_save_state;
9306 }
9307 }
9308 mutex_exit(SD_MUTEX(un));
9309
9310 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
9311 } else if (event == un->un_remove_event) {
9312 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
9313 mutex_enter(SD_MUTEX(un));
9314 /*
9315 * We need to handle an event callback that occurs during
9316 * the suspend operation, since we don't prevent it.
9317 */ 9318 if (un->un_state != SD_STATE_OFFLINE) { 9319 if (un->un_state != SD_STATE_SUSPENDED) { 9320 New_state(un, SD_STATE_OFFLINE); 9321 } else { 9322 un->un_last_state = SD_STATE_OFFLINE; 9323 } 9324 } 9325 mutex_exit(SD_MUTEX(un)); 9326 } else { 9327 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 9328 "!Unknown event\n"); 9329 } 9330 9331 } 9332 #endif 9333 9334 /* 9335 * Values related to caching mode page depending on whether the unit is ATAPI. 9336 */ 9337 #define SDC_CDB_GROUP(un) ((un->un_f_cfg_is_atapi == TRUE) ? \ 9338 CDB_GROUP1 : CDB_GROUP0) 9339 #define SDC_HDRLEN(un) ((un->un_f_cfg_is_atapi == TRUE) ? \ 9340 MODE_HEADER_LENGTH_GRP2 : MODE_HEADER_LENGTH) 9341 /* 9342 * Use mode_cache_scsi3 to ensure we get all of the mode sense data, otherwise 9343 * the mode select will fail (mode_cache_scsi3 is a superset of mode_caching). 9344 */ 9345 #define SDC_BUFLEN(un) (SDC_HDRLEN(un) + MODE_BLK_DESC_LENGTH + \ 9346 sizeof (struct mode_cache_scsi3)) 9347 9348 static int 9349 sd_get_caching_mode_page(sd_ssc_t *ssc, uchar_t page_control, uchar_t **header, 9350 int *bdlen) 9351 { 9352 struct sd_lun *un = ssc->ssc_un; 9353 struct mode_caching *mode_caching_page; 9354 size_t buflen = SDC_BUFLEN(un); 9355 int hdrlen = SDC_HDRLEN(un); 9356 int rval; 9357 9358 /* 9359 * Do a test unit ready, otherwise a mode sense may not work if this 9360 * is the first command sent to the device after boot. 9361 */ 9362 if (sd_send_scsi_TEST_UNIT_READY(ssc, 0) != 0) 9363 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9364 9365 /* 9366 * Allocate memory for the retrieved mode page and its headers. Set 9367 * a pointer to the page itself. 9368 */ 9369 *header = kmem_zalloc(buflen, KM_SLEEP); 9370 9371 /* Get the information from the device */ 9372 rval = sd_send_scsi_MODE_SENSE(ssc, SDC_CDB_GROUP(un), *header, buflen, 9373 page_control | MODEPAGE_CACHING, SD_PATH_DIRECT); 9374 if (rval != 0) { 9375 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, "%s: Mode Sense Failed\n", 9376 __func__); 9377 goto mode_sense_failed; 9378 } 9379 9380 /* 9381 * Determine size of Block Descriptors in order to locate 9382 * the mode page data. ATAPI devices return 0, SCSI devices 9383 * should return MODE_BLK_DESC_LENGTH. 9384 */ 9385 if (un->un_f_cfg_is_atapi == TRUE) { 9386 struct mode_header_grp2 *mhp = 9387 (struct mode_header_grp2 *)(*header); 9388 *bdlen = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9389 } else { 9390 *bdlen = ((struct mode_header *)(*header))->bdesc_length; 9391 } 9392 9393 if (*bdlen > MODE_BLK_DESC_LENGTH) { 9394 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9395 "%s: Mode Sense returned invalid block descriptor length\n", 9396 __func__); 9397 rval = EIO; 9398 goto mode_sense_failed; 9399 } 9400 9401 mode_caching_page = (struct mode_caching *)(*header + hdrlen + *bdlen); 9402 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9403 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9404 "%s: Mode Sense caching page code mismatch %d\n", 9405 __func__, mode_caching_page->mode_page.code); 9406 rval = EIO; 9407 } 9408 9409 mode_sense_failed: 9410 if (rval != 0) { 9411 kmem_free(*header, buflen); 9412 *header = NULL; 9413 *bdlen = 0; 9414 } 9415 return (rval); 9416 } 9417 9418 /* 9419 * Function: sd_cache_control() 9420 * 9421 * Description: This routine is the driver entry point for setting 9422 * read and write caching by modifying the WCE (write cache 9423 * enable) and RCD (read cache disable) bits of mode 9424 * page 8 (MODEPAGE_CACHING). 
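 * As a sketch, the buffer returned by sd_get_caching_mode_page()
 * is laid out as follows, which is how the page pointer used here is
 * computed (header + SDC_HDRLEN(un) + bdlen):
 *
 *	+---------------------+  header
 *	| mode header         |  SDC_HDRLEN(un): 4 bytes (8 for ATAPI)
 *	+---------------------+
 *	| block descriptor    |  bdlen: 0 (ATAPI) up to MODE_BLK_DESC_LENGTH
 *	+---------------------+
 *	| caching page (0x08) |  struct mode_caching
 *	+---------------------+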
9425 * 9426 * Arguments: ssc - ssc contains pointer to driver soft state 9427 * (unit) structure for this target. 9428 * rcd_flag - flag for controlling the read cache 9429 * wce_flag - flag for controlling the write cache 9430 * 9431 * Return Code: EIO 9432 * code returned by sd_send_scsi_MODE_SENSE and 9433 * sd_send_scsi_MODE_SELECT 9434 * 9435 * Context: Kernel Thread 9436 */ 9437 9438 static int 9439 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag) 9440 { 9441 struct sd_lun *un = ssc->ssc_un; 9442 struct mode_caching *mode_caching_page; 9443 uchar_t *header; 9444 size_t buflen = SDC_BUFLEN(un); 9445 int hdrlen = SDC_HDRLEN(un); 9446 int bdlen; 9447 int rval; 9448 9449 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CURRENT, &header, &bdlen); 9450 switch (rval) { 9451 case 0: 9452 /* Check the relevant bits on successful mode sense */ 9453 mode_caching_page = (struct mode_caching *)(header + hdrlen + 9454 bdlen); 9455 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 9456 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 9457 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 9458 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 9459 size_t sbuflen; 9460 uchar_t save_pg; 9461 9462 /* 9463 * Construct select buffer length based on the 9464 * length of the sense data returned. 9465 */ 9466 sbuflen = hdrlen + bdlen + sizeof (struct mode_page) + 9467 (int)mode_caching_page->mode_page.length; 9468 9469 /* Set the caching bits as requested */ 9470 if (rcd_flag == SD_CACHE_ENABLE) 9471 mode_caching_page->rcd = 0; 9472 else if (rcd_flag == SD_CACHE_DISABLE) 9473 mode_caching_page->rcd = 1; 9474 9475 if (wce_flag == SD_CACHE_ENABLE) 9476 mode_caching_page->wce = 1; 9477 else if (wce_flag == SD_CACHE_DISABLE) 9478 mode_caching_page->wce = 0; 9479 9480 /* 9481 * Save the page if the mode sense says the 9482 * drive supports it. 9483 */ 9484 save_pg = mode_caching_page->mode_page.ps ? 9485 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 9486 9487 /* Clear reserved bits before mode select */ 9488 mode_caching_page->mode_page.ps = 0; 9489 9490 /* 9491 * Clear out mode header for mode select. 9492 * The rest of the retrieved page will be reused. 9493 */ 9494 bzero(header, hdrlen); 9495 9496 if (un->un_f_cfg_is_atapi == TRUE) { 9497 struct mode_header_grp2 *mhp = 9498 (struct mode_header_grp2 *)header; 9499 mhp->bdesc_length_hi = bdlen >> 8; 9500 mhp->bdesc_length_lo = (uchar_t)bdlen & 0xff; 9501 } else { 9502 ((struct mode_header *)header)->bdesc_length = 9503 bdlen; 9504 } 9505 9506 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9507 9508 /* Issue mode select to change the cache settings */ 9509 rval = sd_send_scsi_MODE_SELECT(ssc, SDC_CDB_GROUP(un), 9510 header, sbuflen, save_pg, SD_PATH_DIRECT); 9511 } 9512 kmem_free(header, buflen); 9513 break; 9514 case EIO: 9515 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9516 break; 9517 default: 9518 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9519 break; 9520 } 9521 9522 return (rval); 9523 } 9524 9525 9526 /* 9527 * Function: sd_get_write_cache_enabled() 9528 * 9529 * Description: This routine is the driver entry point for determining if write 9530 * caching is enabled. It examines the WCE (write cache enable) 9531 * bits of mode page 8 (MODEPAGE_CACHING) with Page Control field 9532 * bits set to MODEPAGE_CURRENT. 9533 * 9534 * Arguments: ssc - ssc contains pointer to driver soft state 9535 * (unit) structure for this target. 
9536 * is_enabled - pointer to int where write cache enabled state 9537 * is returned (non-zero -> write cache enabled) 9538 * 9539 * Return Code: EIO 9540 * code returned by sd_send_scsi_MODE_SENSE 9541 * 9542 * Context: Kernel Thread 9543 * 9544 * NOTE: If ioctl is added to disable write cache, this sequence should 9545 * be followed so that no locking is required for accesses to 9546 * un->un_f_write_cache_enabled: 9547 * do mode select to clear wce 9548 * do synchronize cache to flush cache 9549 * set un->un_f_write_cache_enabled = FALSE 9550 * 9551 * Conversely, an ioctl to enable the write cache should be done 9552 * in this order: 9553 * set un->un_f_write_cache_enabled = TRUE 9554 * do mode select to set wce 9555 */ 9556 9557 static int 9558 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled) 9559 { 9560 struct sd_lun *un = ssc->ssc_un; 9561 struct mode_caching *mode_caching_page; 9562 uchar_t *header; 9563 size_t buflen = SDC_BUFLEN(un); 9564 int hdrlen = SDC_HDRLEN(un); 9565 int bdlen; 9566 int rval; 9567 9568 /* In case of error, flag as enabled */ 9569 *is_enabled = TRUE; 9570 9571 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CURRENT, &header, &bdlen); 9572 switch (rval) { 9573 case 0: 9574 mode_caching_page = (struct mode_caching *)(header + hdrlen + 9575 bdlen); 9576 *is_enabled = mode_caching_page->wce; 9577 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 9578 kmem_free(header, buflen); 9579 break; 9580 case EIO: { 9581 /* 9582 * Some disks do not support Mode Sense(6), we 9583 * should ignore this kind of error (sense key is 9584 * 0x5 - illegal request). 9585 */ 9586 uint8_t *sensep; 9587 int senlen; 9588 9589 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 9590 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 9591 ssc->ssc_uscsi_cmd->uscsi_rqresid); 9592 9593 if (senlen > 0 && 9594 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 9595 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 9596 } else { 9597 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9598 } 9599 break; 9600 } 9601 default: 9602 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9603 break; 9604 } 9605 9606 return (rval); 9607 } 9608 9609 /* 9610 * Function: sd_get_write_cache_changeable() 9611 * 9612 * Description: This routine is the driver entry point for determining if write 9613 * caching is changeable. It examines the WCE (write cache enable) 9614 * bits of mode page 8 (MODEPAGE_CACHING) with Page Control field 9615 * bits set to MODEPAGE_CHANGEABLE. 9616 * 9617 * Arguments: ssc - ssc contains pointer to driver soft state 9618 * (unit) structure for this target. 
9619 * is_changeable - pointer to int where write cache changeable
9620 * state is returned (non-zero -> write cache
9621 * changeable)
9622 *
9623 * Context: Kernel Thread
9624 */
9625
9626 static void
9627 sd_get_write_cache_changeable(sd_ssc_t *ssc, int *is_changeable)
9628 {
9629 struct sd_lun *un = ssc->ssc_un;
9630 struct mode_caching *mode_caching_page;
9631 uchar_t *header;
9632 size_t buflen = SDC_BUFLEN(un);
9633 int hdrlen = SDC_HDRLEN(un);
9634 int bdlen;
9635 int rval;
9636
9637 /* In case of error, flag as changeable */
9638 *is_changeable = TRUE;
9639
9640 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CHANGEABLE, &header,
9641 &bdlen);
9642 switch (rval) {
9643 case 0:
9644 mode_caching_page = (struct mode_caching *)(header + hdrlen +
9645 bdlen);
9646 *is_changeable = mode_caching_page->wce;
9647 kmem_free(header, buflen);
9648 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
9649 break;
9650 case EIO:
9651 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
9652 break;
9653 default:
9654 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9655 break;
9656 }
9657 }
9658
9659 /*
9660 * Function: sd_get_nv_sup()
9661 *
9662 * Description: This routine is the driver entry point for
9663 * determining whether non-volatile cache is supported. This
9664 * determination process works as follows:
9665 *
9666 * 1. sd first queries sd.conf on whether the
9667 * suppress_cache_flush bit is set for this device.
9668 *
9669 * 2. If not there, it then queries the internal disk table.
9670 *
9671 * 3. If either sd.conf or the internal disk table specifies
9672 * that cache flush be suppressed, we don't bother checking
9673 * the NV_SUP bit.
9674 *
9675 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries
9676 * the optional INQUIRY VPD page 0x86. If the device
9677 * supports VPD page 0x86, sd examines the NV_SUP
9678 * (non-volatile cache support) bit in the INQUIRY VPD page
9679 * 0x86:
9680 * o If the NV_SUP bit is set, sd assumes the device has a
9681 * non-volatile cache and sets
9682 * un_f_sync_nv_supported to TRUE.
9683 * o Otherwise the cache is not non-volatile, and
9684 * un_f_sync_nv_supported is set to FALSE.
9685 *
9686 * Arguments: ssc - ssc contains pointer to driver soft state
9687 * (unit) structure for this target
9688 * Return Code:
9689 *
9690 * Context: Kernel Thread
9691 */
9692
9693 static void
9694 sd_get_nv_sup(sd_ssc_t *ssc)
9695 {
9696 int rval = 0;
9697 uchar_t *inq86 = NULL;
9698 size_t inq86_len = MAX_INQUIRY_SIZE;
9699 size_t inq86_resid = 0;
9700 struct dk_callback *dkc;
9701 struct sd_lun *un;
9702
9703 ASSERT(ssc != NULL);
9704 un = ssc->ssc_un;
9705 ASSERT(un != NULL);
9706
9707 mutex_enter(SD_MUTEX(un));
9708
9709 /*
9710 * Be conservative on the device's support of
9711 * SYNC_NV bit: un_f_sync_nv_supported is
9712 * initialized to be false.
9713 */
9714 un->un_f_sync_nv_supported = FALSE;
9715
9716 /*
9717 * If either sd.conf or the internal disk table
9718 * specifies that cache flush be suppressed, then
9719 * we don't bother checking the NV_SUP bit.
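 * When the check is not suppressed, the bit examined lives in byte 6
 * of the Extended INQUIRY Data VPD page (0x86); as a sketch, with
 * inq86 holding the page:
 *
 *	if (inq86[6] & SD_VPD_NV_SUP)
 *		un->un_f_sync_nv_supported = TRUE;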
9720 */
9721 if (un->un_f_suppress_cache_flush == TRUE) {
9722 mutex_exit(SD_MUTEX(un));
9723 return;
9724 }
9725
9726 if (sd_check_vpd_page_support(ssc) == 0 &&
9727 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
9728 mutex_exit(SD_MUTEX(un));
9729 /* collect page 86 data if available */
9730 inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
9731
9732 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
9733 0x01, 0x86, &inq86_resid);
9734
9735 if (rval == 0 && (inq86_len - inq86_resid > 6)) {
9736 SD_TRACE(SD_LOG_COMMON, un,
9737 "sd_get_nv_sup: \
9738 successfully get VPD page: %x \
9739 PAGE LENGTH: %x BYTE 6: %x\n",
9740 inq86[1], inq86[3], inq86[6]);
9741
9742 mutex_enter(SD_MUTEX(un));
9743 /*
9744 * Check the value of the NV_SUP bit: only if the device
9745 * reports NV_SUP to be 1 will the
9746 * un_f_sync_nv_supported bit be set to true.
9747 */
9748 if (inq86[6] & SD_VPD_NV_SUP) {
9749 un->un_f_sync_nv_supported = TRUE;
9750 }
9751 mutex_exit(SD_MUTEX(un));
9752 } else if (rval != 0) {
9753 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9754 }
9755
9756 kmem_free(inq86, inq86_len);
9757 } else {
9758 mutex_exit(SD_MUTEX(un));
9759 }
9760
9761 /*
9762 * Send a SYNC CACHE command to check whether
9763 * SYNC_NV bit is supported. This command relies on
9764 * un_f_sync_nv_supported being set to the correct value.
9765 */
9766 mutex_enter(SD_MUTEX(un));
9767 if (un->un_f_sync_nv_supported) {
9768 mutex_exit(SD_MUTEX(un));
9769 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
9770 dkc->dkc_flag = FLUSH_VOLATILE;
9771 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
9772
9773 /*
9774 * Send a TEST UNIT READY command to the device. This should
9775 * clear any outstanding UNIT ATTENTION that may be present.
9776 */
9777 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
9778 if (rval != 0)
9779 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9780
9781 kmem_free(dkc, sizeof (struct dk_callback));
9782 } else {
9783 mutex_exit(SD_MUTEX(un));
9784 }
9785
9786 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \
9787 un_f_suppress_cache_flush is set to %d\n",
9788 un->un_f_suppress_cache_flush);
9789 }
9790
9791 /*
9792 * Function: sd_make_device
9793 *
9794 * Description: Utility routine to return the Solaris device number from
9795 * the data in the device's dev_info structure.
9796 *
9797 * Return Code: The Solaris device number
9798 *
9799 * Context: Any
9800 */
9801
9802 static dev_t
9803 sd_make_device(dev_info_t *devi)
9804 {
9805 return (makedevice(ddi_driver_major(devi),
9806 ddi_get_instance(devi) << SDUNIT_SHIFT));
9807 }
9808
9809
9810 /*
9811 * Function: sd_pm_entry
9812 *
9813 * Description: Called at the start of a new command to manage power
9814 * and busy status of a device. This includes determining whether
9815 * the current power state of the device is sufficient for
9816 * performing the command or whether it must be changed.
9817 * The PM framework is notified appropriately.
9818 * Only with a return status of DDI_SUCCESS will the
9819 * component be marked busy to the framework.
9820 *
9821 * All callers of sd_pm_entry must check the return status
9822 * and only call sd_pm_exit if it was DDI_SUCCESS. A status
9823 * of DDI_FAILURE indicates the device failed to power up.
9824 * In this case un_pm_count has been adjusted so the result
9825 * on exit is still powered down, i.e. count is less than 0.
9826 * Calling sd_pm_exit with this count value hits an ASSERT.
9827 *
9828 * Return Code: DDI_SUCCESS or DDI_FAILURE
9829 *
9830 * Context: Kernel thread context.
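 * A minimal caller sketch of that contract:
 *
 *	if (sd_pm_entry(un) != DDI_SUCCESS)
 *		return (EIO);	(device failed to power up)
 *	(... issue the command ...)
 *	sd_pm_exit(un);		(only after a successful sd_pm_entry)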
9831 */ 9832 9833 static int 9834 sd_pm_entry(struct sd_lun *un) 9835 { 9836 int return_status = DDI_SUCCESS; 9837 9838 ASSERT(!mutex_owned(SD_MUTEX(un))); 9839 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9840 9841 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 9842 9843 if (un->un_f_pm_is_enabled == FALSE) { 9844 SD_TRACE(SD_LOG_IO_PM, un, 9845 "sd_pm_entry: exiting, PM not enabled\n"); 9846 return (return_status); 9847 } 9848 9849 /* 9850 * Just increment a counter if PM is enabled. On the transition from 9851 * 0 ==> 1, mark the device as busy. The iodone side will decrement 9852 * the count with each IO and mark the device as idle when the count 9853 * hits 0. 9854 * 9855 * If the count is less than 0 the device is powered down. If a powered 9856 * down device is successfully powered up then the count must be 9857 * incremented to reflect the power up. Note that it'll get incremented 9858 * a second time to become busy. 9859 * 9860 * Because the following has the potential to change the device state 9861 * and must release the un_pm_mutex to do so, only one thread can be 9862 * allowed through at a time. 9863 */ 9864 9865 mutex_enter(&un->un_pm_mutex); 9866 while (un->un_pm_busy == TRUE) { 9867 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 9868 } 9869 un->un_pm_busy = TRUE; 9870 9871 if (un->un_pm_count < 1) { 9872 9873 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 9874 9875 /* 9876 * Indicate we are now busy so the framework won't attempt to 9877 * power down the device. This call will only fail if either 9878 * we passed a bad component number or the device has no 9879 * components. Neither of these should ever happen. 9880 */ 9881 mutex_exit(&un->un_pm_mutex); 9882 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9883 ASSERT(return_status == DDI_SUCCESS); 9884 9885 mutex_enter(&un->un_pm_mutex); 9886 9887 if (un->un_pm_count < 0) { 9888 mutex_exit(&un->un_pm_mutex); 9889 9890 SD_TRACE(SD_LOG_IO_PM, un, 9891 "sd_pm_entry: power up component\n"); 9892 9893 /* 9894 * pm_raise_power will cause sdpower to be called 9895 * which brings the device power level to the 9896 * desired state, If successful, un_pm_count and 9897 * un_power_level will be updated appropriately. 9898 */ 9899 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9900 SD_PM_STATE_ACTIVE(un)); 9901 9902 mutex_enter(&un->un_pm_mutex); 9903 9904 if (return_status != DDI_SUCCESS) { 9905 /* 9906 * Power up failed. 9907 * Idle the device and adjust the count 9908 * so the result on exit is that we're 9909 * still powered down, ie. count is less than 0. 9910 */ 9911 SD_TRACE(SD_LOG_IO_PM, un, 9912 "sd_pm_entry: power up failed," 9913 " idle the component\n"); 9914 9915 (void) pm_idle_component(SD_DEVINFO(un), 0); 9916 un->un_pm_count--; 9917 } else { 9918 /* 9919 * Device is powered up, verify the 9920 * count is non-negative. 9921 * This is debug only. 9922 */ 9923 ASSERT(un->un_pm_count == 0); 9924 } 9925 } 9926 9927 if (return_status == DDI_SUCCESS) { 9928 /* 9929 * For performance, now that the device has been tagged 9930 * as busy, and it's known to be powered up, update the 9931 * chain types to use jump tables that do not include 9932 * pm. This significantly lowers the overhead and 9933 * therefore improves performance. 
9934 */ 9935 9936 mutex_exit(&un->un_pm_mutex); 9937 mutex_enter(SD_MUTEX(un)); 9938 SD_TRACE(SD_LOG_IO_PM, un, 9939 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9940 un->un_uscsi_chain_type); 9941 9942 if (un->un_f_non_devbsize_supported) { 9943 un->un_buf_chain_type = 9944 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9945 } else { 9946 un->un_buf_chain_type = 9947 SD_CHAIN_INFO_DISK_NO_PM; 9948 } 9949 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9950 9951 SD_TRACE(SD_LOG_IO_PM, un, 9952 " changed uscsi_chain_type to %d\n", 9953 un->un_uscsi_chain_type); 9954 mutex_exit(SD_MUTEX(un)); 9955 mutex_enter(&un->un_pm_mutex); 9956 9957 if (un->un_pm_idle_timeid == NULL) { 9958 /* 300 ms. */ 9959 un->un_pm_idle_timeid = 9960 timeout(sd_pm_idletimeout_handler, un, 9961 (drv_usectohz((clock_t)300000))); 9962 /* 9963 * Include an extra call to busy which keeps the 9964 * device busy with-respect-to the PM layer 9965 * until the timer fires, at which time it'll 9966 * get the extra idle call. 9967 */ 9968 (void) pm_busy_component(SD_DEVINFO(un), 0); 9969 } 9970 } 9971 } 9972 un->un_pm_busy = FALSE; 9973 /* Next... */ 9974 cv_signal(&un->un_pm_busy_cv); 9975 9976 un->un_pm_count++; 9977 9978 SD_TRACE(SD_LOG_IO_PM, un, 9979 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9980 9981 mutex_exit(&un->un_pm_mutex); 9982 9983 return (return_status); 9984 } 9985 9986 9987 /* 9988 * Function: sd_pm_exit 9989 * 9990 * Description: Called at the completion of a command to manage busy 9991 * status for the device. If the device becomes idle the 9992 * PM framework is notified. 9993 * 9994 * Context: Kernel thread context 9995 */ 9996 9997 static void 9998 sd_pm_exit(struct sd_lun *un) 9999 { 10000 ASSERT(!mutex_owned(SD_MUTEX(un))); 10001 ASSERT(!mutex_owned(&un->un_pm_mutex)); 10002 10003 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 10004 10005 /* 10006 * After attach the following flag is only read, so don't 10007 * take the penalty of acquiring a mutex for it. 10008 */ 10009 if (un->un_f_pm_is_enabled == TRUE) { 10010 10011 mutex_enter(&un->un_pm_mutex); 10012 un->un_pm_count--; 10013 10014 SD_TRACE(SD_LOG_IO_PM, un, 10015 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 10016 10017 ASSERT(un->un_pm_count >= 0); 10018 if (un->un_pm_count == 0) { 10019 mutex_exit(&un->un_pm_mutex); 10020 10021 SD_TRACE(SD_LOG_IO_PM, un, 10022 "sd_pm_exit: idle component\n"); 10023 10024 (void) pm_idle_component(SD_DEVINFO(un), 0); 10025 10026 } else { 10027 mutex_exit(&un->un_pm_mutex); 10028 } 10029 } 10030 10031 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 10032 } 10033 10034 10035 /* 10036 * Function: sdopen 10037 * 10038 * Description: Driver's open(9e) entry point function. 
10039 *
10040 * Arguments: dev_p - pointer to device number
10041 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
10042 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10043 * cred_p - user credential pointer
10044 *
10045 * Return Code: EINVAL
10046 * ENXIO
10047 * EIO
10048 * EROFS
10049 * EBUSY
10050 *
10051 * Context: Kernel thread context
10052 */
10053 /* ARGSUSED */
10054 static int
10055 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
10056 {
10057 struct sd_lun *un;
10058 int nodelay;
10059 int part;
10060 uint64_t partmask;
10061 int instance;
10062 dev_t dev;
10063 int rval = EIO;
10064 diskaddr_t nblks = 0;
10065 diskaddr_t label_cap;
10066
10067 /* Validate the open type */
10068 if (otyp >= OTYPCNT) {
10069 return (EINVAL);
10070 }
10071
10072 dev = *dev_p;
10073 instance = SDUNIT(dev);
10074
10075 /*
10076 * Fail the open if there is no softstate for the instance.
10077 */
10078 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
10079 /*
10080 * The probe cache only needs to be cleared when open(9e) fails
10081 * with ENXIO (4238046).
10082 */
10083 /*
10084 * Unconditionally clearing the probe cache is OK with
10085 * separate sd/ssd binaries; on the x86 platform it can be
10086 * an issue, with both parallel
10087 * and fibre in one binary.
10088 */
10089 sd_scsi_clear_probe_cache();
10090 return (ENXIO);
10091 }
10092
10093 nodelay = (flag & (FNDELAY | FNONBLOCK));
10094 part = SDPART(dev);
10095 partmask = 1 << part;
10096
10097 mutex_enter(SD_MUTEX(un));
10098
10099 /*
10100 * All device accesses go through sdstrategy(), where we check
10101 * the suspend status, but there could be a scsi_poll command,
10102 * which bypasses sdstrategy(), so we need to check the pm
10103 * status as well.
10104 */
10105
10106 if (!nodelay) {
10107 while ((un->un_state == SD_STATE_SUSPENDED) ||
10108 (un->un_state == SD_STATE_PM_CHANGING)) {
10109 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10110 }
10111
10112 mutex_exit(SD_MUTEX(un));
10113 if (sd_pm_entry(un) != DDI_SUCCESS) {
10114 rval = EIO;
10115 SD_ERROR(SD_LOG_OPEN_CLOSE, un,
10116 "sdopen: sd_pm_entry failed\n");
10117 goto open_failed_with_pm;
10118 }
10119 mutex_enter(SD_MUTEX(un));
10120 }
10121
10122 /* check for previous exclusive open */
10123 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
10124 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10125 "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
10126 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
10127
10128 if (un->un_exclopen & (partmask)) {
10129 goto excl_open_fail;
10130 }
10131
10132 if (flag & FEXCL) {
10133 int i;
10134 if (un->un_ocmap.lyropen[part]) {
10135 goto excl_open_fail;
10136 }
10137 for (i = 0; i < (OTYPCNT - 1); i++) {
10138 if (un->un_ocmap.regopen[i] & (partmask)) {
10139 goto excl_open_fail;
10140 }
10141 }
10142 }
10143
10144 /*
10145 * Check the write permission if this is a removable media device,
10146 * NDELAY has not been set, and write permission is requested.
10147 *
10148 * Note: If NDELAY was set and this is write-protected media, the WRITE
10149 * attempt will fail with EIO as part of the I/O processing. This is a
10150 * more permissive implementation that allows the open to succeed and
10151 * WRITE attempts to fail when appropriate.
10152 */ 10153 if (un->un_f_chk_wp_open) { 10154 if ((flag & FWRITE) && (!nodelay)) { 10155 mutex_exit(SD_MUTEX(un)); 10156 /* 10157 * Defer the check for write permission on writable 10158 * DVD drive till sdstrategy and will not fail open even 10159 * if FWRITE is set as the device can be writable 10160 * depending upon the media and the media can change 10161 * after the call to open(). 10162 */ 10163 if (un->un_f_dvdram_writable_device == FALSE) { 10164 if (ISCD(un) || sr_check_wp(dev)) { 10165 rval = EROFS; 10166 mutex_enter(SD_MUTEX(un)); 10167 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10168 "write to cd or write protected media\n"); 10169 goto open_fail; 10170 } 10171 } 10172 mutex_enter(SD_MUTEX(un)); 10173 } 10174 } 10175 10176 /* 10177 * If opening in NDELAY/NONBLOCK mode, just return. 10178 * Check if disk is ready and has a valid geometry later. 10179 */ 10180 if (!nodelay) { 10181 sd_ssc_t *ssc; 10182 10183 mutex_exit(SD_MUTEX(un)); 10184 ssc = sd_ssc_init(un); 10185 rval = sd_ready_and_valid(ssc, part); 10186 sd_ssc_fini(ssc); 10187 mutex_enter(SD_MUTEX(un)); 10188 /* 10189 * Fail if device is not ready or if the number of disk 10190 * blocks is zero or negative for non CD devices. 10191 */ 10192 10193 nblks = 0; 10194 10195 if (rval == SD_READY_VALID && (!ISCD(un))) { 10196 /* if cmlb_partinfo fails, nblks remains 0 */ 10197 mutex_exit(SD_MUTEX(un)); 10198 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 10199 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 10200 mutex_enter(SD_MUTEX(un)); 10201 } 10202 10203 if ((rval != SD_READY_VALID) || 10204 (!ISCD(un) && nblks <= 0)) { 10205 rval = un->un_f_has_removable_media ? ENXIO : EIO; 10206 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10207 "device not ready or invalid disk block value\n"); 10208 goto open_fail; 10209 } 10210 #if defined(__x86) 10211 } else { 10212 uchar_t *cp; 10213 /* 10214 * x86 requires special nodelay handling, so that p0 is 10215 * always defined and accessible. 10216 * Invalidate geometry only if device is not already open. 10217 */ 10218 cp = &un->un_ocmap.chkd[0]; 10219 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10220 if (*cp != (uchar_t)0) { 10221 break; 10222 } 10223 cp++; 10224 } 10225 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10226 mutex_exit(SD_MUTEX(un)); 10227 cmlb_invalidate(un->un_cmlbhandle, 10228 (void *)SD_PATH_DIRECT); 10229 mutex_enter(SD_MUTEX(un)); 10230 } 10231 10232 #endif 10233 } 10234 10235 if (otyp == OTYP_LYR) { 10236 un->un_ocmap.lyropen[part]++; 10237 } else { 10238 un->un_ocmap.regopen[otyp] |= partmask; 10239 } 10240 10241 /* Set up open and exclusive open flags */ 10242 if (flag & FEXCL) { 10243 un->un_exclopen |= (partmask); 10244 } 10245 10246 /* 10247 * If the lun is EFI labeled and lun capacity is greater than the 10248 * capacity contained in the label, log a sys-event to notify the 10249 * interested module. 10250 * To avoid an infinite loop of logging sys-event, we only log the 10251 * event when the lun is not opened in NDELAY mode. The event handler 10252 * should open the lun in NDELAY mode. 10253 */ 10254 if (!nodelay) { 10255 mutex_exit(SD_MUTEX(un)); 10256 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 10257 (void*)SD_PATH_DIRECT) == 0) { 10258 mutex_enter(SD_MUTEX(un)); 10259 if (un->un_f_blockcount_is_valid && 10260 un->un_blockcount > label_cap && 10261 un->un_f_expnevent == B_FALSE) { 10262 un->un_f_expnevent = B_TRUE; 10263 mutex_exit(SD_MUTEX(un)); 10264 sd_log_lun_expansion_event(un, 10265 (nodelay ? 
KM_NOSLEEP : KM_SLEEP));
10266 mutex_enter(SD_MUTEX(un));
10267 }
10268 } else {
10269 mutex_enter(SD_MUTEX(un));
10270 }
10271 }
10272
10273 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10274 "open of part %d type %d\n", part, otyp);
10275
10276 mutex_exit(SD_MUTEX(un));
10277 if (!nodelay) {
10278 sd_pm_exit(un);
10279 }
10280
10281 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n");
10282 return (DDI_SUCCESS);
10283
10284 excl_open_fail:
10285 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n");
10286 rval = EBUSY;
10287
10288 open_fail:
10289 mutex_exit(SD_MUTEX(un));
10290
10291 /*
10292 * On a failed open we must exit the pm management.
10293 */
10294 if (!nodelay) {
10295 sd_pm_exit(un);
10296 }
10297 open_failed_with_pm:
10298
10299 return (rval);
10300 }
10301
10302
10303 /*
10304 * Function: sdclose
10305 *
10306 * Description: Driver's close(9e) entry point function.
10307 *
10308 * Arguments: dev - device number
10309 * flag - file status flag, informational only
10310 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10311 * cred_p - user credential pointer
10312 *
10313 * Return Code: ENXIO
10314 *
10315 * Context: Kernel thread context
10316 */
10317 /* ARGSUSED */
10318 static int
10319 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
10320 {
10321 struct sd_lun *un;
10322 uchar_t *cp;
10323 int part;
10324 int nodelay;
10325 int rval = 0;
10326
10327 /* Validate the open type */
10328 if (otyp >= OTYPCNT) {
10329 return (ENXIO);
10330 }
10331
10332 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10333 return (ENXIO);
10334 }
10335
10336 part = SDPART(dev);
10337 nodelay = flag & (FNDELAY | FNONBLOCK);
10338
10339 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10340 "sdclose: close of part %d type %d\n", part, otyp);
10341
10342 mutex_enter(SD_MUTEX(un));
10343
10344 /* Don't proceed if power is being changed. */
10345 while (un->un_state == SD_STATE_PM_CHANGING) {
10346 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10347 }
10348
10349 if (un->un_exclopen & (1 << part)) {
10350 un->un_exclopen &= ~(1 << part);
10351 }
10352
10353 /* Update the open partition map */
10354 if (otyp == OTYP_LYR) {
10355 un->un_ocmap.lyropen[part] -= 1;
10356 } else {
10357 un->un_ocmap.regopen[otyp] &= ~(1 << part);
10358 }
10359
10360 cp = &un->un_ocmap.chkd[0];
10361 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10362 if (*cp != '\0') {
10363 break;
10364 }
10365 cp++;
10366 }
10367
10368 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10369 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
10370
10371 /*
10372 * We avoid persistence upon the last close, and set
10373 * the throttle back to the maximum.
10374 */
10375 un->un_throttle = un->un_saved_throttle;
10376
10377 if (un->un_state == SD_STATE_OFFLINE) {
10378 if (un->un_f_is_fibre == FALSE) {
10379 scsi_log(SD_DEVINFO(un), sd_label,
10380 CE_WARN, "offline\n");
10381 }
10382 mutex_exit(SD_MUTEX(un));
10383 cmlb_invalidate(un->un_cmlbhandle,
10384 (void *)SD_PATH_DIRECT);
10385 mutex_enter(SD_MUTEX(un));
10386
10387 } else {
10388 /*
10389 * Flush any outstanding writes in NVRAM cache.
10390 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
10391 * cmd; it may not work for non-Pluto devices.
10392 * SYNCHRONIZE CACHE is not required for removables,
10393 * except DVD-RAM drives.
10394 *
10395 * Also note: because SYNCHRONIZE CACHE is currently
10396 * the only command issued here that requires the
10397 * drive be powered up, only do the power up before
10398 * sending the Sync Cache command.
If additional
10399 * commands are added which require a powered up
10400 * drive, the following sequence may have to change.
10401 *
10402 * And finally, note that parallel SCSI on SPARC
10403 * only issues a Sync Cache to DVD-RAM, a newly
10404 * supported device.
10405 */
10406 #if defined(__x86)
10407 if ((un->un_f_sync_cache_supported &&
10408 un->un_f_sync_cache_required) ||
10409 un->un_f_dvdram_writable_device == TRUE) {
10410 #else
10411 if (un->un_f_dvdram_writable_device == TRUE) {
10412 #endif
10413 mutex_exit(SD_MUTEX(un));
10414 if (sd_pm_entry(un) == DDI_SUCCESS) {
10415 rval =
10416 sd_send_scsi_SYNCHRONIZE_CACHE(un,
10417 NULL);
10418 /* ignore error if not supported */
10419 if (rval == ENOTSUP) {
10420 rval = 0;
10421 } else if (rval != 0) {
10422 rval = EIO;
10423 }
10424 sd_pm_exit(un);
10425 } else {
10426 rval = EIO;
10427 }
10428 mutex_enter(SD_MUTEX(un));
10429 }
10430
10431 /*
10432 * For devices that support DOOR_LOCK, send an ALLOW
10433 * MEDIA REMOVAL command, but don't get upset if it
10434 * fails. We need to raise the power of the drive before
10435 * we can call sd_send_scsi_DOORLOCK().
10436 */
10437 if (un->un_f_doorlock_supported) {
10438 mutex_exit(SD_MUTEX(un));
10439 if (sd_pm_entry(un) == DDI_SUCCESS) {
10440 sd_ssc_t *ssc;
10441
10442 ssc = sd_ssc_init(un);
10443 rval = sd_send_scsi_DOORLOCK(ssc,
10444 SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
10445 if (rval != 0)
10446 sd_ssc_assessment(ssc,
10447 SD_FMT_IGNORE);
10448 sd_ssc_fini(ssc);
10449
10450 sd_pm_exit(un);
10451 if (ISCD(un) && (rval != 0) &&
10452 (nodelay != 0)) {
10453 rval = ENXIO;
10454 }
10455 } else {
10456 rval = EIO;
10457 }
10458 mutex_enter(SD_MUTEX(un));
10459 }
10460
10461 /*
10462 * If a device has removable media, invalidate all
10463 * parameters related to media, such as geometry,
10464 * blocksize, and blockcount.
10465 */
10466 if (un->un_f_has_removable_media) {
10467 sr_ejected(un);
10468 }
10469
10470 /*
10471 * Destroy the cache (if it exists) which was
10472 * allocated for the write maps, as long as no
10473 * other outstanding commands for the device exist.
10474 * (If we don't destroy it here, we will do so later
10475 * on detach. More likely we'll just reuse it on
10476 * a future open.)
10477 */
10478 if ((un->un_wm_cache != NULL) &&
10479 (un->un_ncmds_in_driver == 0)) {
10480 kmem_cache_destroy(un->un_wm_cache);
10481 un->un_wm_cache = NULL;
10482 }
10483 }
10484 }
10485
10486 mutex_exit(SD_MUTEX(un));
10487
10488 return (rval);
10489 }
10490
10491
10492 /*
10493 * Function: sd_ready_and_valid
10494 *
10495 * Description: Test whether the device is ready and has a valid geometry.
10496 *
10497 * Arguments: ssc - sd_ssc_t that contains the un
10498 * part - partition number to check
10499 *
10500 * Return Code: SD_READY_VALID ready and valid label
10501 * SD_NOT_READY_VALID not ready, no label
10502 * SD_RESERVED_BY_OTHERS reservation conflict
10503 *
10504 * Context: Never called at interrupt context.
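 * As used by the read/write entry points later in this file, a
 * typical caller looks like:
 *
 *	ssc = sd_ssc_init(un);
 *	if (sd_ready_and_valid(ssc, SDPART(dev)) != SD_READY_VALID)
 *		err = EIO;
 *	sd_ssc_fini(ssc);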
10505 */
10506
10507 static int
10508 sd_ready_and_valid(sd_ssc_t *ssc, int part)
10509 {
10510 struct sd_errstats *stp;
10511 uint64_t capacity;
10512 uint_t lbasize;
10513 int rval = SD_READY_VALID;
10514 char name_str[48];
10515 boolean_t is_valid;
10516 struct sd_lun *un;
10517 int status;
10518
10519 ASSERT(ssc != NULL);
10520 un = ssc->ssc_un;
10521 ASSERT(un != NULL);
10522 ASSERT(!mutex_owned(SD_MUTEX(un)));
10523
10524 mutex_enter(SD_MUTEX(un));
10525 /*
10526 * If a device has removable media, we must check whether the media is
10527 * ready when checking whether this device is ready and valid.
10528 */
10529 if (un->un_f_has_removable_media) {
10530 mutex_exit(SD_MUTEX(un));
10531 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10532
10533 if (status != 0) {
10534 rval = SD_NOT_READY_VALID;
10535 mutex_enter(SD_MUTEX(un));
10536
10537 /* Ignore all failed status for removable media */
10538 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10539
10540 goto done;
10541 }
10542
10543 is_valid = SD_IS_VALID_LABEL(un);
10544 mutex_enter(SD_MUTEX(un));
10545 if (!is_valid ||
10546 (un->un_f_blockcount_is_valid == FALSE) ||
10547 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
10548
10549 /* capacity has to be read on every open. */
10550 mutex_exit(SD_MUTEX(un));
10551 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
10552 &lbasize, SD_PATH_DIRECT);
10553
10554 if (status != 0) {
10555 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10556
10557 cmlb_invalidate(un->un_cmlbhandle,
10558 (void *)SD_PATH_DIRECT);
10559 mutex_enter(SD_MUTEX(un));
10560 rval = SD_NOT_READY_VALID;
10561
10562 goto done;
10563 } else {
10564 mutex_enter(SD_MUTEX(un));
10565 sd_update_block_info(un, lbasize, capacity);
10566 }
10567 }
10568
10569 /*
10570 * Check whether the media in the device is writable.
10571 */
10572 if (!is_valid && ISCD(un)) {
10573 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
10574 }
10575
10576 } else {
10577 /*
10578 * Do a test unit ready to clear any unit attention from non-CD
10579 * devices.
10580 */
10581 mutex_exit(SD_MUTEX(un));
10582
10583 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10584 if (status != 0) {
10585 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10586 }
10587
10588 mutex_enter(SD_MUTEX(un));
10589 }
10590
10591
10592 /*
10593 * If this is a non-512-byte block device, allocate space for
10594 * the wmap cache. This is done here since this routine is called
10595 * every time the media is changed and the
10596 * block size is a function of the media rather than the device.
10597 */
10598 if (((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR ||
10599 un->un_f_non_devbsize_supported) &&
10600 un->un_tgt_blocksize != DEV_BSIZE) ||
10601 un->un_f_enable_rmw) {
10602 if (!(un->un_wm_cache)) {
10603 (void) snprintf(name_str, sizeof (name_str),
10604 "%s%d_cache",
10605 ddi_driver_name(SD_DEVINFO(un)),
10606 ddi_get_instance(SD_DEVINFO(un)));
10607 un->un_wm_cache = kmem_cache_create(
10608 name_str, sizeof (struct sd_w_map),
10609 8, sd_wm_cache_constructor,
10610 sd_wm_cache_destructor, NULL,
10611 (void *)un, NULL, 0);
10612 if (!(un->un_wm_cache)) {
10613 rval = ENOMEM;
10614 goto done;
10615 }
10616 }
10617 }
10618
10619 if (un->un_state == SD_STATE_NORMAL) {
10620 /*
10621 * If the target is not yet ready here (defined by a TUR
10622 * failure), invalidate the geometry and print an 'offline'
10623 * message. This is a legacy message, as the state of the
10624 * target is not actually changed to SD_STATE_OFFLINE.
10625 * 10626 * If the TUR fails for EACCES (Reservation Conflict), 10627 * SD_RESERVED_BY_OTHERS will be returned to indicate 10628 * reservation conflict. If the TUR fails for other 10629 * reasons, SD_NOT_READY_VALID will be returned. 10630 */ 10631 int err; 10632 10633 mutex_exit(SD_MUTEX(un)); 10634 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10635 mutex_enter(SD_MUTEX(un)); 10636 10637 if (err != 0) { 10638 mutex_exit(SD_MUTEX(un)); 10639 cmlb_invalidate(un->un_cmlbhandle, 10640 (void *)SD_PATH_DIRECT); 10641 mutex_enter(SD_MUTEX(un)); 10642 if (err == EACCES) { 10643 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10644 "reservation conflict\n"); 10645 rval = SD_RESERVED_BY_OTHERS; 10646 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10647 } else { 10648 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10649 "drive offline\n"); 10650 rval = SD_NOT_READY_VALID; 10651 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 10652 } 10653 goto done; 10654 } 10655 } 10656 10657 if (un->un_f_format_in_progress == FALSE) { 10658 mutex_exit(SD_MUTEX(un)); 10659 10660 (void) cmlb_validate(un->un_cmlbhandle, 0, 10661 (void *)SD_PATH_DIRECT); 10662 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL, 10663 NULL, (void *) SD_PATH_DIRECT) != 0) { 10664 rval = SD_NOT_READY_VALID; 10665 mutex_enter(SD_MUTEX(un)); 10666 10667 goto done; 10668 } 10669 if (un->un_f_pkstats_enabled) { 10670 sd_set_pstats(un); 10671 SD_TRACE(SD_LOG_IO_PARTITION, un, 10672 "sd_ready_and_valid: un:0x%p pstats created and " 10673 "set\n", un); 10674 } 10675 mutex_enter(SD_MUTEX(un)); 10676 } 10677 10678 /* 10679 * If this device supports DOOR_LOCK command, try and send 10680 * this command to PREVENT MEDIA REMOVAL, but don't get upset 10681 * if it fails. For a CD, however, it is an error 10682 */ 10683 if (un->un_f_doorlock_supported) { 10684 mutex_exit(SD_MUTEX(un)); 10685 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 10686 SD_PATH_DIRECT); 10687 10688 if ((status != 0) && ISCD(un)) { 10689 rval = SD_NOT_READY_VALID; 10690 mutex_enter(SD_MUTEX(un)); 10691 10692 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10693 10694 goto done; 10695 } else if (status != 0) 10696 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10697 mutex_enter(SD_MUTEX(un)); 10698 } 10699 10700 /* The state has changed, inform the media watch routines */ 10701 un->un_mediastate = DKIO_INSERTED; 10702 cv_broadcast(&un->un_state_cv); 10703 rval = SD_READY_VALID; 10704 10705 done: 10706 10707 /* 10708 * Initialize the capacity kstat value, if no media previously 10709 * (capacity kstat is 0) and a media has been inserted 10710 * (un_blockcount > 0). 10711 */ 10712 if (un->un_errstats != NULL) { 10713 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10714 if ((stp->sd_capacity.value.ui64 == 0) && 10715 (un->un_f_blockcount_is_valid == TRUE)) { 10716 stp->sd_capacity.value.ui64 = 10717 (uint64_t)((uint64_t)un->un_blockcount * 10718 un->un_sys_blocksize); 10719 } 10720 } 10721 10722 mutex_exit(SD_MUTEX(un)); 10723 return (rval); 10724 } 10725 10726 10727 /* 10728 * Function: sdmin 10729 * 10730 * Description: Routine to limit the size of a data transfer. Used in 10731 * conjunction with physio(9F). 10732 * 10733 * Arguments: bp - pointer to the indicated buf(9S) struct. 10734 * 10735 * Context: Kernel thread context. 
10736 */ 10737 10738 static void 10739 sdmin(struct buf *bp) 10740 { 10741 struct sd_lun *un; 10742 int instance; 10743 10744 instance = SDUNIT(bp->b_edev); 10745 10746 un = ddi_get_soft_state(sd_state, instance); 10747 ASSERT(un != NULL); 10748 10749 /* 10750 * We depend on buf breakup to restrict 10751 * IO size if it is enabled. 10752 */ 10753 if (un->un_buf_breakup_supported) { 10754 return; 10755 } 10756 10757 if (bp->b_bcount > un->un_max_xfer_size) { 10758 bp->b_bcount = un->un_max_xfer_size; 10759 } 10760 } 10761 10762 10763 /* 10764 * Function: sdread 10765 * 10766 * Description: Driver's read(9e) entry point function. 10767 * 10768 * Arguments: dev - device number 10769 * uio - structure pointer describing where data is to be stored 10770 * in user's space 10771 * cred_p - user credential pointer 10772 * 10773 * Return Code: ENXIO 10774 * EIO 10775 * EINVAL 10776 * value returned by physio 10777 * 10778 * Context: Kernel thread context. 10779 */ 10780 /* ARGSUSED */ 10781 static int 10782 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 10783 { 10784 struct sd_lun *un = NULL; 10785 int secmask; 10786 int err = 0; 10787 sd_ssc_t *ssc; 10788 10789 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10790 return (ENXIO); 10791 } 10792 10793 ASSERT(!mutex_owned(SD_MUTEX(un))); 10794 10795 10796 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10797 mutex_enter(SD_MUTEX(un)); 10798 /* 10799 * Because the call to sd_ready_and_valid will issue I/O, we 10800 * must wait here if either the device is suspended or 10801 * if its power level is changing. 10802 */ 10803 while ((un->un_state == SD_STATE_SUSPENDED) || 10804 (un->un_state == SD_STATE_PM_CHANGING)) { 10805 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10806 } 10807 un->un_ncmds_in_driver++; 10808 mutex_exit(SD_MUTEX(un)); 10809 10810 /* Initialize sd_ssc_t for internal uscsi commands */ 10811 ssc = sd_ssc_init(un); 10812 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10813 err = EIO; 10814 } else { 10815 err = 0; 10816 } 10817 sd_ssc_fini(ssc); 10818 10819 mutex_enter(SD_MUTEX(un)); 10820 un->un_ncmds_in_driver--; 10821 ASSERT(un->un_ncmds_in_driver >= 0); 10822 mutex_exit(SD_MUTEX(un)); 10823 if (err != 0) 10824 return (err); 10825 } 10826 10827 /* 10828 * Read requests are restricted to multiples of the system block size. 10829 */ 10830 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR && 10831 !un->un_f_enable_rmw) 10832 secmask = un->un_tgt_blocksize - 1; 10833 else 10834 secmask = DEV_BSIZE - 1; 10835 10836 if (uio->uio_loffset & ((offset_t)(secmask))) { 10837 SD_ERROR(SD_LOG_READ_WRITE, un, 10838 "sdread: file offset not modulo %d\n", 10839 secmask + 1); 10840 err = EINVAL; 10841 } else if (uio->uio_iov->iov_len & (secmask)) { 10842 SD_ERROR(SD_LOG_READ_WRITE, un, 10843 "sdread: transfer length not modulo %d\n", 10844 secmask + 1); 10845 err = EINVAL; 10846 } else { 10847 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10848 } 10849 10850 return (err); 10851 } 10852 10853 10854 /* 10855 * Function: sdwrite 10856 * 10857 * Description: Driver's write(9e) entry point function. 10858 * 10859 * Arguments: dev - device number 10860 * uio - structure pointer describing where data is stored in 10861 * user's space 10862 * cred_p - user credential pointer 10863 * 10864 * Return Code: ENXIO 10865 * EIO 10866 * EINVAL 10867 * value returned by physio 10868 * 10869 * Context: Kernel thread context.
10870 */ 10871 /* ARGSUSED */ 10872 static int 10873 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10874 { 10875 struct sd_lun *un = NULL; 10876 int secmask; 10877 int err = 0; 10878 sd_ssc_t *ssc; 10879 10880 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10881 return (ENXIO); 10882 } 10883 10884 ASSERT(!mutex_owned(SD_MUTEX(un))); 10885 10886 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10887 mutex_enter(SD_MUTEX(un)); 10888 /* 10889 * Because the call to sd_ready_and_valid will issue I/O, we 10890 * must wait here if either the device is suspended or 10891 * if its power level is changing. 10892 */ 10893 while ((un->un_state == SD_STATE_SUSPENDED) || 10894 (un->un_state == SD_STATE_PM_CHANGING)) { 10895 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10896 } 10897 un->un_ncmds_in_driver++; 10898 mutex_exit(SD_MUTEX(un)); 10899 10900 /* Initialize sd_ssc_t for internal uscsi commands */ 10901 ssc = sd_ssc_init(un); 10902 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10903 err = EIO; 10904 } else { 10905 err = 0; 10906 } 10907 sd_ssc_fini(ssc); 10908 10909 mutex_enter(SD_MUTEX(un)); 10910 un->un_ncmds_in_driver--; 10911 ASSERT(un->un_ncmds_in_driver >= 0); 10912 mutex_exit(SD_MUTEX(un)); 10913 if (err != 0) 10914 return (err); 10915 } 10916 10917 /* 10918 * Write requests are restricted to multiples of the system block size. 10919 */ 10920 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR && 10921 !un->un_f_enable_rmw) 10922 secmask = un->un_tgt_blocksize - 1; 10923 else 10924 secmask = DEV_BSIZE - 1; 10925 10926 if (uio->uio_loffset & ((offset_t)(secmask))) { 10927 SD_ERROR(SD_LOG_READ_WRITE, un, 10928 "sdwrite: file offset not modulo %d\n", 10929 secmask + 1); 10930 err = EINVAL; 10931 } else if (uio->uio_iov->iov_len & (secmask)) { 10932 SD_ERROR(SD_LOG_READ_WRITE, un, 10933 "sdwrite: transfer length not modulo %d\n", 10934 secmask + 1); 10935 err = EINVAL; 10936 } else { 10937 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10938 } 10939 10940 return (err); 10941 } 10942 10943 10944 /* 10945 * Function: sdaread 10946 * 10947 * Description: Driver's aread(9e) entry point function. 10948 * 10949 * Arguments: dev - device number 10950 * aio - structure pointer describing where data is to be stored 10951 * cred_p - user credential pointer 10952 * 10953 * Return Code: ENXIO 10954 * EIO 10955 * EINVAL 10956 * value returned by aphysio 10957 * 10958 * Context: Kernel thread context. 10959 */ 10960 /* ARGSUSED */ 10961 static int 10962 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10963 { 10964 struct sd_lun *un = NULL; 10965 struct uio *uio = aio->aio_uio; 10966 int secmask; 10967 int err = 0; 10968 sd_ssc_t *ssc; 10969 10970 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10971 return (ENXIO); 10972 } 10973 10974 ASSERT(!mutex_owned(SD_MUTEX(un))); 10975 10976 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10977 mutex_enter(SD_MUTEX(un)); 10978 /* 10979 * Because the call to sd_ready_and_valid will issue I/O, we 10980 * must wait here if either the device is suspended or 10981 * if its power level is changing.
10982 */ 10983 while ((un->un_state == SD_STATE_SUSPENDED) || 10984 (un->un_state == SD_STATE_PM_CHANGING)) { 10985 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10986 } 10987 un->un_ncmds_in_driver++; 10988 mutex_exit(SD_MUTEX(un)); 10989 10990 /* Initialize sd_ssc_t for internal uscsi commands */ 10991 ssc = sd_ssc_init(un); 10992 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10993 err = EIO; 10994 } else { 10995 err = 0; 10996 } 10997 sd_ssc_fini(ssc); 10998 10999 mutex_enter(SD_MUTEX(un)); 11000 un->un_ncmds_in_driver--; 11001 ASSERT(un->un_ncmds_in_driver >= 0); 11002 mutex_exit(SD_MUTEX(un)); 11003 if (err != 0) 11004 return (err); 11005 } 11006 11007 /* 11008 * Read requests are restricted to multiples of the system block size. 11009 */ 11010 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR && 11011 !un->un_f_enable_rmw) 11012 secmask = un->un_tgt_blocksize - 1; 11013 else 11014 secmask = DEV_BSIZE - 1; 11015 11016 if (uio->uio_loffset & ((offset_t)(secmask))) { 11017 SD_ERROR(SD_LOG_READ_WRITE, un, 11018 "sdaread: file offset not modulo %d\n", 11019 secmask + 1); 11020 err = EINVAL; 11021 } else if (uio->uio_iov->iov_len & (secmask)) { 11022 SD_ERROR(SD_LOG_READ_WRITE, un, 11023 "sdaread: transfer length not modulo %d\n", 11024 secmask + 1); 11025 err = EINVAL; 11026 } else { 11027 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 11028 } 11029 11030 return (err); 11031 } 11032 11033 11034 /* 11035 * Function: sdawrite 11036 * 11037 * Description: Driver's awrite(9e) entry point function. 11038 * 11039 * Arguments: dev - device number 11040 * aio - structure pointer describing where data is stored 11041 * cred_p - user credential pointer 11042 * 11043 * Return Code: ENXIO 11044 * EIO 11045 * EINVAL 11046 * value returned by aphysio 11047 * 11048 * Context: Kernel thread context. 11049 */ 11050 /* ARGSUSED */ 11051 static int 11052 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 11053 { 11054 struct sd_lun *un = NULL; 11055 struct uio *uio = aio->aio_uio; 11056 int secmask; 11057 int err = 0; 11058 sd_ssc_t *ssc; 11059 11060 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 11061 return (ENXIO); 11062 } 11063 11064 ASSERT(!mutex_owned(SD_MUTEX(un))); 11065 11066 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 11067 mutex_enter(SD_MUTEX(un)); 11068 /* 11069 * Because the call to sd_ready_and_valid will issue I/O, we 11070 * must wait here if either the device is suspended or 11071 * if its power level is changing. 11072 */ 11073 while ((un->un_state == SD_STATE_SUSPENDED) || 11074 (un->un_state == SD_STATE_PM_CHANGING)) { 11075 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11076 } 11077 un->un_ncmds_in_driver++; 11078 mutex_exit(SD_MUTEX(un)); 11079 11080 /* Initialize sd_ssc_t for internal uscsi commands */ 11081 ssc = sd_ssc_init(un); 11082 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 11083 err = EIO; 11084 } else { 11085 err = 0; 11086 } 11087 sd_ssc_fini(ssc); 11088 11089 mutex_enter(SD_MUTEX(un)); 11090 un->un_ncmds_in_driver--; 11091 ASSERT(un->un_ncmds_in_driver >= 0); 11092 mutex_exit(SD_MUTEX(un)); 11093 if (err != 0) 11094 return (err); 11095 } 11096 11097 /* 11098 * Write requests are restricted to multiples of the system block size.
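 *
 * For example (illustrative values): with DEV_BSIZE (512) blocks,
 * secmask is 0x1FF and an offset is aligned iff
 * (uio_loffset & 0x1FF) == 0; with a 4096-byte target block size and
 * SD_RMW_TYPE_RETURN_ERROR, secmask becomes 0xFFF.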
11099 */ 11100 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR && 11101 !un->un_f_enable_rmw) 11102 secmask = un->un_tgt_blocksize - 1; 11103 else 11104 secmask = DEV_BSIZE - 1; 11105 11106 if (uio->uio_loffset & ((offset_t)(secmask))) { 11107 SD_ERROR(SD_LOG_READ_WRITE, un, 11108 "sdawrite: file offset not modulo %d\n", 11109 secmask + 1); 11110 err = EINVAL; 11111 } else if (uio->uio_iov->iov_len & (secmask)) { 11112 SD_ERROR(SD_LOG_READ_WRITE, un, 11113 "sdawrite: transfer length not modulo %d\n", 11114 secmask + 1); 11115 err = EINVAL; 11116 } else { 11117 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 11118 } 11119 11120 return (err); 11121 } 11122 11123 11124 11125 11126 11127 /* 11128 * Driver IO processing follows the following sequence: 11129 * 11130 * sdioctl(9E) sdstrategy(9E) biodone(9F) 11131 * | | ^ 11132 * v v | 11133 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 11134 * | | | | 11135 * v | | | 11136 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 11137 * | | ^ ^ 11138 * v v | | 11139 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 11140 * | | | | 11141 * +---+ | +------------+ +-------+ 11142 * | | | | 11143 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11144 * | v | | 11145 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 11146 * | | ^ | 11147 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11148 * | v | | 11149 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 11150 * | | ^ | 11151 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11152 * | v | | 11153 * | sd_checksum_iostart() sd_checksum_iodone() | 11154 * | | ^ | 11155 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 11156 * | v | | 11157 * | sd_pm_iostart() sd_pm_iodone() | 11158 * | | ^ | 11159 * | | | | 11160 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 11161 * | ^ 11162 * v | 11163 * sd_core_iostart() | 11164 * | | 11165 * | +------>(*destroypkt)() 11166 * +-> sd_start_cmds() <-+ | | 11167 * | | | v 11168 * | | | scsi_destroy_pkt(9F) 11169 * | | | 11170 * +->(*initpkt)() +- sdintr() 11171 * | | | | 11172 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 11173 * | +-> scsi_setup_cdb(9F) | 11174 * | | 11175 * +--> scsi_transport(9F) | 11176 * | | 11177 * +----> SCSA ---->+ 11178 * 11179 * 11180 * This code is based upon the following presumptions: 11181 * 11182 * - iostart and iodone functions operate on buf(9S) structures. These 11183 * functions perform the necessary operations on the buf(9S) and pass 11184 * them along to the next function in the chain by using the macros 11185 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 11186 * (for iodone side functions). 11187 * 11188 * - The iostart side functions may sleep. The iodone side functions 11189 * are called under interrupt context and may NOT sleep. Therefore 11190 * iodone side functions also may not call iostart side functions. 11191 * (NOTE: iostart side functions should NOT sleep for memory, as 11192 * this could result in deadlock.) 11193 * 11194 * - An iostart side function may call its corresponding iodone side 11195 * function directly (if necessary). 11196 * 11197 * - In the event of an error, an iostart side function can return a buf(9S) 11198 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 11199 * b_error in the usual way of course). 11200 * 11201 * - The taskq mechanism may be used by the iodone side functions to dispatch 11202 * requests to the iostart side functions. 
The iostart side functions in 11203 * this case would be called under the context of a taskq thread, so it's 11204 * OK for them to block/sleep/spin in this case. 11205 * 11206 * - iostart side functions may allocate "shadow" buf(9S) structs and 11207 * pass them along to the next function in the chain. The corresponding 11208 * iodone side functions must coalesce the "shadow" bufs and return 11209 * the "original" buf to the next higher layer. 11210 * 11211 * - The b_private field of the buf(9S) struct holds a pointer to 11212 * an sd_xbuf struct, which contains information needed to 11213 * construct the scsi_pkt for the command. 11214 * 11215 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 11216 * layer must acquire & release the SD_MUTEX(un) as needed. 11217 */ 11218 11219 11220 /* 11221 * Create taskq for all targets in the system. This is created at 11222 * _init(9E) and destroyed at _fini(9E). 11223 * 11224 * Note: here we set the minalloc to a reasonably high number to ensure that 11225 * we will have an adequate supply of task entries available at interrupt time. 11226 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 11227 * sd_taskq_create(). Since we do not want to sleep for allocations at 11228 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 11229 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 11230 * requests at any one instant in time. 11231 */ 11232 #define SD_TASKQ_NUMTHREADS 8 11233 #define SD_TASKQ_MINALLOC 256 11234 #define SD_TASKQ_MAXALLOC 256 11235 11236 static taskq_t *sd_tq = NULL; 11237 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 11238 11239 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 11240 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 11241 11242 /* 11243 * The following task queue is being created for the write part of 11244 * read-modify-write of non-512 block size devices. 11245 * Limit the number of threads to 1 for now. This number has been chosen 11246 * considering the fact that it applies only to dvd ram drives/MO drives 11247 * currently, for which performance is not the main criterion at this stage. 11248 * Note: It remains to be explored whether we can use a single taskq in the future. 11249 */ 11250 #define SD_WMR_TASKQ_NUMTHREADS 1 11251 static taskq_t *sd_wmr_tq = NULL; 11252 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 11253 11254 /* 11255 * Function: sd_taskq_create 11256 * 11257 * Description: Create taskq thread(s) and preallocate task entries 11258 * 11259 * Return Code: None; the allocated taskqs are stored in sd_tq and sd_wmr_tq. 11260 * 11261 * Context: Can sleep. Requires blockable context. 11262 * 11263 * Notes: - The taskq() facility currently is NOT part of the DDI. 11264 * (definitely NOT recommended for 3rd-party drivers!) :-) 11265 * - taskq_create() will block for memory; it will also panic 11266 * if it cannot create the requested number of threads. 11267 * - Currently taskq_create() creates threads that cannot be 11268 * swapped.
11269 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 11270 * supply of taskq entries at interrupt time (ie, so that we 11271 * do not have to sleep for memory) 11272 */ 11273 11274 static void 11275 sd_taskq_create(void) 11276 { 11277 char taskq_name[TASKQ_NAMELEN]; 11278 11279 ASSERT(sd_tq == NULL); 11280 ASSERT(sd_wmr_tq == NULL); 11281 11282 (void) snprintf(taskq_name, sizeof (taskq_name), 11283 "%s_drv_taskq", sd_label); 11284 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 11285 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11286 TASKQ_PREPOPULATE)); 11287 11288 (void) snprintf(taskq_name, sizeof (taskq_name), 11289 "%s_rmw_taskq", sd_label); 11290 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 11291 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11292 TASKQ_PREPOPULATE)); 11293 } 11294 11295 11296 /* 11297 * Function: sd_taskq_delete 11298 * 11299 * Description: Complementary cleanup routine for sd_taskq_create(). 11300 * 11301 * Context: Kernel thread context. 11302 */ 11303 11304 static void 11305 sd_taskq_delete(void) 11306 { 11307 ASSERT(sd_tq != NULL); 11308 ASSERT(sd_wmr_tq != NULL); 11309 taskq_destroy(sd_tq); 11310 taskq_destroy(sd_wmr_tq); 11311 sd_tq = NULL; 11312 sd_wmr_tq = NULL; 11313 } 11314 11315 11316 /* 11317 * Function: sdstrategy 11318 * 11319 * Description: Driver's strategy (9E) entry point function. 11320 * 11321 * Arguments: bp - pointer to buf(9S) 11322 * 11323 * Return Code: Always returns zero 11324 * 11325 * Context: Kernel thread context. 11326 */ 11327 11328 static int 11329 sdstrategy(struct buf *bp) 11330 { 11331 struct sd_lun *un; 11332 11333 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11334 if (un == NULL) { 11335 bioerror(bp, EIO); 11336 bp->b_resid = bp->b_bcount; 11337 biodone(bp); 11338 return (0); 11339 } 11340 11341 /* As was done in the past, fail new commands if the state is dumping. */ 11342 if (un->un_state == SD_STATE_DUMPING) { 11343 bioerror(bp, ENXIO); 11344 bp->b_resid = bp->b_bcount; 11345 biodone(bp); 11346 return (0); 11347 } 11348 11349 ASSERT(!mutex_owned(SD_MUTEX(un))); 11350 11351 /* 11352 * Commands may sneak in while we have released the mutex in 11353 * DDI_SUSPEND; we should block new commands. However, old 11354 * commands that are still in the driver at this point should 11355 * still be allowed to drain. 11356 */ 11357 mutex_enter(SD_MUTEX(un)); 11358 /* 11359 * Must wait here if either the device is suspended or 11360 * if its power level is changing. 11361 */ 11362 while ((un->un_state == SD_STATE_SUSPENDED) || 11363 (un->un_state == SD_STATE_PM_CHANGING)) { 11364 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11365 } 11366 11367 un->un_ncmds_in_driver++; 11368 11369 /* 11370 * atapi: Since we currently run the CD in PIO mode, we need to 11371 * call bp_mapin here to avoid bp_mapin being called in interrupt 11372 * context under the HBA's init_pkt routine. 11373 */ 11374 if (un->un_f_cfg_is_atapi == TRUE) { 11375 mutex_exit(SD_MUTEX(un)); 11376 bp_mapin(bp); 11377 mutex_enter(SD_MUTEX(un)); 11378 } 11379 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 11380 un->un_ncmds_in_driver); 11381 11382 if (bp->b_flags & B_WRITE) 11383 un->un_f_sync_cache_required = TRUE; 11384 11385 mutex_exit(SD_MUTEX(un)); 11386 11387 /* 11388 * This will (eventually) allocate the sd_xbuf area and 11389 * call sd_xbuf_strategy().
We just want to return the 11390 * result of ddi_xbuf_qstrategy so that we have an optimized 11391 * tail call which saves us a stack frame. 11392 */ 11393 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 11394 } 11395 11396 11397 /* 11398 * Function: sd_xbuf_strategy 11399 * 11400 * Description: Function for initiating IO operations via the 11401 * ddi_xbuf_qstrategy() mechanism. 11402 * 11403 * Context: Kernel thread context. 11404 */ 11405 11406 static void 11407 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11408 { 11409 struct sd_lun *un = arg; 11410 11411 ASSERT(bp != NULL); 11412 ASSERT(xp != NULL); 11413 ASSERT(un != NULL); 11414 ASSERT(!mutex_owned(SD_MUTEX(un))); 11415 11416 /* 11417 * Initialize the fields in the xbuf and save a pointer to the 11418 * xbuf in bp->b_private. 11419 */ 11420 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11421 11422 /* Send the buf down the iostart chain */ 11423 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11424 } 11425 11426 11427 /* 11428 * Function: sd_xbuf_init 11429 * 11430 * Description: Prepare the given sd_xbuf struct for use. 11431 * 11432 * Arguments: un - ptr to softstate 11433 * bp - ptr to associated buf(9S) 11434 * xp - ptr to associated sd_xbuf 11435 * chain_type - IO chain type to use: 11436 * SD_CHAIN_NULL 11437 * SD_CHAIN_BUFIO 11438 * SD_CHAIN_USCSI 11439 * SD_CHAIN_DIRECT 11440 * SD_CHAIN_DIRECT_PRIORITY 11441 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11442 * initialization; may be NULL if none. 11443 * 11444 * Context: Kernel thread context 11445 */ 11446 11447 static void 11448 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11449 uchar_t chain_type, void *pktinfop) 11450 { 11451 int index; 11452 11453 ASSERT(un != NULL); 11454 ASSERT(bp != NULL); 11455 ASSERT(xp != NULL); 11456 11457 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11458 bp, chain_type); 11459 11460 xp->xb_un = un; 11461 xp->xb_pktp = NULL; 11462 xp->xb_pktinfo = pktinfop; 11463 xp->xb_private = bp->b_private; 11464 xp->xb_blkno = (daddr_t)bp->b_blkno; 11465 11466 /* 11467 * Set up the iostart and iodone chain indexes in the xbuf, based 11468 * upon the specified chain type to use. 11469 */ 11470 switch (chain_type) { 11471 case SD_CHAIN_NULL: 11472 /* 11473 * Fall thru to just use the values for the buf type, even 11474 * though for the NULL chain these values will never be used.
11475 */ 11476 /* FALLTHRU */ 11477 case SD_CHAIN_BUFIO: 11478 index = un->un_buf_chain_type; 11479 if ((!un->un_f_has_removable_media) && 11480 (un->un_tgt_blocksize != 0) && 11481 (un->un_tgt_blocksize != DEV_BSIZE || 11482 un->un_f_enable_rmw)) { 11483 int secmask = 0, blknomask = 0; 11484 if (un->un_f_enable_rmw) { 11485 blknomask = 11486 (un->un_phy_blocksize / DEV_BSIZE) - 1; 11487 secmask = un->un_phy_blocksize - 1; 11488 } else { 11489 blknomask = 11490 (un->un_tgt_blocksize / DEV_BSIZE) - 1; 11491 secmask = un->un_tgt_blocksize - 1; 11492 } 11493 11494 if ((bp->b_lblkno & (blknomask)) || 11495 (bp->b_bcount & (secmask))) { 11496 if ((un->un_f_rmw_type != 11497 SD_RMW_TYPE_RETURN_ERROR) || 11498 un->un_f_enable_rmw) { 11499 if (un->un_f_pm_is_enabled == FALSE) 11500 index = 11501 SD_CHAIN_INFO_MSS_DSK_NO_PM; 11502 else 11503 index = 11504 SD_CHAIN_INFO_MSS_DISK; 11505 } 11506 } 11507 } 11508 break; 11509 case SD_CHAIN_USCSI: 11510 index = un->un_uscsi_chain_type; 11511 break; 11512 case SD_CHAIN_DIRECT: 11513 index = un->un_direct_chain_type; 11514 break; 11515 case SD_CHAIN_DIRECT_PRIORITY: 11516 index = un->un_priority_chain_type; 11517 break; 11518 default: 11519 /* We're really broken if we ever get here... */ 11520 panic("sd_xbuf_init: illegal chain type!"); 11521 /*NOTREACHED*/ 11522 } 11523 11524 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11525 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11526 11527 /* 11528 * It might be a bit easier to simply bzero the entire xbuf above, 11529 * but it turns out that since we init a fair number of members anyway, 11530 * we save a fair number of cycles by doing explicit assignment of zero. 11531 */ 11532 xp->xb_pkt_flags = 0; 11533 xp->xb_dma_resid = 0; 11534 xp->xb_retry_count = 0; 11535 xp->xb_victim_retry_count = 0; 11536 xp->xb_ua_retry_count = 0; 11537 xp->xb_nr_retry_count = 0; 11538 xp->xb_sense_bp = NULL; 11539 xp->xb_sense_status = 0; 11540 xp->xb_sense_state = 0; 11541 xp->xb_sense_resid = 0; 11542 xp->xb_ena = 0; 11543 11544 bp->b_private = xp; 11545 bp->b_flags &= ~(B_DONE | B_ERROR); 11546 bp->b_resid = 0; 11547 bp->av_forw = NULL; 11548 bp->av_back = NULL; 11549 bioerror(bp, 0); 11550 11551 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11552 } 11553 11554 11555 /* 11556 * Function: sd_uscsi_strategy 11557 * 11558 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11559 * 11560 * Arguments: bp - buf struct ptr 11561 * 11562 * Return Code: Always returns 0 11563 * 11564 * Context: Kernel thread context 11565 */ 11566 11567 static int 11568 sd_uscsi_strategy(struct buf *bp) 11569 { 11570 struct sd_lun *un; 11571 struct sd_uscsi_info *uip; 11572 struct sd_xbuf *xp; 11573 uchar_t chain_type; 11574 uchar_t cmd; 11575 11576 ASSERT(bp != NULL); 11577 11578 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11579 if (un == NULL) { 11580 bioerror(bp, EIO); 11581 bp->b_resid = bp->b_bcount; 11582 biodone(bp); 11583 return (0); 11584 } 11585 11586 ASSERT(!mutex_owned(SD_MUTEX(un))); 11587 11588 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11589 11590 /* 11591 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11592 */ 11593 ASSERT(bp->b_private != NULL); 11594 uip = (struct sd_uscsi_info *)bp->b_private; 11595 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0]; 11596 11597 mutex_enter(SD_MUTEX(un)); 11598 /* 11599 * atapi: Since we currently run the CD in PIO mode, we need to 11600 * call bp_mapin here to
avoid bp_mapin being called in interrupt context under 11601 * the HBA's init_pkt routine. 11602 */ 11603 if (un->un_f_cfg_is_atapi == TRUE) { 11604 mutex_exit(SD_MUTEX(un)); 11605 bp_mapin(bp); 11606 mutex_enter(SD_MUTEX(un)); 11607 } 11608 un->un_ncmds_in_driver++; 11609 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11610 un->un_ncmds_in_driver); 11611 11612 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) && 11613 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1)) 11614 un->un_f_sync_cache_required = TRUE; 11615 11616 mutex_exit(SD_MUTEX(un)); 11617 11618 switch (uip->ui_flags) { 11619 case SD_PATH_DIRECT: 11620 chain_type = SD_CHAIN_DIRECT; 11621 break; 11622 case SD_PATH_DIRECT_PRIORITY: 11623 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11624 break; 11625 default: 11626 chain_type = SD_CHAIN_USCSI; 11627 break; 11628 } 11629 11630 /* 11631 * We may allocate an extra buf for external USCSI commands. If the 11632 * application asks for more than 20 bytes of sense data via USCSI, 11633 * the SCSA layer will allocate a 252-byte sense buf for that command. 11634 */ 11635 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 11636 SENSE_LENGTH) { 11637 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 11638 MAX_SENSE_LENGTH, KM_SLEEP); 11639 } else { 11640 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 11641 } 11642 11643 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 11644 11645 /* Use the index obtained within xbuf_init */ 11646 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 11647 11648 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 11649 11650 return (0); 11651 } 11652 11653 /* 11654 * Function: sd_send_scsi_cmd 11655 * 11656 * Description: Runs a USCSI command for user (when called through sdioctl), 11657 * or for the driver 11658 * 11659 * Arguments: dev - the dev_t for the device 11660 * incmd - ptr to a valid uscsi_cmd struct 11661 * flag - bit flag, indicating open settings, 32/64 bit type 11662 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11663 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11664 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11665 * to use the USCSI "direct" chain and bypass the normal 11666 * command waitq. 11667 * 11668 * Return Code: 0 - successful completion of the given command 11669 * EIO - scsi_uscsi_handle_command() failed 11670 * ENXIO - soft state not found for specified dev 11671 * EINVAL 11672 * EFAULT - copyin/copyout error 11673 * return code of scsi_uscsi_handle_command(): 11674 * EIO 11675 * ENXIO 11676 * EACCES 11677 * 11678 * Context: Waits for command to complete. Can sleep. 11679 */ 11680 11681 static int 11682 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 11683 enum uio_seg dataspace, int path_flag) 11684 { 11685 struct sd_lun *un; 11686 sd_ssc_t *ssc; 11687 int rval; 11688 11689 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 11690 if (un == NULL) { 11691 return (ENXIO); 11692 } 11693 11694 /* 11695 * Use sd_ssc_send to handle the uscsi cmd 11696 */ 11697 ssc = sd_ssc_init(un); 11698 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag); 11699 sd_ssc_fini(ssc); 11700 11701 return (rval); 11702 } 11703 11704 /* 11705 * Function: sd_ssc_init 11706 * 11707 * Description: Callers issuing uscsi commands use this function to 11708 * initialize the necessary fields, such as the uscsi_cmd 11709 * and sd_uscsi_info structs. 11710 * The return value of sd_send_scsi_cmd will be treated as a 11711 * fault in various conditions.
Even if it is not zero, some 11712 * callers may ignore the return value. That is to say, we 11713 * cannot make an accurate assessment in sdintr, since a 11714 * command failing in sdintr does not mean the caller of 11715 * sd_send_scsi_cmd will treat it as a real failure. 11716 * 11717 * To avoid printing too many error logs for a failed uscsi 11718 * packet that the caller may not treat as a failure, sd 11719 * keeps silent when handling all uscsi commands. 11720 * 11721 * During detach->attach and attach-open, for some types of 11722 * problems, the driver should be providing information about 11723 * the problem encountered. These commands use USCSI_SILENT, 11724 * which suppresses all driver information. The result is that 11725 * no information about the problem is available. Being 11726 * completely silent during this time is inappropriate. The 11727 * driver needs a more selective filter than USCSI_SILENT, so 11728 * that information related to faults is provided. 11729 * 11730 * To make an accurate assessment, the caller of 11731 * sd_send_scsi_USCSI_CMD should take ownership and 11732 * get the necessary information to print error messages. 11733 * 11734 * If we want to print the necessary info for a uscsi command, 11735 * we need to keep the uscsi_cmd and sd_uscsi_info until we can 11736 * make the assessment. We use sd_ssc_init to allocate the 11737 * necessary structs for sending a uscsi command, and we are 11738 * also responsible for freeing the memory by calling 11739 * sd_ssc_fini. 11740 * 11741 * The calling sequence will look like: 11742 * sd_ssc_init-> 11743 * 11744 * ... 11745 * 11746 * sd_send_scsi_USCSI_CMD-> 11747 * sd_ssc_send-> - - - sdintr 11748 * ... 11749 * 11750 * if we think the return value should be treated as a 11751 * failure, we make the assessment here and print the 11752 * necessary information by retrieving the uscsi_cmd and 11753 * sd_uscsi_info 11754 * ... 11755 * 11756 * sd_ssc_fini 11757 * 11758 * 11759 * Arguments: un - pointer to driver soft state (unit) structure for this 11760 * target. 11761 * 11762 * Return code: sd_ssc_t - pointer to the allocated sd_ssc_t struct; it 11763 * contains uscsi_cmd and sd_uscsi_info. 11764 * NULL - if memory cannot be allocated for the sd_ssc_t struct 11765 * 11766 * Context: Kernel Thread. 11767 */ 11768 static sd_ssc_t * 11769 sd_ssc_init(struct sd_lun *un) 11770 { 11771 sd_ssc_t *ssc; 11772 struct uscsi_cmd *ucmdp; 11773 struct sd_uscsi_info *uip; 11774 11775 ASSERT(un != NULL); 11776 ASSERT(!mutex_owned(SD_MUTEX(un))); 11777 11778 /* 11779 * Allocate sd_ssc_t structure 11780 */ 11781 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP); 11782 11783 /* 11784 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine 11785 */ 11786 ucmdp = scsi_uscsi_alloc(); 11787 11788 /* 11789 * Allocate sd_uscsi_info structure 11790 */ 11791 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11792 11793 ssc->ssc_uscsi_cmd = ucmdp; 11794 ssc->ssc_uscsi_info = uip; 11795 ssc->ssc_un = un; 11796 11797 return (ssc); 11798 } 11799 11800 /* 11801 * Function: sd_ssc_fini 11802 * 11803 * Description: Free the sd_ssc_t struct and everything hanging off it. 11804 * 11805 * Arguments: ssc - pointer to the sd_ssc_t struct.
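 *
 * A minimal sketch of the init/send/assess/fini pairing described
 * above (illustrative only; error handling elided):
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *	status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
 *	if (status != 0)
 *		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
 *	sd_ssc_fini(ssc);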
11806 */ 11807 static void 11808 sd_ssc_fini(sd_ssc_t *ssc) 11809 { 11810 scsi_uscsi_free(ssc->ssc_uscsi_cmd); 11811 11812 if (ssc->ssc_uscsi_info != NULL) { 11813 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info)); 11814 ssc->ssc_uscsi_info = NULL; 11815 } 11816 11817 kmem_free(ssc, sizeof (sd_ssc_t)); 11818 ssc = NULL; 11819 } 11820 11821 /* 11822 * Function: sd_ssc_send 11823 * 11824 * Description: Runs a USCSI command for user when called through sdioctl, 11825 * or for the driver. 11826 * 11827 * Arguments: ssc - pointer to the sd_ssc_t struct that carries the 11828 * uscsi_cmd and sd_uscsi_info. 11829 * incmd - ptr to a valid uscsi_cmd struct 11830 * flag - bit flag, indicating open settings, 32/64 bit type 11831 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11832 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11833 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11834 * to use the USCSI "direct" chain and bypass the normal 11835 * command waitq. 11836 * 11837 * Return Code: 0 - successful completion of the given command 11838 * EIO - scsi_uscsi_handle_command() failed 11839 * ENXIO - soft state not found for specified dev 11840 * ECANCELED - command cancelled due to low power 11841 * EINVAL 11842 * EFAULT - copyin/copyout error 11843 * return code of scsi_uscsi_handle_command(): 11844 * EIO 11845 * ENXIO 11846 * EACCES 11847 * 11848 * Context: Kernel Thread; 11849 * Waits for command to complete. Can sleep. 11850 */ 11851 static int 11852 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag, 11853 enum uio_seg dataspace, int path_flag) 11854 { 11855 struct sd_uscsi_info *uip; 11856 struct uscsi_cmd *uscmd; 11857 struct sd_lun *un; 11858 dev_t dev; 11859 11860 int format = 0; 11861 int rval; 11862 11863 ASSERT(ssc != NULL); 11864 un = ssc->ssc_un; 11865 ASSERT(un != NULL); 11866 uscmd = ssc->ssc_uscsi_cmd; 11867 ASSERT(uscmd != NULL); 11868 ASSERT(!mutex_owned(SD_MUTEX(un))); 11869 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) { 11870 /* 11871 * If we enter here, the previous uscsi command has 11872 * not been processed by sd_ssc_assessment. This 11873 * violates our rules of FMA telemetry processing. 11874 * We should print this message along with the last 11875 * undisposed uscsi command. 11876 */ 11877 if (uscmd->uscsi_cdb != NULL) { 11878 SD_INFO(SD_LOG_SDTEST, un, 11879 "sd_ssc_send is missing the alternative " 11880 "sd_ssc_assessment when running command 0x%x.\n", 11881 uscmd->uscsi_cdb[0]); 11882 } 11883 /* 11884 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be 11885 * the initial status. 11886 */ 11887 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11888 } 11889 11890 /* 11891 * We need to make sure each sd_ssc_send is followed by a 11892 * sd_ssc_assessment call, to avoid missing FMA telemetry. 11893 */ 11894 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT; 11895 11896 /* 11897 * If USCSI_PMFAILFAST is set and un is in low power, fail the 11898 * command immediately.
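 *
 * For example (illustrative), a caller that would rather fail fast
 * than wait for a spun-down device can request this behavior before
 * calling sd_ssc_send:
 *
 *	uscmd->uscsi_flags |= USCSI_PMFAILFAST;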
11899 */ 11900 mutex_enter(SD_MUTEX(un)); 11901 mutex_enter(&un->un_pm_mutex); 11902 if ((uscmd->uscsi_flags & USCSI_PMFAILFAST) && 11903 SD_DEVICE_IS_IN_LOW_POWER(un)) { 11904 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send:" 11905 "un:0x%p is in low power\n", un); 11906 mutex_exit(&un->un_pm_mutex); 11907 mutex_exit(SD_MUTEX(un)); 11908 return (ECANCELED); 11909 } 11910 mutex_exit(&un->un_pm_mutex); 11911 mutex_exit(SD_MUTEX(un)); 11912 11913 #ifdef SDDEBUG 11914 switch (dataspace) { 11915 case UIO_USERSPACE: 11916 SD_TRACE(SD_LOG_IO, un, 11917 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 11918 break; 11919 case UIO_SYSSPACE: 11920 SD_TRACE(SD_LOG_IO, un, 11921 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 11922 break; 11923 default: 11924 SD_TRACE(SD_LOG_IO, un, 11925 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 11926 break; 11927 } 11928 #endif 11929 11930 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 11931 SD_ADDRESS(un), &uscmd); 11932 if (rval != 0) { 11933 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send: " 11934 "scsi_uscsi_copyin failed\n"); 11935 return (rval); 11936 } 11937 11938 if ((uscmd->uscsi_cdb != NULL) && 11939 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 11940 mutex_enter(SD_MUTEX(un)); 11941 un->un_f_format_in_progress = TRUE; 11942 mutex_exit(SD_MUTEX(un)); 11943 format = 1; 11944 } 11945 11946 /* 11947 * Allocate an sd_uscsi_info struct and fill it with the info 11948 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11949 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11950 * since we allocate the buf here in this function, we do not 11951 * need to preserve the prior contents of b_private. 11952 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11953 */ 11954 uip = ssc->ssc_uscsi_info; 11955 uip->ui_flags = path_flag; 11956 uip->ui_cmdp = uscmd; 11957 11958 /* 11959 * Commands sent with priority are intended for error recovery 11960 * situations, and do not have retries performed. 11961 */ 11962 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11963 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11964 } 11965 uscmd->uscsi_flags &= ~USCSI_NOINTR; 11966 11967 dev = SD_GET_DEV(un); 11968 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 11969 sd_uscsi_strategy, NULL, uip); 11970 11971 /* 11972 * Mark ssc_flags right after handle_cmd to make sure 11973 * the uscsi command has been sent. 11974 */ 11975 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 11976 11977 #ifdef SDDEBUG 11978 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11979 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11980 uscmd->uscsi_status, uscmd->uscsi_resid); 11981 if (uscmd->uscsi_bufaddr != NULL) { 11982 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11983 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11984 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11985 if (dataspace == UIO_SYSSPACE) { 11986 SD_DUMP_MEMORY(un, SD_LOG_IO, 11987 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11988 uscmd->uscsi_buflen, SD_LOG_HEX); 11989 } 11990 } 11991 #endif 11992 11993 if (format == 1) { 11994 mutex_enter(SD_MUTEX(un)); 11995 un->un_f_format_in_progress = FALSE; 11996 mutex_exit(SD_MUTEX(un)); 11997 } 11998 11999 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 12000 12001 return (rval); 12002 } 12003 12004 /* 12005 * Function: sd_ssc_print 12006 * 12007 * Description: Print information available to the console. 12008 * 12009 * Arguments: ssc - pointer to the sd_ssc_t struct that carries the 12010 * uscsi_cmd and sd_uscsi_info. 12011 * sd_severity - log level. 12012 * Context: Kernel thread or interrupt context.
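 *
 * As a worked example of the sense-length math below (illustrative
 * values): with uscsi_rqlen of 20 and uscsi_rqresid of 2, senlen is
 * 18, so 18 valid sense bytes are passed to scsi_generic_errmsg();
 * if senlen is 0, sensep is cleared and no sense data is decoded.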
12013 */ 12014 static void 12015 sd_ssc_print(sd_ssc_t *ssc, int sd_severity) 12016 { 12017 struct uscsi_cmd *ucmdp; 12018 struct scsi_device *devp; 12019 dev_info_t *devinfo; 12020 uchar_t *sensep; 12021 int senlen; 12022 union scsi_cdb *cdbp; 12023 uchar_t com; 12024 extern struct scsi_key_strings scsi_cmds[]; 12025 12026 ASSERT(ssc != NULL); 12027 ASSERT(ssc->ssc_un != NULL); 12028 12029 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT) 12030 return; 12031 ucmdp = ssc->ssc_uscsi_cmd; 12032 devp = SD_SCSI_DEVP(ssc->ssc_un); 12033 devinfo = SD_DEVINFO(ssc->ssc_un); 12034 ASSERT(ucmdp != NULL); 12035 ASSERT(devp != NULL); 12036 ASSERT(devinfo != NULL); 12037 sensep = (uint8_t *)ucmdp->uscsi_rqbuf; 12038 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid; 12039 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb; 12040 12041 /* In certain cases (like DOORLOCK), the cdb could be NULL. */ 12042 if (cdbp == NULL) 12043 return; 12044 /* We don't print a log if no sense data is available. */ 12045 if (senlen == 0) 12046 sensep = NULL; 12047 com = cdbp->scc_cmd; 12048 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com, 12049 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL); 12050 } 12051 12052 /* 12053 * Function: sd_ssc_assessment 12054 * 12055 * Description: We use this function to make an assessment at the point 12056 * where the SD driver may encounter a potential error. 12057 * 12058 * Arguments: ssc - pointer to the sd_ssc_t struct that carries the 12059 * uscsi_cmd and sd_uscsi_info. 12060 * tp_assess - a hint of strategy for ereport posting. 12061 * Possible values of tp_assess include: 12062 * SD_FMT_IGNORE - we don't post any ereport because we're 12063 * sure that it is ok to ignore the underlying problems. 12064 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now 12065 * but it might not be correct to ignore the underlying 12066 * hardware error. 12067 * SD_FMT_STATUS_CHECK - we will post an ereport with the 12068 * payload driver-assessment of value "fail" or 12069 * "fatal" (depending on what information we have here). This 12070 * assessment value is usually set when the SD driver thinks 12071 * a potential error has occurred (typically, when the return 12072 * value of the SCSI command is EIO). 12073 * SD_FMT_STANDARD - we will post an ereport with the payload 12074 * driver-assessment of value "info". This assessment value is 12075 * set when the SCSI command returned successfully and with 12076 * sense data sent back. 12077 * 12078 * Context: Kernel thread. 12079 */ 12080 static void 12081 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess) 12082 { 12083 int senlen = 0; 12084 struct uscsi_cmd *ucmdp = NULL; 12085 struct sd_lun *un; 12086 12087 ASSERT(ssc != NULL); 12088 un = ssc->ssc_un; 12089 ASSERT(un != NULL); 12090 ucmdp = ssc->ssc_uscsi_cmd; 12091 ASSERT(ucmdp != NULL); 12092 12093 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) { 12094 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT; 12095 } else { 12096 /* 12097 * If we enter here, we have a wrong calling 12098 * sequence of sd_ssc_send and sd_ssc_assessment, 12099 * which should be called in pairs to avoid the 12100 * loss of FMA telemetry.
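 *
 * For example (illustrative), this path triggers when a caller makes
 * two assessments for a single send:
 *
 *	sd_ssc_send(ssc, ...);
 *	sd_ssc_assessment(ssc, SD_FMT_STANDARD);
 *	sd_ssc_assessment(ssc, SD_FMT_STANDARD);	(superfluous)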
12101 */ 12102 if (ucmdp->uscsi_cdb != NULL) { 12103 SD_INFO(SD_LOG_SDTEST, un, 12104 "sd_ssc_assessment is missing the " 12105 "alternative sd_ssc_send when running 0x%x, " 12106 "or there are superfluous sd_ssc_assessment for " 12107 "the same sd_ssc_send.\n", 12108 ucmdp->uscsi_cdb[0]); 12109 } 12110 /* 12111 * Set the ssc_flags to the initial value to avoid passing 12112 * down dirty flags to the following sd_ssc_send function. 12113 */ 12114 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12115 return; 12116 } 12117 12118 /* 12119 * Only handle an issued command which is waiting for assessment. 12120 * A command which is not issued will not have 12121 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here. 12122 */ 12123 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) { 12124 sd_ssc_print(ssc, SCSI_ERR_INFO); 12125 return; 12126 } else { 12127 /* 12128 * For an issued command, we should clear this flag in 12129 * order to allow the sd_ssc_t structure to be reused 12130 * across multiple uscsi commands. 12131 */ 12132 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED; 12133 } 12134 12135 /* 12136 * We will not deal with non-retryable (flag USCSI_DIAGNOSE set) 12137 * commands here, and we should clear the ssc_flags before returning. 12138 */ 12139 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) { 12140 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12141 return; 12142 } 12143 12144 switch (tp_assess) { 12145 case SD_FMT_IGNORE: 12146 case SD_FMT_IGNORE_COMPROMISE: 12147 break; 12148 case SD_FMT_STATUS_CHECK: 12149 /* 12150 * For a failed command (including a succeeded command 12151 * with invalid data sent back). 12152 */ 12153 sd_ssc_post(ssc, SD_FM_DRV_FATAL); 12154 break; 12155 case SD_FMT_STANDARD: 12156 /* 12157 * Always for succeeded commands, probably with sense 12158 * data sent back. 12159 * Limitation: 12160 * We can only handle a succeeded command with sense 12161 * data sent back when auto-request-sense is enabled. 12162 */ 12163 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen - 12164 ssc->ssc_uscsi_cmd->uscsi_rqresid; 12165 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) && 12166 (un->un_f_arq_enabled == TRUE) && 12167 senlen > 0 && 12168 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) { 12169 sd_ssc_post(ssc, SD_FM_DRV_NOTICE); 12170 } 12171 break; 12172 default: 12173 /* 12174 * We should not see any other type of assessment. 12175 */ 12176 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 12177 "sd_ssc_assessment got wrong " 12178 "sd_type_assessment %d.\n", tp_assess); 12179 break; 12180 } 12181 /* 12182 * Clear the ssc_flags before returning. 12183 */ 12184 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12185 } 12186 12187 /* 12188 * Function: sd_ssc_post 12189 * 12190 * Description: 1. Read the driver property to get the fm-scsi-log flag. 12191 * 2. Print a log if fm_log_capable is non-zero. 12192 * 3. Call sd_ssc_ereport_post to post an ereport if possible. 12193 * 12194 * Context: May be called from kernel thread or interrupt context. 12195 */ 12196 static void 12197 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess) 12198 { 12199 struct sd_lun *un; 12200 int sd_severity; 12201 12202 ASSERT(ssc != NULL); 12203 un = ssc->ssc_un; 12204 ASSERT(un != NULL); 12205 12206 /* 12207 * We may enter here from sd_ssc_assessment (for a USCSI command) 12208 * or directly from sdintr context. 12209 * We don't handle non-disk drives (CD-ROM, removable media). 12210 * Clear the ssc_flags before returning in case we've set 12211 * SSC_FLAGS_INVALID_XXX, which should be skipped for a non-disk 12212 * drive.
12213 */ 12214 if (ISCD(un) || un->un_f_has_removable_media) { 12215 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 12216 return; 12217 } 12218 12219 switch (sd_assess) { 12220 case SD_FM_DRV_FATAL: 12221 sd_severity = SCSI_ERR_FATAL; 12222 break; 12223 case SD_FM_DRV_RECOVERY: 12224 sd_severity = SCSI_ERR_RECOVERED; 12225 break; 12226 case SD_FM_DRV_RETRY: 12227 sd_severity = SCSI_ERR_RETRYABLE; 12228 break; 12229 case SD_FM_DRV_NOTICE: 12230 sd_severity = SCSI_ERR_INFO; 12231 break; 12232 default: 12233 sd_severity = SCSI_ERR_UNKNOWN; 12234 } 12235 /* print log */ 12236 sd_ssc_print(ssc, sd_severity); 12237 12238 /* always post ereport */ 12239 sd_ssc_ereport_post(ssc, sd_assess); 12240 } 12241 12242 /* 12243 * Function: sd_ssc_set_info 12244 * 12245 * Description: Mark ssc_flags and set ssc_info, which will be the 12246 * payload of the uderr ereport. This function causes 12247 * sd_ssc_ereport_post to post a uderr ereport only. 12248 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA (USCSI), 12249 * the function will also call SD_ERROR or scsi_log for a 12250 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device. 12251 * 12252 * Arguments: ssc - pointer to the sd_ssc_t struct that carries the 12253 * uscsi_cmd and sd_uscsi_info. 12254 * ssc_flags - indicate the sub-category of a uderr. 12255 * comp - this argument is meaningful only when 12256 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible 12257 * values include: 12258 * > 0, SD_ERROR is used with comp as the driver logging 12259 * component; 12260 * = 0, scsi-log is used to log error telemetries; 12261 * < 0, no log available for this telemetry. 12262 * 12263 * Context: Kernel thread or interrupt context 12264 */ 12265 static void 12266 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...) 12267 { 12268 va_list ap; 12269 12270 ASSERT(ssc != NULL); 12271 ASSERT(ssc->ssc_un != NULL); 12272 12273 ssc->ssc_flags |= ssc_flags; 12274 va_start(ap, fmt); 12275 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap); 12276 va_end(ap); 12277 12278 /* 12279 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command 12280 * with invalid data sent back. For a non-uscsi command, the 12281 * following code will be bypassed. 12282 */ 12283 if (ssc_flags & SSC_FLAGS_INVALID_DATA) { 12284 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) { 12285 /* 12286 * If the error belongs to a certain component and we 12287 * do not want it to show up on the console, we 12288 * use SD_ERROR; otherwise scsi_log is 12289 * preferred. 12290 */ 12291 if (comp > 0) { 12292 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info); 12293 } else if (comp == 0) { 12294 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label, 12295 CE_WARN, ssc->ssc_info); 12296 } 12297 } 12298 } 12299 } 12300 12301 /* 12302 * Function: sd_buf_iodone 12303 * 12304 * Description: Frees the sd_xbuf & returns the buf to its originator. 12305 * 12306 * Context: May be called from interrupt context. 12307 */ 12308 /* ARGSUSED */ 12309 static void 12310 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 12311 { 12312 struct sd_xbuf *xp; 12313 12314 ASSERT(un != NULL); 12315 ASSERT(bp != NULL); 12316 ASSERT(!mutex_owned(SD_MUTEX(un))); 12317 12318 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 12319 12320 xp = SD_GET_XBUF(bp); 12321 ASSERT(xp != NULL); 12322 12323 /* xbuf is gone after this */ 12324 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) { 12325 mutex_enter(SD_MUTEX(un)); 12326 12327 /* 12328 * Grab the time when the cmd completed.
12329 * This is used to determine whether the system has been 12330 * idle long enough to be reported idle to the PM framework. 12331 * This lowers the overhead, and therefore improves 12332 * performance per I/O operation. 12333 */ 12334 un->un_pm_idle_time = gethrtime(); 12335 12336 un->un_ncmds_in_driver--; 12337 ASSERT(un->un_ncmds_in_driver >= 0); 12338 SD_INFO(SD_LOG_IO, un, 12339 "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 12340 un->un_ncmds_in_driver); 12341 12342 mutex_exit(SD_MUTEX(un)); 12343 } 12344 12345 biodone(bp); /* bp is gone after this */ 12346 12347 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 12348 } 12349 12350 12351 /* 12352 * Function: sd_uscsi_iodone 12353 * 12354 * Description: Frees the sd_xbuf & returns the buf to its originator. 12355 * 12356 * Context: May be called from interrupt context. 12357 */ 12358 /* ARGSUSED */ 12359 static void 12360 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 12361 { 12362 struct sd_xbuf *xp; 12363 12364 ASSERT(un != NULL); 12365 ASSERT(bp != NULL); 12366 12367 xp = SD_GET_XBUF(bp); 12368 ASSERT(xp != NULL); 12369 ASSERT(!mutex_owned(SD_MUTEX(un))); 12370 12371 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 12372 12373 bp->b_private = xp->xb_private; 12374 12375 mutex_enter(SD_MUTEX(un)); 12376 12377 /* 12378 * Grab the time when the cmd completed. 12379 * This is used to determine whether the system has been 12380 * idle long enough to be reported idle to the PM framework. 12381 * This lowers the overhead, and therefore improves 12382 * performance per I/O operation. 12383 */ 12384 un->un_pm_idle_time = gethrtime(); 12385 12386 un->un_ncmds_in_driver--; 12387 ASSERT(un->un_ncmds_in_driver >= 0); 12388 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 12389 un->un_ncmds_in_driver); 12390 12391 mutex_exit(SD_MUTEX(un)); 12392 12393 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 12394 SENSE_LENGTH) { 12395 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 12396 MAX_SENSE_LENGTH); 12397 } else { 12398 kmem_free(xp, sizeof (struct sd_xbuf)); 12399 } 12400 12401 biodone(bp); 12402 12403 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 12404 } 12405 12406 12407 /* 12408 * Function: sd_mapblockaddr_iostart 12409 * 12410 * Description: Verify that the request lies within the partition limits 12411 * for the indicated minor device. Issue an "overrun" buf if 12412 * the request would exceed the partition range. Converts a 12413 * partition-relative block address to an absolute one. 12414 * 12415 * Upon exit of this function: 12416 * 1. I/O is aligned 12417 * xp->xb_blkno represents the absolute sector address 12418 * 2. I/O is misaligned 12419 * xp->xb_blkno represents the absolute logical block address 12420 * based on DEV_BSIZE. The logical block address will be 12421 * converted to physical sector address in sd_mapblocksize_\ 12422 * iostart. 12423 * 3. I/O is misaligned but is aligned in the "overrun" buf 12424 * xp->xb_blkno represents the absolute logical block address 12425 * based on DEV_BSIZE. The logical block address will be 12426 * converted to physical sector address in sd_mapblocksize_\ 12427 * iostart. But no RMW will be issued in this case. 12428 * 12429 * Context: Can sleep 12430 * 12431 * Issues: This follows what the old code did, in terms of accessing 12432 * some of the partition info in the unit struct without holding 12433 * the mutex. This is a general issue if the partition info 12434 * can be altered while IO is in progress...
as soon as we send 12435 * a buf, its partitioning can be invalid before it gets to the 12436 * device. Probably the right fix is to move partitioning out 12437 * of the driver entirely. 12438 */ 12439 12440 static void 12441 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 12442 { 12443 diskaddr_t nblocks; /* #blocks in the given partition */ 12444 daddr_t blocknum; /* Block number specified by the buf */ 12445 size_t requested_nblocks; 12446 size_t available_nblocks; 12447 int partition; 12448 diskaddr_t partition_offset; 12449 struct sd_xbuf *xp; 12450 int secmask = 0, blknomask = 0; 12451 ushort_t is_aligned = TRUE; 12452 12453 ASSERT(un != NULL); 12454 ASSERT(bp != NULL); 12455 ASSERT(!mutex_owned(SD_MUTEX(un))); 12456 12457 SD_TRACE(SD_LOG_IO_PARTITION, un, 12458 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 12459 12460 xp = SD_GET_XBUF(bp); 12461 ASSERT(xp != NULL); 12462 12463 /* 12464 * If the geometry is not indicated as valid, attempt to access 12465 * the unit & verify the geometry/label. This can be the case for 12466 * removable-media devices, or if the device was opened in 12467 * NDELAY/NONBLOCK mode. 12468 */ 12469 partition = SDPART(bp->b_edev); 12470 12471 if (!SD_IS_VALID_LABEL(un)) { 12472 sd_ssc_t *ssc; 12473 /* 12474 * Initialize sd_ssc_t for internal uscsi commands. 12475 * To avoid a potential performance issue, we allocate 12476 * memory only if the label is invalid. 12477 */ 12478 ssc = sd_ssc_init(un); 12479 12480 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) { 12481 /* 12482 * For removable devices it is possible to start an 12483 * I/O without media by opening the device in nodelay 12484 * mode. Also for writable CDs there can be many 12485 * scenarios where there is no geometry yet but the 12486 * volume manager is trying to issue a read() just 12487 * because it can see a TOC on the CD. So do not print 12488 * a message for removables. 12489 */ 12490 if (!un->un_f_has_removable_media) { 12491 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12492 "i/o to invalid geometry\n"); 12493 } 12494 bioerror(bp, EIO); 12495 bp->b_resid = bp->b_bcount; 12496 SD_BEGIN_IODONE(index, un, bp); 12497 12498 sd_ssc_fini(ssc); 12499 return; 12500 } 12501 sd_ssc_fini(ssc); 12502 } 12503 12504 nblocks = 0; 12505 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 12506 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 12507 12508 if (un->un_f_enable_rmw) { 12509 blknomask = (un->un_phy_blocksize / DEV_BSIZE) - 1; 12510 secmask = un->un_phy_blocksize - 1; 12511 } else { 12512 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1; 12513 secmask = un->un_tgt_blocksize - 1; 12514 } 12515 12516 if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) { 12517 is_aligned = FALSE; 12518 } 12519 12520 if (!(NOT_DEVBSIZE(un)) || un->un_f_enable_rmw) { 12521 /* 12522 * If the I/O is aligned, there is no need to involve RMW 12523 * (Read Modify Write). Convert the logical block number to 12524 * the target's physical sector number. 12525 */ 12526 if (is_aligned) { 12527 xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno); 12528 } else { 12529 /* 12530 * There is no RMW if we're just reading, so don't 12531 * warn or error out because of it.
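 *
 * A worked example of the alignment test above (illustrative
 * values): with a 4096-byte target block and 512-byte system
 * blocks, blknomask is 7, so a one-block (512-byte) write at
 * b_lblkno 3 is misaligned and, depending on un_f_rmw_type, either
 * fails here with B_ERROR or goes through the read-modify-write
 * path with the warning below.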
12532 */ 12533 if (bp->b_flags & B_READ) { 12534 /*EMPTY*/ 12535 } else if (!un->un_f_enable_rmw && 12536 un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) { 12537 bp->b_flags |= B_ERROR; 12538 goto error_exit; 12539 } else if (un->un_f_rmw_type == SD_RMW_TYPE_DEFAULT) { 12540 mutex_enter(SD_MUTEX(un)); 12541 if (!un->un_f_enable_rmw && 12542 un->un_rmw_msg_timeid == NULL) { 12543 scsi_log(SD_DEVINFO(un), sd_label, 12544 CE_WARN, "I/O request is not " 12545 "aligned with %d disk sector size. " 12546 "It is handled through Read Modify " 12547 "Write but the performance is " 12548 "very low.\n", 12549 un->un_tgt_blocksize); 12550 un->un_rmw_msg_timeid = 12551 timeout(sd_rmw_msg_print_handler, 12552 un, SD_RMW_MSG_PRINT_TIMEOUT); 12553 } else { 12554 un->un_rmw_incre_count++; 12555 } 12556 mutex_exit(SD_MUTEX(un)); 12557 } 12558 12559 nblocks = SD_TGT2SYSBLOCK(un, nblocks); 12560 partition_offset = SD_TGT2SYSBLOCK(un, 12561 partition_offset); 12562 } 12563 } 12564 12565 /* 12566 * blocknum is the starting block number of the request. At this 12567 * point it is still relative to the start of the minor device. 12568 */ 12569 blocknum = xp->xb_blkno; 12570 12571 /* 12572 * Legacy: If the starting block number is one past the last block 12573 * in the partition, do not set B_ERROR in the buf. 12574 */ 12575 if (blocknum == nblocks) { 12576 goto error_exit; 12577 } 12578 12579 /* 12580 * Confirm that the first block of the request lies within the 12581 * partition limits. Also the requested number of bytes must be 12582 * a multiple of the system block size. 12583 */ 12584 if ((blocknum < 0) || (blocknum >= nblocks) || 12585 ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) { 12586 bp->b_flags |= B_ERROR; 12587 goto error_exit; 12588 } 12589 12590 /* 12591 * If the requested # blocks exceeds the available # blocks, that 12592 * is an overrun of the partition. 12593 */ 12594 if ((!NOT_DEVBSIZE(un)) && is_aligned) { 12595 requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 12596 } else { 12597 requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount); 12598 } 12599 12600 available_nblocks = (size_t)(nblocks - blocknum); 12601 ASSERT(nblocks >= blocknum); 12602 12603 if (requested_nblocks > available_nblocks) { 12604 size_t resid; 12605 12606 /* 12607 * Allocate an "overrun" buf to allow the request to proceed 12608 * for the amount of space available in the partition. The 12609 * amount not transferred will be added into the b_resid 12610 * when the operation is complete. The overrun buf 12611 * replaces the original buf here, and the original buf 12612 * is saved inside the overrun buf, for later use. 12613 */ 12614 if ((!NOT_DEVBSIZE(un)) && is_aligned) { 12615 resid = SD_TGTBLOCKS2BYTES(un, 12616 (offset_t)(requested_nblocks - available_nblocks)); 12617 } else { 12618 resid = SD_SYSBLOCKS2BYTES( 12619 (offset_t)(requested_nblocks - available_nblocks)); 12620 } 12621 12622 size_t count = bp->b_bcount - resid; 12623 /* 12624 * Note: count is an unsigned entity, thus it'll NEVER 12625 * be less than 0, so ASSERT that the original values 12626 * are correct. 12627 */ 12628 ASSERT(bp->b_bcount >= resid); 12629 12630 bp = sd_bioclone_alloc(bp, count, blocknum, 12631 (int (*)(struct buf *))(uintptr_t)sd_mapblockaddr_iodone); 12632 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12633 ASSERT(xp != NULL); 12634 } 12635 12636 /* At this point there should be no residual for this buf. */ 12637 ASSERT(bp->b_resid == 0); 12638 12639 /* Convert the block number to an absolute address.
*/ 12640 xp->xb_blkno += partition_offset; 12641 12642 SD_NEXT_IOSTART(index, un, bp); 12643 12644 SD_TRACE(SD_LOG_IO_PARTITION, un, 12645 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12646 12647 return; 12648 12649 error_exit: 12650 bp->b_resid = bp->b_bcount; 12651 SD_BEGIN_IODONE(index, un, bp); 12652 SD_TRACE(SD_LOG_IO_PARTITION, un, 12653 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12654 } 12655 12656 12657 /* 12658 * Function: sd_mapblockaddr_iodone 12659 * 12660 * Description: Completion-side processing for partition management. 12661 * 12662 * Context: May be called under interrupt context 12663 */ 12664 12665 static void 12666 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12667 { 12668 /* int partition; */ /* Not used, see below. */ 12669 ASSERT(un != NULL); 12670 ASSERT(bp != NULL); 12671 ASSERT(!mutex_owned(SD_MUTEX(un))); 12672 12673 SD_TRACE(SD_LOG_IO_PARTITION, un, 12674 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12675 12676 if ((uintptr_t)bp->b_iodone == (uintptr_t)sd_mapblockaddr_iodone) { 12677 /* 12678 * We have an "overrun" buf to deal with... 12679 */ 12680 struct sd_xbuf *xp; 12681 struct buf *obp; /* ptr to the original buf */ 12682 12683 xp = SD_GET_XBUF(bp); 12684 ASSERT(xp != NULL); 12685 12686 /* Retrieve the pointer to the original buf */ 12687 obp = (struct buf *)xp->xb_private; 12688 ASSERT(obp != NULL); 12689 12690 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12691 bioerror(obp, bp->b_error); 12692 12693 sd_bioclone_free(bp); 12694 12695 /* 12696 * Get back the original buf. 12697 * Note that since the restoration of xb_blkno below 12698 * was removed, the sd_xbuf is not needed. 12699 */ 12700 bp = obp; 12701 /* 12702 * xp = SD_GET_XBUF(bp); 12703 * ASSERT(xp != NULL); 12704 */ 12705 } 12706 12707 /* 12708 * Convert xp->xb_blkno back to a minor-device relative value. 12709 * Note: this has been commented out, as it is not needed in the 12710 * current implementation of the driver (ie, this function is at 12711 * the top of the layering chain, so the info would simply be 12712 * discarded) and it is in the "hot" IO path. 12713 * 12714 * partition = getminor(bp->b_edev) & SDPART_MASK; 12715 * xp->xb_blkno -= un->un_offset[partition]; 12716 */ 12717 12718 SD_NEXT_IODONE(index, un, bp); 12719 12720 SD_TRACE(SD_LOG_IO_PARTITION, un, 12721 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12722 } 12723 12724 12725 /* 12726 * Function: sd_mapblocksize_iostart 12727 * 12728 * Description: Convert between system block size (un->un_sys_blocksize) 12729 * and target block size (un->un_tgt_blocksize). 12730 * 12731 * Context: Can sleep to allocate resources. 12732 * 12733 * Assumptions: A higher layer has already performed any partition validation, 12734 * and converted the xp->xb_blkno to an absolute value relative 12735 * to the start of the device. 12736 * 12737 * It is also assumed that the higher layer has implemented 12738 * an "overrun" mechanism for the case where the request would 12739 * read/write beyond the end of a partition. In this case we 12740 * assume (and ASSERT) that bp->b_resid == 0. 12741 * 12742 * Note: The implementation for this routine assumes the target 12743 * block size remains constant between allocation and transport.
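 *
 * As an informal illustration of the mapping this layer performs, the
 * following userland-style sketch (a hypothetical helper, assuming
 * DEV_BSIZE is 512 and that tgt_bsize is a power-of-two multiple of
 * it) mirrors the start/end block and alignment computations done
 * below:
 */

#include <stdint.h>

struct blkspan {
	uint64_t start_blk;	/* first target block touched */
	uint64_t end_blk;	/* target block after the last one */
	uint64_t xfer_bytes;	/* transfer size rounded to target blocks */
	int aligned;		/* nonzero if no shadow buf/RMW is needed */
};

static struct blkspan
map_blocksize(uint64_t sys_blkno, uint64_t bcount, uint32_t tgt_bsize)
{
	/* Treat the device as an array of bytes, as the driver does. */
	uint64_t first_byte = sys_blkno * 512;
	struct blkspan s;

	s.start_blk = first_byte / tgt_bsize;
	s.end_blk = (first_byte + bcount + tgt_bsize - 1) / tgt_bsize;
	s.xfer_bytes = (s.end_blk - s.start_blk) * tgt_bsize;
	s.aligned = ((first_byte % tgt_bsize) == 0) &&
	    ((bcount % tgt_bsize) == 0);
	return (s);
}

/*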
12744 */ 12745 12746 static void 12747 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 12748 { 12749 struct sd_mapblocksize_info *bsp; 12750 struct sd_xbuf *xp; 12751 offset_t first_byte; 12752 daddr_t start_block, end_block; 12753 daddr_t request_bytes; 12754 ushort_t is_aligned = FALSE; 12755 12756 ASSERT(un != NULL); 12757 ASSERT(bp != NULL); 12758 ASSERT(!mutex_owned(SD_MUTEX(un))); 12759 ASSERT(bp->b_resid == 0); 12760 12761 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12762 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 12763 12764 /* 12765 * For a non-writable CD, a write request is an error 12766 */ 12767 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 12768 (un->un_f_mmc_writable_media == FALSE)) { 12769 bioerror(bp, EIO); 12770 bp->b_resid = bp->b_bcount; 12771 SD_BEGIN_IODONE(index, un, bp); 12772 return; 12773 } 12774 12775 /* 12776 * We do not need a shadow buf if the device is using 12777 * un->un_sys_blocksize as its block size or if bcount == 0. 12778 * In this case there is no layer-private data block allocated. 12779 */ 12780 if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) || 12781 (bp->b_bcount == 0)) { 12782 goto done; 12783 } 12784 12785 #if defined(__x86) 12786 /* We do not support non-block-aligned transfers for ROD devices */ 12787 ASSERT(!ISROD(un)); 12788 #endif 12789 12790 xp = SD_GET_XBUF(bp); 12791 ASSERT(xp != NULL); 12792 12793 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12794 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 12795 un->un_tgt_blocksize, DEV_BSIZE); 12796 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12797 "request start block:0x%x\n", xp->xb_blkno); 12798 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12799 "request len:0x%x\n", bp->b_bcount); 12800 12801 /* 12802 * Allocate the layer-private data area for the mapblocksize layer. 12803 * Layers are allowed to use the xb_private member of the sd_xbuf 12804 * struct to store the pointer to their layer-private data block, but 12805 * each layer also has the responsibility of restoring the prior 12806 * contents of xb_private before returning the buf/xbuf to the 12807 * higher layer that sent it. 12808 * 12809 * Here we save the prior contents of xp->xb_private into the 12810 * bsp->mbs_oprivate field of our layer-private data area. This value 12811 * is restored by sd_mapblocksize_iodone() just prior to freeing up 12812 * the layer-private area and returning the buf/xbuf to the layer 12813 * that sent it. 12814 * 12815 * Note that here we use kmem_zalloc for the allocation as there are 12816 * parts of the mapblocksize code that expect certain fields to be 12817 * zero unless explicitly set to a required value. 12818 */ 12819 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12820 bsp->mbs_oprivate = xp->xb_private; 12821 xp->xb_private = bsp; 12822 12823 /* 12824 * This treats the data on the disk (target) as an array of bytes. 12825 * first_byte is the byte offset, from the beginning of the device, 12826 * to the location of the request. This is converted from a 12827 * un->un_sys_blocksize block address to a byte offset, and then back 12828 * to a block address based upon a un->un_tgt_blocksize block size. 12829 * 12830 * xp->xb_blkno should be absolute upon entry into this function, 12831 * but it is based upon partitions that use the "system" 12832 * block size. It must be adjusted to reflect the block size of 12833 * the target.
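 * For example, with un_tgt_blocksize = 2048 a request with
 * xb_blkno = 7 and b_bcount = 1024 gives first_byte = 3584,
 * start_block = 1, end_block = 3 and request_bytes = 4096; the
 * request is unaligned since 3584 % 2048 != 0.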
12834 * 12835 * Note that end_block is actually the block that follows the last 12836 * block of the request, but that's what is needed for the computation. 12837 */ 12838 first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno); 12839 if (un->un_f_enable_rmw) { 12840 start_block = xp->xb_blkno = 12841 (first_byte / un->un_phy_blocksize) * 12842 (un->un_phy_blocksize / DEV_BSIZE); 12843 end_block = ((first_byte + bp->b_bcount + 12844 un->un_phy_blocksize - 1) / un->un_phy_blocksize) * 12845 (un->un_phy_blocksize / DEV_BSIZE); 12846 } else { 12847 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 12848 end_block = (first_byte + bp->b_bcount + 12849 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize; 12850 } 12851 12852 /* request_bytes is rounded up to a multiple of the target block size */ 12853 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 12854 12855 /* 12856 * See if the starting address of the request and the request 12857 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 12858 * then we do not need to allocate a shadow buf to handle the request. 12859 */ 12860 if (un->un_f_enable_rmw) { 12861 if (((first_byte % un->un_phy_blocksize) == 0) && 12862 ((bp->b_bcount % un->un_phy_blocksize) == 0)) { 12863 is_aligned = TRUE; 12864 } 12865 } else { 12866 if (((first_byte % un->un_tgt_blocksize) == 0) && 12867 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 12868 is_aligned = TRUE; 12869 } 12870 } 12871 12872 if ((bp->b_flags & B_READ) == 0) { 12873 /* 12874 * Lock the range for a write operation. An aligned request is 12875 * considered a simple write; otherwise the request must be a 12876 * read-modify-write. 12877 */ 12878 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 12879 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 12880 } 12881 12882 /* 12883 * Alloc a shadow buf if the request is not aligned. Also, this is 12884 * where the READ command is generated for a read-modify-write. (The 12885 * write phase is deferred until after the read completes.) 12886 */ 12887 if (is_aligned == FALSE) { 12888 12889 struct sd_mapblocksize_info *shadow_bsp; 12890 struct sd_xbuf *shadow_xp; 12891 struct buf *shadow_bp; 12892 12893 /* 12894 * Allocate the shadow buf and it associated xbuf. Note that 12895 * after this call the xb_blkno value in both the original 12896 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 12897 * same: absolute relative to the start of the device, and 12898 * adjusted for the target block size. The b_blkno in the 12899 * shadow buf will also be set to this value. We should never 12900 * change b_blkno in the original bp however. 12901 * 12902 * Note also that the shadow buf will always need to be a 12903 * READ command, regardless of whether the incoming command 12904 * is a READ or a WRITE. 12905 */ 12906 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 12907 xp->xb_blkno, 12908 (int (*)(struct buf *))(uintptr_t)sd_mapblocksize_iodone); 12909 12910 shadow_xp = SD_GET_XBUF(shadow_bp); 12911 12912 /* 12913 * Allocate the layer-private data for the shadow buf. 12914 * (No need to preserve xb_private in the shadow xbuf.) 
12915 */ 12916 shadow_xp->xb_private = shadow_bsp = 12917 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12918 12919 /* 12920 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 12921 * to figure out where the start of the user data is (based upon 12922 * the system block size) in the data returned by the READ 12923 * command (which will be based upon the target blocksize). Note 12924 * that this is only really used if the request is unaligned. 12925 */ 12926 if (un->un_f_enable_rmw) { 12927 bsp->mbs_copy_offset = (ssize_t)(first_byte - 12928 ((offset_t)xp->xb_blkno * un->un_sys_blocksize)); 12929 ASSERT((bsp->mbs_copy_offset >= 0) && 12930 (bsp->mbs_copy_offset < un->un_phy_blocksize)); 12931 } else { 12932 bsp->mbs_copy_offset = (ssize_t)(first_byte - 12933 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 12934 ASSERT((bsp->mbs_copy_offset >= 0) && 12935 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 12936 } 12937 12938 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 12939 12940 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 12941 12942 /* Transfer the wmap (if any) to the shadow buf */ 12943 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 12944 bsp->mbs_wmp = NULL; 12945 12946 /* 12947 * The shadow buf goes on from here in place of the 12948 * original buf. 12949 */ 12950 shadow_bsp->mbs_orig_bp = bp; 12951 bp = shadow_bp; 12952 } 12953 12954 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12955 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 12956 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12957 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 12958 request_bytes); 12959 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12960 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 12961 12962 done: 12963 SD_NEXT_IOSTART(index, un, bp); 12964 12965 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12966 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 12967 } 12968 12969 12970 /* 12971 * Function: sd_mapblocksize_iodone 12972 * 12973 * Description: Completion side processing for block-size mapping. 12974 * 12975 * Context: May be called under interrupt context 12976 */ 12977 12978 static void 12979 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 12980 { 12981 struct sd_mapblocksize_info *bsp; 12982 struct sd_xbuf *xp; 12983 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 12984 struct buf *orig_bp; /* ptr to the original buf */ 12985 offset_t shadow_end; 12986 offset_t request_end; 12987 offset_t shadow_start; 12988 ssize_t copy_offset; 12989 size_t copy_length; 12990 size_t shortfall; 12991 uint_t is_write; /* TRUE if this bp is a WRITE */ 12992 uint_t has_wmap; /* TRUE is this bp has a wmap */ 12993 12994 ASSERT(un != NULL); 12995 ASSERT(bp != NULL); 12996 12997 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12998 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 12999 13000 /* 13001 * There is no shadow buf or layer-private data if the target is 13002 * using un->un_sys_blocksize as its block size or if bcount == 0. 13003 */ 13004 if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) || 13005 (bp->b_bcount == 0)) { 13006 goto exit; 13007 } 13008 13009 xp = SD_GET_XBUF(bp); 13010 ASSERT(xp != NULL); 13011 13012 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 13013 bsp = xp->xb_private; 13014 13015 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 13016 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 13017 13018 if (is_write) { 13019 /* 13020 * For a WRITE request we must free up the block range that 13021 * we have locked up. 
This holds regardless of whether this is 13022 * an aligned write request or a read-modify-write request. 13023 */ 13024 sd_range_unlock(un, bsp->mbs_wmp); 13025 bsp->mbs_wmp = NULL; 13026 } 13027 13028 if ((uintptr_t)bp->b_iodone != (uintptr_t)sd_mapblocksize_iodone) { 13029 /* 13030 * An aligned read or write command will have no shadow buf; 13031 * there is not much else to do with it. 13032 */ 13033 goto done; 13034 } 13035 13036 orig_bp = bsp->mbs_orig_bp; 13037 ASSERT(orig_bp != NULL); 13038 orig_xp = SD_GET_XBUF(orig_bp); 13039 ASSERT(orig_xp != NULL); 13040 ASSERT(!mutex_owned(SD_MUTEX(un))); 13041 13042 if (!is_write && has_wmap) { 13043 /* 13044 * A READ with a wmap means this is the READ phase of a 13045 * read-modify-write. If an error occurred on the READ then 13046 * we do not proceed with the WRITE phase or copy any data. 13047 * Just release the write maps and return with an error. 13048 */ 13049 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 13050 orig_bp->b_resid = orig_bp->b_bcount; 13051 bioerror(orig_bp, bp->b_error); 13052 sd_range_unlock(un, bsp->mbs_wmp); 13053 goto freebuf_done; 13054 } 13055 } 13056 13057 /* 13058 * Here is where we set up to copy the data from the shadow buf 13059 * into the space associated with the original buf. 13060 * 13061 * To deal with the conversion between block sizes, these 13062 * computations treat the data as an array of bytes, with the 13063 * first byte (byte 0) corresponding to the first byte in the 13064 * first block on the disk. 13065 */ 13066 13067 /* 13068 * shadow_start and shadow_end delimit the extent of the data 13069 * returned with the shadow IO request. 13070 */ 13071 if (un->un_f_enable_rmw) { 13072 shadow_start = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno); 13073 } else { 13074 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 13075 } 13076 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 13077 13078 /* 13079 * copy_offset gives the offset (in bytes) from the start of the first 13080 * block of the READ request to the beginning of the data. We retrieve 13081 * this value from the layer-private data area of the shadow xbuf, 13082 * where it was saved by sd_mapblocksize_iostart(). copy_length gives 13083 * the amount of data to be copied (in bytes). 13084 */ 13085 copy_offset = bsp->mbs_copy_offset; 13086 if (un->un_f_enable_rmw) { 13087 ASSERT((copy_offset >= 0) && 13088 (copy_offset < un->un_phy_blocksize)); 13089 } else { 13090 ASSERT((copy_offset >= 0) && 13091 (copy_offset < un->un_tgt_blocksize)); 13092 } 13093 13094 copy_length = orig_bp->b_bcount; 13095 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 13096 13097 /* 13098 * Set up the resid and error fields of orig_bp as appropriate. 13099 */ 13100 if (shadow_end >= request_end) { 13101 /* We got all the requested data; set resid to zero */ 13102 orig_bp->b_resid = 0; 13103 } else { 13104 /* 13105 * We failed to get enough data to fully satisfy the original 13106 * request. Just copy back whatever data we got and set 13107 * up the residual and error code as required. 13108 * 13109 * 'shortfall' is the amount by which the data received with the 13110 * shadow buf has "fallen short" of the requested amount. 13111 */ 13112 shortfall = (size_t)(request_end - shadow_end); 13113 13114 if (shortfall > orig_bp->b_bcount) { 13115 /* 13116 * We did not get enough data to even partially 13117 * fulfill the original request. The residual is 13118 * equal to the amount requested.
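 * For example, if copy_offset is 512, orig_bp->b_bcount is 4096,
 * and the shadow I/O returned no data at all (shadow_end ==
 * shadow_start), then shortfall is 4608, which exceeds the
 * original count, so the entire request is reported as residual.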
13119 */ 13120 orig_bp->b_resid = orig_bp->b_bcount; 13121 } else { 13122 /* 13123 * We did not get all the data that we requested 13124 * from the device, but we will try to return what 13125 * portion we did get. 13126 */ 13127 orig_bp->b_resid = shortfall; 13128 } 13129 ASSERT(copy_length >= orig_bp->b_resid); 13130 copy_length -= orig_bp->b_resid; 13131 } 13132 13133 /* Propagate the error code from the shadow buf to the original buf */ 13134 bioerror(orig_bp, bp->b_error); 13135 13136 if (is_write) { 13137 goto freebuf_done; /* No data copying for a WRITE */ 13138 } 13139 13140 if (has_wmap) { 13141 /* 13142 * This is a READ command from the READ phase of a 13143 * read-modify-write request. We have to copy the data given 13144 * by the user OVER the data returned by the READ command, 13145 * then convert the command from a READ to a WRITE and send 13146 * it back to the target. 13147 */ 13148 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 13149 copy_length); 13150 13151 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 13152 13153 /* 13154 * Dispatch the WRITE command to the taskq thread, which 13155 * will in turn send the command to the target. When the 13156 * WRITE command completes, we (sd_mapblocksize_iodone()) 13157 * will get called again as part of the iodone chain 13158 * processing for it. Note that we will still be dealing 13159 * with the shadow buf at that point. 13160 */ 13161 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 13162 KM_NOSLEEP) != TASKQID_INVALID) { 13163 /* 13164 * Dispatch was successful so we are done. Return 13165 * without going any higher up the iodone chain. Do 13166 * not free up any layer-private data until after the 13167 * WRITE completes. 13168 */ 13169 return; 13170 } 13171 13172 /* 13173 * Dispatch of the WRITE command failed; set up the error 13174 * condition and send this IO back up the iodone chain. 13175 */ 13176 bioerror(orig_bp, EIO); 13177 orig_bp->b_resid = orig_bp->b_bcount; 13178 13179 } else { 13180 /* 13181 * This is a regular READ request (ie, not an RMW). Copy the 13182 * data from the shadow buf into the original buf. The 13183 * copy_offset compensates for any "misalignment" between the 13184 * shadow buf (with its un->un_tgt_blocksize blocks) and the 13185 * original buf (with its un->un_sys_blocksize blocks). 13186 */ 13187 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 13188 copy_length); 13189 } 13190 13191 freebuf_done: 13192 13193 /* 13194 * At this point we still have both the shadow buf AND the original 13195 * buf to deal with, as well as the layer-private data area in each. 13196 * Local variables are as follows: 13197 * 13198 * bp -- points to shadow buf 13199 * xp -- points to xbuf of shadow buf 13200 * bsp -- points to layer-private data area of shadow buf 13201 * orig_bp -- points to original buf 13202 * 13203 * First free the shadow buf and its associated xbuf, then free the 13204 * layer-private data area from the shadow buf. There is no need to 13205 * restore xb_private in the shadow xbuf. 13206 */ 13207 sd_shadow_buf_free(bp); 13208 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 13209 13210 /* 13211 * Now update the local variables to point to the original buf, xbuf, 13212 * and layer-private area.
13213 */ 13214 bp = orig_bp; 13215 xp = SD_GET_XBUF(bp); 13216 ASSERT(xp != NULL); 13217 ASSERT(xp == orig_xp); 13218 bsp = xp->xb_private; 13219 ASSERT(bsp != NULL); 13220 13221 done: 13222 /* 13223 * Restore xb_private to whatever it was set to by the next higher 13224 * layer in the chain, then free the layer-private data area. 13225 */ 13226 xp->xb_private = bsp->mbs_oprivate; 13227 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 13228 13229 exit: 13230 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 13231 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 13232 13233 SD_NEXT_IODONE(index, un, bp); 13234 } 13235 13236 13237 /* 13238 * Function: sd_checksum_iostart 13239 * 13240 * Description: A stub function for a layer that's currently not used. 13241 * For now just a placeholder. 13242 * 13243 * Context: Kernel thread context 13244 */ 13245 13246 static void 13247 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 13248 { 13249 ASSERT(un != NULL); 13250 ASSERT(bp != NULL); 13251 ASSERT(!mutex_owned(SD_MUTEX(un))); 13252 SD_NEXT_IOSTART(index, un, bp); 13253 } 13254 13255 13256 /* 13257 * Function: sd_checksum_iodone 13258 * 13259 * Description: A stub function for a layer that's currently not used. 13260 * For now just a placeholder. 13261 * 13262 * Context: May be called under interrupt context 13263 */ 13264 13265 static void 13266 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 13267 { 13268 ASSERT(un != NULL); 13269 ASSERT(bp != NULL); 13270 ASSERT(!mutex_owned(SD_MUTEX(un))); 13271 SD_NEXT_IODONE(index, un, bp); 13272 } 13273 13274 13275 /* 13276 * Function: sd_checksum_uscsi_iostart 13277 * 13278 * Description: A stub function for a layer that's currently not used. 13279 * For now just a placeholder. 13280 * 13281 * Context: Kernel thread context 13282 */ 13283 13284 static void 13285 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 13286 { 13287 ASSERT(un != NULL); 13288 ASSERT(bp != NULL); 13289 ASSERT(!mutex_owned(SD_MUTEX(un))); 13290 SD_NEXT_IOSTART(index, un, bp); 13291 } 13292 13293 13294 /* 13295 * Function: sd_checksum_uscsi_iodone 13296 * 13297 * Description: A stub function for a layer that's currently not used. 13298 * For now just a placeholder. 13299 * 13300 * Context: May be called under interrupt context 13301 */ 13302 13303 static void 13304 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 13305 { 13306 ASSERT(un != NULL); 13307 ASSERT(bp != NULL); 13308 ASSERT(!mutex_owned(SD_MUTEX(un))); 13309 SD_NEXT_IODONE(index, un, bp); 13310 } 13311 13312 13313 /* 13314 * Function: sd_pm_iostart 13315 * 13316 * Description: iostart-side routine for Power mangement. 13317 * 13318 * Context: Kernel thread context 13319 */ 13320 13321 static void 13322 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 13323 { 13324 ASSERT(un != NULL); 13325 ASSERT(bp != NULL); 13326 ASSERT(!mutex_owned(SD_MUTEX(un))); 13327 ASSERT(!mutex_owned(&un->un_pm_mutex)); 13328 13329 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 13330 13331 if (sd_pm_entry(un) != DDI_SUCCESS) { 13332 /* 13333 * Set up to return the failed buf back up the 'iodone' 13334 * side of the calling chain. 
13335 */ 13336 bioerror(bp, EIO); 13337 bp->b_resid = bp->b_bcount; 13338 13339 SD_BEGIN_IODONE(index, un, bp); 13340 13341 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 13342 return; 13343 } 13344 13345 SD_NEXT_IOSTART(index, un, bp); 13346 13347 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 13348 } 13349 13350 13351 /* 13352 * Function: sd_pm_iodone 13353 * 13354 * Description: iodone-side routine for power mangement. 13355 * 13356 * Context: may be called from interrupt context 13357 */ 13358 13359 static void 13360 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 13361 { 13362 ASSERT(un != NULL); 13363 ASSERT(bp != NULL); 13364 ASSERT(!mutex_owned(&un->un_pm_mutex)); 13365 13366 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 13367 13368 /* 13369 * After attach the following flag is only read, so don't 13370 * take the penalty of acquiring a mutex for it. 13371 */ 13372 if (un->un_f_pm_is_enabled == TRUE) { 13373 sd_pm_exit(un); 13374 } 13375 13376 SD_NEXT_IODONE(index, un, bp); 13377 13378 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 13379 } 13380 13381 13382 /* 13383 * Function: sd_core_iostart 13384 * 13385 * Description: Primary driver function for enqueuing buf(9S) structs from 13386 * the system and initiating IO to the target device 13387 * 13388 * Context: Kernel thread context. Can sleep. 13389 * 13390 * Assumptions: - The given xp->xb_blkno is absolute 13391 * (ie, relative to the start of the device). 13392 * - The IO is to be done using the native blocksize of 13393 * the device, as specified in un->un_tgt_blocksize. 13394 */ 13395 /* ARGSUSED */ 13396 static void 13397 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 13398 { 13399 struct sd_xbuf *xp; 13400 13401 ASSERT(un != NULL); 13402 ASSERT(bp != NULL); 13403 ASSERT(!mutex_owned(SD_MUTEX(un))); 13404 ASSERT(bp->b_resid == 0); 13405 13406 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 13407 13408 xp = SD_GET_XBUF(bp); 13409 ASSERT(xp != NULL); 13410 13411 mutex_enter(SD_MUTEX(un)); 13412 13413 /* 13414 * If we are currently in the failfast state, fail any new IO 13415 * that has B_FAILFAST set, then return. 13416 */ 13417 if ((bp->b_flags & B_FAILFAST) && 13418 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 13419 mutex_exit(SD_MUTEX(un)); 13420 bioerror(bp, EIO); 13421 bp->b_resid = bp->b_bcount; 13422 SD_BEGIN_IODONE(index, un, bp); 13423 return; 13424 } 13425 13426 if (SD_IS_DIRECT_PRIORITY(xp)) { 13427 /* 13428 * Priority command -- transport it immediately. 13429 * 13430 * Note: We may want to assert that USCSI_DIAGNOSE is set, 13431 * because all direct priority commands should be associated 13432 * with error recovery actions which we don't want to retry. 13433 */ 13434 sd_start_cmds(un, bp); 13435 } else { 13436 /* 13437 * Normal command -- add it to the wait queue, then start 13438 * transporting commands from the wait queue. 13439 */ 13440 sd_add_buf_to_waitq(un, bp); 13441 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 13442 sd_start_cmds(un, NULL); 13443 } 13444 13445 mutex_exit(SD_MUTEX(un)); 13446 13447 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 13448 } 13449 13450 13451 /* 13452 * Function: sd_init_cdb_limits 13453 * 13454 * Description: This is to handle scsi_pkt initialization differences 13455 * between the driver platforms. 
13456 * 13457 * Legacy behaviors: 13458 * 13459 * If the block number or the sector count exceeds the 13460 * capabilities of a Group 0 command, shift over to a 13461 * Group 1 command. We don't blindly use Group 1 13462 * commands because a) some drives (CDC Wren IVs) get a 13463 * bit confused, and b) there is probably a fair amount 13464 * of speed difference for a target to receive and decode 13465 * a 10 byte command instead of a 6 byte command. 13466 * 13467 * The xfer time difference of 6 vs 10 byte CDBs is 13468 * still significant so this code is still worthwhile. 13469 * 10 byte CDBs are very inefficient with the fas HBA driver 13470 * and older disks. Each CDB byte took 1 usec with some 13471 * popular disks. 13472 * 13473 * Context: Must be called at attach time 13474 */ 13475 13476 static void 13477 sd_init_cdb_limits(struct sd_lun *un) 13478 { 13479 int hba_cdb_limit; 13480 13481 /* 13482 * Use CDB_GROUP1 commands for most devices except for 13483 * parallel SCSI fixed drives in which case we get better 13484 * performance using CDB_GROUP0 commands (where applicable). 13485 */ 13486 un->un_mincdb = SD_CDB_GROUP1; 13487 #if !defined(__fibre) 13488 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 13489 !un->un_f_has_removable_media) { 13490 un->un_mincdb = SD_CDB_GROUP0; 13491 } 13492 #endif 13493 13494 /* 13495 * Try to read the max-cdb-length supported by HBA. 13496 */ 13497 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 13498 if (0 >= un->un_max_hba_cdb) { 13499 un->un_max_hba_cdb = CDB_GROUP4; 13500 hba_cdb_limit = SD_CDB_GROUP4; 13501 } else if (0 < un->un_max_hba_cdb && 13502 un->un_max_hba_cdb < CDB_GROUP1) { 13503 hba_cdb_limit = SD_CDB_GROUP0; 13504 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 13505 un->un_max_hba_cdb < CDB_GROUP5) { 13506 hba_cdb_limit = SD_CDB_GROUP1; 13507 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 13508 un->un_max_hba_cdb < CDB_GROUP4) { 13509 hba_cdb_limit = SD_CDB_GROUP5; 13510 } else { 13511 hba_cdb_limit = SD_CDB_GROUP4; 13512 } 13513 13514 /* 13515 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 13516 * commands for fixed disks unless we are building for a 32 bit 13517 * kernel. 13518 */ 13519 #ifdef _LP64 13520 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13521 min(hba_cdb_limit, SD_CDB_GROUP4); 13522 #else 13523 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 13524 min(hba_cdb_limit, SD_CDB_GROUP1); 13525 #endif 13526 13527 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 13528 ? sizeof (struct scsi_arq_status) : 1); 13529 if (!ISCD(un)) 13530 un->un_cmd_timeout = (ushort_t)sd_io_time; 13531 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 13532 } 13533 13534 13535 /* 13536 * Function: sd_initpkt_for_buf 13537 * 13538 * Description: Allocate and initialize for transport a scsi_pkt struct, 13539 * based upon the info specified in the given buf struct. 13540 * 13541 * Assumes the xb_blkno in the request is absolute (ie, 13542 * relative to the start of the device (NOT partition!). 13543 * Also assumes that the request is using the native block 13544 * size of the device (as returned by the READ CAPACITY 13545 * command). 13546 * 13547 * Return Code: SD_PKT_ALLOC_SUCCESS 13548 * SD_PKT_ALLOC_FAILURE 13549 * SD_PKT_ALLOC_FAILURE_NO_DMA 13550 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13551 * 13552 * Context: Kernel thread and may be called from software interrupt context 13553 * as part of a sdrunout callback. 
This function may not block or 13554 * call routines that block. 13555 */ 13556 13557 static int 13558 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 13559 { 13560 struct sd_xbuf *xp; 13561 struct scsi_pkt *pktp = NULL; 13562 struct sd_lun *un; 13563 size_t blockcount; 13564 daddr_t startblock; 13565 int rval; 13566 int cmd_flags; 13567 13568 ASSERT(bp != NULL); 13569 ASSERT(pktpp != NULL); 13570 xp = SD_GET_XBUF(bp); 13571 ASSERT(xp != NULL); 13572 un = SD_GET_UN(bp); 13573 ASSERT(un != NULL); 13574 ASSERT(mutex_owned(SD_MUTEX(un))); 13575 ASSERT(bp->b_resid == 0); 13576 13577 SD_TRACE(SD_LOG_IO_CORE, un, 13578 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 13579 13580 mutex_exit(SD_MUTEX(un)); 13581 13582 #if defined(__x86) /* DMAFREE for x86 only */ 13583 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 13584 /* 13585 * Already have a scsi_pkt -- just need DMA resources. 13586 * We must recompute the CDB in case the mapping returns 13587 * a nonzero pkt_resid. 13588 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 13589 * that is being retried, the unmap/remap of the DMA resources 13590 * will result in the entire transfer starting over again 13591 * from the very first block. 13592 */ 13593 ASSERT(xp->xb_pktp != NULL); 13594 pktp = xp->xb_pktp; 13595 } else { 13596 pktp = NULL; 13597 } 13598 #endif /* __x86 */ 13599 13600 startblock = xp->xb_blkno; /* Absolute block num. */ 13601 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 13602 13603 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 13604 13605 /* 13606 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 13607 * call scsi_init_pkt, and build the CDB. 13608 */ 13609 rval = sd_setup_rw_pkt(un, &pktp, bp, 13610 cmd_flags, sdrunout, (caddr_t)un, 13611 startblock, blockcount); 13612 13613 if (rval == 0) { 13614 /* 13615 * Success. 13616 * 13617 * If partial DMA is being used and required for this transfer, 13618 * set it up here. 13619 */ 13620 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 13621 (pktp->pkt_resid != 0)) { 13622 13623 /* 13624 * Save the CDB length and pkt_resid for the 13625 * next xfer 13626 */ 13627 xp->xb_dma_resid = pktp->pkt_resid; 13628 13629 /* rezero resid */ 13630 pktp->pkt_resid = 0; 13631 13632 } else { 13633 xp->xb_dma_resid = 0; 13634 } 13635 13636 pktp->pkt_flags = un->un_tagflags; 13637 pktp->pkt_time = un->un_cmd_timeout; 13638 pktp->pkt_comp = sdintr; 13639 13640 pktp->pkt_private = bp; 13641 *pktpp = pktp; 13642 13643 SD_TRACE(SD_LOG_IO_CORE, un, 13644 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 13645 13646 #if defined(__x86) /* DMAFREE for x86 only */ 13647 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 13648 #endif 13649 13650 mutex_enter(SD_MUTEX(un)); 13651 return (SD_PKT_ALLOC_SUCCESS); 13652 13653 } 13654 13655 /* 13656 * SD_PKT_ALLOC_FAILURE is the only expected failure code 13657 * from sd_setup_rw_pkt. 13658 */ 13659 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 13660 13661 if (rval == SD_PKT_ALLOC_FAILURE) { 13662 *pktpp = NULL; 13663 /* 13664 * Set the driver state to RWAIT to indicate the driver 13665 * is waiting on resource allocations. The driver will not 13666 * suspend, pm_suspend, or detach while the state is RWAIT. 13667 */ 13668 mutex_enter(SD_MUTEX(un)); 13669 New_state(un, SD_STATE_RWAIT); 13670 13671 SD_ERROR(SD_LOG_IO_CORE, un, 13672 "sd_initpkt_for_buf: No pktp.
exit bp:0x%p\n", bp); 13673 13674 if ((bp->b_flags & B_ERROR) != 0) { 13675 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13676 } 13677 return (SD_PKT_ALLOC_FAILURE); 13678 } else { 13679 /* 13680 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13681 * 13682 * This should never happen. Maybe someone messed with the 13683 * kernel's minphys? 13684 */ 13685 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13686 "Request rejected: too large for CDB: " 13687 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 13688 SD_ERROR(SD_LOG_IO_CORE, un, 13689 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 13690 mutex_enter(SD_MUTEX(un)); 13691 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13692 13693 } 13694 } 13695 13696 13697 /* 13698 * Function: sd_destroypkt_for_buf 13699 * 13700 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing). 13701 * 13702 * Context: Kernel thread or interrupt context 13703 */ 13704 13705 static void 13706 sd_destroypkt_for_buf(struct buf *bp) 13707 { 13708 ASSERT(bp != NULL); 13709 ASSERT(SD_GET_UN(bp) != NULL); 13710 13711 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13712 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 13713 13714 ASSERT(SD_GET_PKTP(bp) != NULL); 13715 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13716 13717 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13718 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 13719 } 13720 13721 /* 13722 * Function: sd_setup_rw_pkt 13723 * 13724 * Description: Determines appropriate CDB group for the requested LBA 13725 * and transfer length, calls scsi_init_pkt, and builds 13726 * the CDB. Do not use for partial DMA transfers except 13727 * for the initial transfer since the CDB size must 13728 * remain constant. 13729 * 13730 * Context: Kernel thread and may be called from software interrupt 13731 * context as part of a sdrunout callback. This function may not 13732 * block or call routines that block. 13733 */ 13734 13735 13736 int 13737 sd_setup_rw_pkt(struct sd_lun *un, 13738 struct scsi_pkt **pktpp, struct buf *bp, int flags, 13739 int (*callback)(caddr_t), caddr_t callback_arg, 13740 diskaddr_t lba, uint32_t blockcount) 13741 { 13742 struct scsi_pkt *return_pktp; 13743 union scsi_cdb *cdbp; 13744 struct sd_cdbinfo *cp = NULL; 13745 int i; 13746 13747 /* 13748 * See which size CDB to use, based upon the request. 13749 */ 13750 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 13751 13752 /* 13753 * Check lba and block count against sd_cdbtab limits. 13754 * In the partial DMA case, we have to use the same size 13755 * CDB for all the transfers. Check lba + blockcount 13756 * against the max LBA so we know that segment of the 13757 * transfer can use the CDB we select. 13758 */ 13759 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 13760 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 13761 13762 /* 13763 * The command will fit into the CDB type 13764 * specified by sd_cdbtab[i]. 13765 */ 13766 cp = sd_cdbtab + i; 13767 13768 /* 13769 * Call scsi_init_pkt so we can fill in the 13770 * CDB. 13771 */ 13772 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 13773 bp, cp->sc_grpcode, un->un_status_len, 0, 13774 flags, callback, callback_arg); 13775 13776 if (return_pktp != NULL) { 13777 13778 /* 13779 * Return new value of pkt 13780 */ 13781 *pktpp = return_pktp; 13782 13783 /* 13784 * To be safe, zero the CDB, ensuring there is 13785 * no leftover data from a previous command.
13786 */ 13787 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 13788 13789 /* 13790 * Handle partial DMA mapping 13791 */ 13792 if (return_pktp->pkt_resid != 0) { 13793 13794 /* 13795 * Not going to xfer as many blocks as 13796 * originally expected 13797 */ 13798 blockcount -= 13799 SD_BYTES2TGTBLOCKS(un, 13800 return_pktp->pkt_resid); 13801 } 13802 13803 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 13804 13805 /* 13806 * Set command byte based on the CDB 13807 * type we matched. 13808 */ 13809 cdbp->scc_cmd = cp->sc_grpmask | 13810 ((bp->b_flags & B_READ) ? 13811 SCMD_READ : SCMD_WRITE); 13812 13813 SD_FILL_SCSI1_LUN(un, return_pktp); 13814 13815 /* 13816 * Fill in LBA and length 13817 */ 13818 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 13819 (cp->sc_grpcode == CDB_GROUP4) || 13820 (cp->sc_grpcode == CDB_GROUP0) || 13821 (cp->sc_grpcode == CDB_GROUP5)); 13822 13823 if (cp->sc_grpcode == CDB_GROUP1) { 13824 FORMG1ADDR(cdbp, lba); 13825 FORMG1COUNT(cdbp, blockcount); 13826 return (0); 13827 } else if (cp->sc_grpcode == CDB_GROUP4) { 13828 FORMG4LONGADDR(cdbp, lba); 13829 FORMG4COUNT(cdbp, blockcount); 13830 return (0); 13831 } else if (cp->sc_grpcode == CDB_GROUP0) { 13832 FORMG0ADDR(cdbp, lba); 13833 FORMG0COUNT(cdbp, blockcount); 13834 return (0); 13835 } else if (cp->sc_grpcode == CDB_GROUP5) { 13836 FORMG5ADDR(cdbp, lba); 13837 FORMG5COUNT(cdbp, blockcount); 13838 return (0); 13839 } 13840 13841 /* 13842 * It should be impossible to not match one 13843 * of the CDB types above, so we should never 13844 * reach this point. Set the CDB command byte 13845 * to test-unit-ready to avoid writing 13846 * to somewhere we don't intend. 13847 */ 13848 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 13849 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13850 } else { 13851 /* 13852 * Couldn't get scsi_pkt 13853 */ 13854 return (SD_PKT_ALLOC_FAILURE); 13855 } 13856 } 13857 } 13858 13859 /* 13860 * None of the available CDB types were suitable. This really 13861 * should never happen: on a 64 bit system we support 13862 * READ16/WRITE16 which will hold an entire 64 bit disk address 13863 * and on a 32 bit system we will refuse to bind to a device 13864 * larger than 2TB so addresses will never be larger than 32 bits. 13865 */ 13866 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13867 } 13868 13869 /* 13870 * Function: sd_setup_next_rw_pkt 13871 * 13872 * Description: Setup packet for partial DMA transfers, except for the 13873 * initial transfer. sd_setup_rw_pkt should be used for 13874 * the initial transfer. 13875 * 13876 * Context: Kernel thread and may be called from interrupt context. 13877 */ 13878 13879 int 13880 sd_setup_next_rw_pkt(struct sd_lun *un, 13881 struct scsi_pkt *pktp, struct buf *bp, 13882 diskaddr_t lba, uint32_t blockcount) 13883 { 13884 uchar_t com; 13885 union scsi_cdb *cdbp; 13886 uchar_t cdb_group_id; 13887 13888 ASSERT(pktp != NULL); 13889 ASSERT(pktp->pkt_cdbp != NULL); 13890 13891 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 13892 com = cdbp->scc_cmd; 13893 cdb_group_id = CDB_GROUPID(com); 13894 13895 ASSERT((cdb_group_id == CDB_GROUPID_0) || 13896 (cdb_group_id == CDB_GROUPID_1) || 13897 (cdb_group_id == CDB_GROUPID_4) || 13898 (cdb_group_id == CDB_GROUPID_5)); 13899 13900 /* 13901 * Move pkt to the next portion of the xfer. 13902 * func is NULL_FUNC so we do not have to release 13903 * the disk mutex here. 13904 */ 13905 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 13906 NULL_FUNC, NULL) == pktp) { 13907 /* Success. 
Handle partial DMA */ 13908 if (pktp->pkt_resid != 0) { 13909 blockcount -= 13910 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 13911 } 13912 13913 cdbp->scc_cmd = com; 13914 SD_FILL_SCSI1_LUN(un, pktp); 13915 if (cdb_group_id == CDB_GROUPID_1) { 13916 FORMG1ADDR(cdbp, lba); 13917 FORMG1COUNT(cdbp, blockcount); 13918 return (0); 13919 } else if (cdb_group_id == CDB_GROUPID_4) { 13920 FORMG4LONGADDR(cdbp, lba); 13921 FORMG4COUNT(cdbp, blockcount); 13922 return (0); 13923 } else if (cdb_group_id == CDB_GROUPID_0) { 13924 FORMG0ADDR(cdbp, lba); 13925 FORMG0COUNT(cdbp, blockcount); 13926 return (0); 13927 } else if (cdb_group_id == CDB_GROUPID_5) { 13928 FORMG5ADDR(cdbp, lba); 13929 FORMG5COUNT(cdbp, blockcount); 13930 return (0); 13931 } 13932 13933 /* Unreachable */ 13934 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13935 } 13936 13937 /* 13938 * Error setting up next portion of cmd transfer. 13939 * Something is definitely very wrong and this 13940 * should not happen. 13941 */ 13942 return (SD_PKT_ALLOC_FAILURE); 13943 } 13944 13945 /* 13946 * Function: sd_initpkt_for_uscsi 13947 * 13948 * Description: Allocate and initialize for transport a scsi_pkt struct, 13949 * based upon the info specified in the given uscsi_cmd struct. 13950 * 13951 * Return Code: SD_PKT_ALLOC_SUCCESS 13952 * SD_PKT_ALLOC_FAILURE 13953 * SD_PKT_ALLOC_FAILURE_NO_DMA 13954 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13955 * 13956 * Context: Kernel thread and may be called from software interrupt context 13957 * as part of a sdrunout callback. This function may not block or 13958 * call routines that block 13959 */ 13960 13961 static int 13962 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 13963 { 13964 struct uscsi_cmd *uscmd; 13965 struct sd_xbuf *xp; 13966 struct scsi_pkt *pktp; 13967 struct sd_lun *un; 13968 uint32_t flags = 0; 13969 13970 ASSERT(bp != NULL); 13971 ASSERT(pktpp != NULL); 13972 xp = SD_GET_XBUF(bp); 13973 ASSERT(xp != NULL); 13974 un = SD_GET_UN(bp); 13975 ASSERT(un != NULL); 13976 ASSERT(mutex_owned(SD_MUTEX(un))); 13977 13978 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 13979 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 13980 ASSERT(uscmd != NULL); 13981 13982 SD_TRACE(SD_LOG_IO_CORE, un, 13983 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 13984 13985 /* 13986 * Allocate the scsi_pkt for the command. 13987 * 13988 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 13989 * during scsi_init_pkt time and will continue to use the 13990 * same path as long as the same scsi_pkt is used without 13991 * intervening scsi_dmafree(). Since a uscsi command does 13992 * not call scsi_dmafree() before retrying a failed command, 13993 * it is necessary to make sure PKT_DMA_PARTIAL flag is NOT 13994 * set such that scsi_vhci can use another available path for 13995 * retry. Besides, a uscsi command does not allow DMA breakup, 13996 * so there is no need to set PKT_DMA_PARTIAL flag. 13997 * 13998 * More fundamentally, we can't support breaking up this DMA into 13999 * multiple windows on x86. There is, in general, no guarantee 14000 * that arbitrary SCSI commands are idempotent, which is required 14001 * if we want to use multiple windows for a given command. 14002 */ 14003 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 14004 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 14005 ((bp->b_bcount != 0) ?
bp : NULL), uscmd->uscsi_cdblen, 14006 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status) 14007 - sizeof (struct scsi_extended_sense)), 0, 14008 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ, 14009 sdrunout, (caddr_t)un); 14010 } else { 14011 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 14012 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 14013 sizeof (struct scsi_arq_status), 0, 14014 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 14015 sdrunout, (caddr_t)un); 14016 } 14017 14018 if (pktp == NULL) { 14019 *pktpp = NULL; 14020 /* 14021 * Set the driver state to RWAIT to indicate the driver 14022 * is waiting on resource allocations. The driver will not 14023 * suspend, pm_suspend, or detach while the state is RWAIT. 14024 */ 14025 New_state(un, SD_STATE_RWAIT); 14026 14027 SD_ERROR(SD_LOG_IO_CORE, un, 14028 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 14029 14030 if ((bp->b_flags & B_ERROR) != 0) { 14031 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 14032 } 14033 return (SD_PKT_ALLOC_FAILURE); 14034 } 14035 14036 /* 14037 * We do not do DMA breakup for USCSI commands, so return failure 14038 * here if all the needed DMA resources were not allocated. 14039 */ 14040 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 14041 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 14042 scsi_destroy_pkt(pktp); 14043 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 14044 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 14045 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 14046 } 14047 14048 /* Init the cdb from the given uscsi struct */ 14049 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 14050 uscmd->uscsi_cdb[0], 0, 0, 0); 14051 14052 SD_FILL_SCSI1_LUN(un, pktp); 14053 14054 /* 14055 * Set up the optional USCSI flags. See the uscsi(4I) man page 14056 * for a listing of the supported flags. 14057 */ 14058 14059 if (uscmd->uscsi_flags & USCSI_SILENT) { 14060 flags |= FLAG_SILENT; 14061 } 14062 14063 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 14064 flags |= FLAG_DIAGNOSE; 14065 } 14066 14067 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 14068 flags |= FLAG_ISOLATE; 14069 } 14070 14071 if (un->un_f_is_fibre == FALSE) { 14072 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 14073 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 14074 } 14075 } 14076 14077 /* 14078 * Set the pkt flags here so we save time later. 14079 * Note: These flags are NOT in the uscsi man page!!! 14080 */ 14081 if (uscmd->uscsi_flags & USCSI_HEAD) { 14082 flags |= FLAG_HEAD; 14083 } 14084 14085 if (uscmd->uscsi_flags & USCSI_NOINTR) { 14086 flags |= FLAG_NOINTR; 14087 } 14088 14089 /* 14090 * For tagged queueing, things get a bit complicated. 14091 * Check first for head of queue and last for ordered queue. 14092 * If neither head nor order, use the default driver tag flags. 14093 */ 14094 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 14095 if (uscmd->uscsi_flags & USCSI_HTAG) { 14096 flags |= FLAG_HTAG; 14097 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 14098 flags |= FLAG_OTAG; 14099 } else { 14100 flags |= un->un_tagflags & FLAG_TAGMASK; 14101 } 14102 } 14103 14104 if (uscmd->uscsi_flags & USCSI_NODISCON) { 14105 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 14106 } 14107 14108 pktp->pkt_flags = flags; 14109 14110 /* Transfer uscsi information to scsi_pkt */ 14111 (void) scsi_uscsi_pktinit(uscmd, pktp); 14112 14113 /* Copy the caller's CDB into the pkt...
*/ 14114 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 14115 14116 if (uscmd->uscsi_timeout == 0) { 14117 pktp->pkt_time = un->un_uscsi_timeout; 14118 } else { 14119 pktp->pkt_time = uscmd->uscsi_timeout; 14120 } 14121 14122 /* need it later to identify USCSI request in sdintr */ 14123 xp->xb_pkt_flags |= SD_XB_USCSICMD; 14124 14125 xp->xb_sense_resid = uscmd->uscsi_rqresid; 14126 14127 pktp->pkt_private = bp; 14128 pktp->pkt_comp = sdintr; 14129 *pktpp = pktp; 14130 14131 SD_TRACE(SD_LOG_IO_CORE, un, 14132 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 14133 14134 return (SD_PKT_ALLOC_SUCCESS); 14135 } 14136 14137 14138 /* 14139 * Function: sd_destroypkt_for_uscsi 14140 * 14141 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 14142 * IOs. Also saves relevant info into the associated uscsi_cmd 14143 * struct. 14144 * 14145 * Context: May be called under interrupt context 14146 */ 14147 14148 static void 14149 sd_destroypkt_for_uscsi(struct buf *bp) 14150 { 14151 struct uscsi_cmd *uscmd; 14152 struct sd_xbuf *xp; 14153 struct scsi_pkt *pktp; 14154 struct sd_lun *un; 14155 struct sd_uscsi_info *suip; 14156 14157 ASSERT(bp != NULL); 14158 xp = SD_GET_XBUF(bp); 14159 ASSERT(xp != NULL); 14160 un = SD_GET_UN(bp); 14161 ASSERT(un != NULL); 14162 ASSERT(!mutex_owned(SD_MUTEX(un))); 14163 pktp = SD_GET_PKTP(bp); 14164 ASSERT(pktp != NULL); 14165 14166 SD_TRACE(SD_LOG_IO_CORE, un, 14167 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 14168 14169 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 14170 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 14171 ASSERT(uscmd != NULL); 14172 14173 /* Save the status and the residual into the uscsi_cmd struct */ 14174 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 14175 uscmd->uscsi_resid = bp->b_resid; 14176 14177 /* Transfer scsi_pkt information to uscsi */ 14178 (void) scsi_uscsi_pktfini(pktp, uscmd); 14179 14180 /* 14181 * If enabled, copy any saved sense data into the area specified 14182 * by the uscsi command. 14183 */ 14184 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 14185 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 14186 /* 14187 * Note: uscmd->uscsi_rqbuf should always point to a buffer 14188 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 14189 */ 14190 uscmd->uscsi_rqstatus = xp->xb_sense_status; 14191 uscmd->uscsi_rqresid = xp->xb_sense_resid; 14192 if (uscmd->uscsi_rqlen > SENSE_LENGTH) { 14193 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 14194 MAX_SENSE_LENGTH); 14195 } else { 14196 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, 14197 SENSE_LENGTH); 14198 } 14199 } 14200 /* 14201 * The following assignments are for SCSI FMA. 14202 */ 14203 ASSERT(xp->xb_private != NULL); 14204 suip = (struct sd_uscsi_info *)xp->xb_private; 14205 suip->ui_pkt_reason = pktp->pkt_reason; 14206 suip->ui_pkt_state = pktp->pkt_state; 14207 suip->ui_pkt_statistics = pktp->pkt_statistics; 14208 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp); 14209 14210 /* We are done with the scsi_pkt; free it now */ 14211 ASSERT(SD_GET_PKTP(bp) != NULL); 14212 scsi_destroy_pkt(SD_GET_PKTP(bp)); 14213 14214 SD_TRACE(SD_LOG_IO_CORE, un, 14215 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 14216 } 14217 14218 14219 /* 14220 * Function: sd_bioclone_alloc 14221 * 14222 * Description: Allocate a buf(9S) and init it as per the given buf 14223 * and the various arguments. The associated sd_xbuf 14224 * struct is (nearly) duplicated.
The struct buf *bp 14225 * argument is saved in new_xp->xb_private. 14226 * 14227 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 14228 * datalen - size of data area for the shadow bp 14229 * blkno - starting LBA 14230 * func - function pointer for b_iodone in the shadow buf. (May 14231 * be NULL if none.) 14232 * 14233 * Return Code: Pointer to the allocated buf(9S) struct 14234 * 14235 * Context: Can sleep. 14236 */ 14237 14238 static struct buf * 14239 sd_bioclone_alloc(struct buf *bp, size_t datalen, daddr_t blkno, 14240 int (*func)(struct buf *)) 14241 { 14242 struct sd_lun *un; 14243 struct sd_xbuf *xp; 14244 struct sd_xbuf *new_xp; 14245 struct buf *new_bp; 14246 14247 ASSERT(bp != NULL); 14248 xp = SD_GET_XBUF(bp); 14249 ASSERT(xp != NULL); 14250 un = SD_GET_UN(bp); 14251 ASSERT(un != NULL); 14252 ASSERT(!mutex_owned(SD_MUTEX(un))); 14253 14254 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 14255 NULL, KM_SLEEP); 14256 14257 new_bp->b_lblkno = blkno; 14258 14259 /* 14260 * Allocate an xbuf for the shadow bp and copy the contents of the 14261 * original xbuf into it. 14262 */ 14263 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14264 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 14265 14266 /* 14267 * The given bp is automatically saved in the xb_private member 14268 * of the new xbuf. Callers are allowed to depend on this. 14269 */ 14270 new_xp->xb_private = bp; 14271 14272 new_bp->b_private = new_xp; 14273 14274 return (new_bp); 14275 } 14276 14277 /* 14278 * Function: sd_shadow_buf_alloc 14279 * 14280 * Description: Allocate a buf(9S) and init it as per the given buf 14281 * and the various arguments. The associated sd_xbuf 14282 * struct is (nearly) duplicated. The struct buf *bp 14283 * argument is saved in new_xp->xb_private. 14284 * 14285 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 14286 * datalen - size of data area for the shadow bp 14287 * bflags - B_READ or B_WRITE (pseudo flag) 14288 * blkno - starting LBA 14289 * func - function pointer for b_iodone in the shadow buf. (May 14290 * be NULL if none.) 14291 * 14292 * Return Code: Pointer to the allocated buf(9S) struct 14293 * 14294 * Context: Can sleep.
14295 */ 14296 14297 static struct buf * 14298 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 14299 daddr_t blkno, int (*func)(struct buf *)) 14300 { 14301 struct sd_lun *un; 14302 struct sd_xbuf *xp; 14303 struct sd_xbuf *new_xp; 14304 struct buf *new_bp; 14305 14306 ASSERT(bp != NULL); 14307 xp = SD_GET_XBUF(bp); 14308 ASSERT(xp != NULL); 14309 un = SD_GET_UN(bp); 14310 ASSERT(un != NULL); 14311 ASSERT(!mutex_owned(SD_MUTEX(un))); 14312 14313 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 14314 bp_mapin(bp); 14315 } 14316 14317 bflags &= (B_READ | B_WRITE); 14318 #if defined(__x86) 14319 new_bp = getrbuf(KM_SLEEP); 14320 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 14321 new_bp->b_bcount = datalen; 14322 new_bp->b_flags = bflags | 14323 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW)); 14324 #else 14325 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 14326 datalen, bflags, SLEEP_FUNC, NULL); 14327 #endif 14328 new_bp->av_forw = NULL; 14329 new_bp->av_back = NULL; 14330 new_bp->b_dev = bp->b_dev; 14331 new_bp->b_blkno = blkno; 14332 new_bp->b_iodone = func; 14333 new_bp->b_edev = bp->b_edev; 14334 new_bp->b_resid = 0; 14335 14336 /* We need to preserve the B_FAILFAST flag */ 14337 if (bp->b_flags & B_FAILFAST) { 14338 new_bp->b_flags |= B_FAILFAST; 14339 } 14340 14341 /* 14342 * Allocate an xbuf for the shadow bp and copy the contents of the 14343 * original xbuf into it. 14344 */ 14345 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 14346 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 14347 14348 /* Needed later to copy data between the shadow buf & original buf! */ 14349 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 14350 14351 /* 14352 * The given bp is automatically saved in the xb_private member 14353 * of the new xbuf. Callers are allowed to depend on this. 14354 */ 14355 new_xp->xb_private = bp; 14356 14357 new_bp->b_private = new_xp; 14358 14359 return (new_bp); 14360 } 14361 14362 /* 14363 * Function: sd_bioclone_free 14364 * 14365 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 14366 * in the partition-overrun (larger-than-partition) case. 14367 * 14368 * Context: May be called under interrupt context 14369 */ 14370 14371 static void 14372 sd_bioclone_free(struct buf *bp) 14373 { 14374 struct sd_xbuf *xp; 14375 14376 ASSERT(bp != NULL); 14377 xp = SD_GET_XBUF(bp); 14378 ASSERT(xp != NULL); 14379 14380 /* 14381 * Call bp_mapout() before freeing the buf, in case a lower 14382 * layer or HBA had done a bp_mapin(). We must do this here 14383 * as we are the "originator" of the shadow buf. 14384 */ 14385 bp_mapout(bp); 14386 14387 /* 14388 * Null out b_iodone before freeing the bp, to ensure that the driver 14389 * never gets confused by a stale value in this field. (Just a little 14390 * extra defensiveness here.) 14391 */ 14392 bp->b_iodone = NULL; 14393 14394 freerbuf(bp); 14395 14396 kmem_free(xp, sizeof (struct sd_xbuf)); 14397 } 14398 14399 /* 14400 * Function: sd_shadow_buf_free 14401 * 14402 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 14403 * 14404 * Context: May be called under interrupt context 14405 */ 14406 14407 static void 14408 sd_shadow_buf_free(struct buf *bp) 14409 { 14410 struct sd_xbuf *xp; 14411 14412 ASSERT(bp != NULL); 14413 xp = SD_GET_XBUF(bp); 14414 ASSERT(xp != NULL); 14415 14416 #if defined(__sparc) 14417 /* 14418 * Call bp_mapout() before freeing the buf, in case a lower 14419 * layer or HBA had done a bp_mapin().
We must do this here 14420 * as we are the "originator" of the shadow buf. 14421 */ 14422 bp_mapout(bp); 14423 #endif 14424 14425 /* 14426 * Null out b_iodone before freeing the bp, to ensure that the driver 14427 * never gets confused by a stale value in this field. (Just a little 14428 * extra defensiveness here.) 14429 */ 14430 bp->b_iodone = NULL; 14431 14432 #if defined(__x86) 14433 kmem_free(bp->b_un.b_addr, bp->b_bcount); 14434 freerbuf(bp); 14435 #else 14436 scsi_free_consistent_buf(bp); 14437 #endif 14438 14439 kmem_free(xp, sizeof (struct sd_xbuf)); 14440 } 14441 14442 14443 /* 14444 * Function: sd_print_transport_rejected_message 14445 * 14446 * Description: This implements the ludicrously complex rules for printing 14447 * a "transport rejected" message. This is to address the 14448 * specific problem of having a flood of this error message 14449 * produced when a failover occurs. 14450 * 14451 * Context: Any. 14452 */ 14453 14454 static void 14455 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 14456 int code) 14457 { 14458 ASSERT(un != NULL); 14459 ASSERT(mutex_owned(SD_MUTEX(un))); 14460 ASSERT(xp != NULL); 14461 14462 /* 14463 * Print the "transport rejected" message under the following 14464 * conditions: 14465 * 14466 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 14467 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 14468 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 14469 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 14470 * scsi_transport(9F) (which indicates that the target might have 14471 * gone off-line). This uses the un->un_tran_fatal_count 14472 * counter, which is incremented whenever a TRAN_FATAL_ERROR is 14473 * received, and reset to zero whenever a TRAN_ACCEPT is returned 14474 * from scsi_transport(). 14475 * 14476 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 14477 * the preceding cases in order for the message to be printed. 14478 */ 14479 if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) && 14480 (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) { 14481 if ((sd_level_mask & SD_LOGMASK_DIAG) || 14482 (code != TRAN_FATAL_ERROR) || 14483 (un->un_tran_fatal_count == 1)) { 14484 switch (code) { 14485 case TRAN_BADPKT: 14486 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14487 "transport rejected bad packet\n"); 14488 break; 14489 case TRAN_FATAL_ERROR: 14490 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14491 "transport rejected fatal error\n"); 14492 break; 14493 default: 14494 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14495 "transport rejected (%d)\n", code); 14496 break; 14497 } 14498 } 14499 } 14500 } 14501 14502 14503 /* 14504 * Function: sd_add_buf_to_waitq 14505 * 14506 * Description: Add the given buf(9S) struct to the wait queue for the 14507 * instance. If sorting is enabled, then the buf is added 14508 * to the queue via an elevator sort algorithm (a la 14509 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 14510 * If sorting is not enabled, then the buf is just added 14511 * to the end of the wait queue. 14512 * 14513 * Return Code: void 14514 * 14515 * Context: Does not sleep/block, therefore technically can be called 14516 * from any context. However if sorting is enabled then the 14517 * execution time is indeterminate, and may take a long time if 14518 * the wait queue grows large.
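 *
 * A compact userland sketch of the same one-way elevator insert
 * follows (hypothetical types; the driver's real sort key is
 * SD_GET_BLKNO(bp), its link field is av_forw, and the tail-pointer
 * bookkeeping done below is omitted here):
 */

#include <stddef.h>
#include <stdint.h>

struct req {
	struct req *next;	/* stands in for av_forw */
	uint64_t blkno;		/* stands in for SD_GET_BLKNO() */
};

static void
elevator_insert(struct req **headp, struct req *rp)
{
	struct req *ap = *headp;

	if (ap == NULL) {			/* empty queue */
		rp->next = NULL;
		*headp = rp;
		return;
	}
	if (rp->blkno < ap->blkno) {
		/* Belongs in the second (wrapped-around) run. */
		while (ap->next != NULL) {
			if (ap->next->blkno < ap->blkno) {
				/* ap->next starts the second run. */
				while (ap->next != NULL &&
				    ap->next->blkno <= rp->blkno)
					ap = ap->next;
				break;
			}
			ap = ap->next;
		}
	} else {
		/* Sort into the first (current) run. */
		while (ap->next != NULL &&
		    ap->next->blkno >= ap->blkno &&	/* no inversion */
		    ap->next->blkno <= rp->blkno)
			ap = ap->next;
	}
	rp->next = ap->next;			/* insert after ap */
	ap->next = rp;
}

/*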
14519 */ 14520 14521 static void 14522 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 14523 { 14524 struct buf *ap; 14525 14526 ASSERT(bp != NULL); 14527 ASSERT(un != NULL); 14528 ASSERT(mutex_owned(SD_MUTEX(un))); 14529 14530 /* If the queue is empty, add the buf as the only entry & return. */ 14531 if (un->un_waitq_headp == NULL) { 14532 ASSERT(un->un_waitq_tailp == NULL); 14533 un->un_waitq_headp = un->un_waitq_tailp = bp; 14534 bp->av_forw = NULL; 14535 return; 14536 } 14537 14538 ASSERT(un->un_waitq_tailp != NULL); 14539 14540 /* 14541 * If sorting is disabled, just add the buf to the tail end of 14542 * the wait queue and return. 14543 */ 14544 if (un->un_f_disksort_disabled || un->un_f_enable_rmw) { 14545 un->un_waitq_tailp->av_forw = bp; 14546 un->un_waitq_tailp = bp; 14547 bp->av_forw = NULL; 14548 return; 14549 } 14550 14551 /* 14552 * Sort thru the list of requests currently on the wait queue 14553 * and add the new buf request at the appropriate position. 14554 * 14555 * The un->un_waitq_headp is an activity chain pointer on which 14556 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 14557 * first queue holds those requests which are positioned after 14558 * the current SD_GET_BLKNO() (in the first request); the second holds 14559 * requests which came in after their SD_GET_BLKNO() number was passed. 14560 * Thus we implement a one way scan, retracting after reaching 14561 * the end of the drive to the first request on the second 14562 * queue, at which time it becomes the first queue. 14563 * A one-way scan is natural because of the way UNIX read-ahead 14564 * blocks are allocated. 14565 * 14566 * If we lie after the first request, then we must locate the 14567 * second request list and add ourselves to it. 14568 */ 14569 ap = un->un_waitq_headp; 14570 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 14571 while (ap->av_forw != NULL) { 14572 /* 14573 * Look for an "inversion" in the (normally 14574 * ascending) block numbers. This indicates 14575 * the start of the second request list. 14576 */ 14577 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 14578 /* 14579 * Search the second request list for the 14580 * first request at a larger block number. 14581 * We go before that; however if there is 14582 * no such request, we go at the end. 14583 */ 14584 do { 14585 if (SD_GET_BLKNO(bp) < 14586 SD_GET_BLKNO(ap->av_forw)) { 14587 goto insert; 14588 } 14589 ap = ap->av_forw; 14590 } while (ap->av_forw != NULL); 14591 goto insert; /* after last */ 14592 } 14593 ap = ap->av_forw; 14594 } 14595 14596 /* 14597 * No inversions... we will go after the last, and 14598 * be the first request in the second request list. 14599 */ 14600 goto insert; 14601 } 14602 14603 /* 14604 * Request is at/after the current request... 14605 * sort in the first request list. 14606 */ 14607 while (ap->av_forw != NULL) { 14608 /* 14609 * We want to go after the current request (1) if 14610 * there is an inversion after it (i.e. it is the end 14611 * of the first request list), or (2) if the next 14612 * request is a larger block no. than our request. 14613 */ 14614 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 14615 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 14616 goto insert; 14617 } 14618 ap = ap->av_forw; 14619 } 14620 14621 /* 14622 * Neither a second list nor a larger request, therefore 14623 * we go at the end of the first list (which is the same 14624 * as the end of the whole schebang). 
14625 */ 14626 insert: 14627 bp->av_forw = ap->av_forw; 14628 ap->av_forw = bp; 14629 14630 /* 14631 * If we inserted onto the tail end of the waitq, make sure the 14632 * tail pointer is updated. 14633 */ 14634 if (ap == un->un_waitq_tailp) { 14635 un->un_waitq_tailp = bp; 14636 } 14637 } 14638 14639 14640 /* 14641 * Function: sd_start_cmds 14642 * 14643 * Description: Remove and transport cmds from the driver queues. 14644 * 14645 * Arguments: un - pointer to the unit (soft state) struct for the target. 14646 * 14647 * immed_bp - ptr to a buf to be transported immediately. Only 14648 * the immed_bp is transported; bufs on the waitq are not 14649 * processed and the un_retry_bp is not checked. If immed_bp is 14650 * NULL, then normal queue processing is performed. 14651 * 14652 * Context: May be called from kernel thread context, interrupt context, 14653 * or runout callback context. This function may not block or 14654 * call routines that block. 14655 */ 14656 14657 static void 14658 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 14659 { 14660 struct sd_xbuf *xp; 14661 struct buf *bp; 14662 void (*statp)(kstat_io_t *); 14663 #if defined(__x86) /* DMAFREE for x86 only */ 14664 void (*saved_statp)(kstat_io_t *); 14665 #endif 14666 int rval; 14667 struct sd_fm_internal *sfip = NULL; 14668 14669 ASSERT(un != NULL); 14670 ASSERT(mutex_owned(SD_MUTEX(un))); 14671 ASSERT(un->un_ncmds_in_transport >= 0); 14672 ASSERT(un->un_throttle >= 0); 14673 14674 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 14675 14676 do { 14677 #if defined(__x86) /* DMAFREE for x86 only */ 14678 saved_statp = NULL; 14679 #endif 14680 14681 /* 14682 * If we are syncing or dumping, fail the command to 14683 * avoid recursively calling back into scsi_transport(). 14684 * The dump I/O itself uses a separate code path so this 14685 * only prevents non-dump I/O from being sent while dumping. 14686 * File system sync takes place before dumping begins. 14687 * During panic, filesystem I/O is allowed provided 14688 * un_in_callback is <= 1. This is to prevent recursion 14689 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14690 * sd_start_cmds and so on. See panic.c for more information 14691 * about the states the system can be in during panic. 14692 */ 14693 if ((un->un_state == SD_STATE_DUMPING) || 14694 (ddi_in_panic() && (un->un_in_callback > 1))) { 14695 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14696 "sd_start_cmds: panicking\n"); 14697 goto exit; 14698 } 14699 14700 if ((bp = immed_bp) != NULL) { 14701 /* 14702 * We have a bp that must be transported immediately. 14703 * It's OK to transport the immed_bp here without doing 14704 * the throttle limit check because the immed_bp is 14705 * always used in a retry/recovery case. This means 14706 * that we know we are not at the throttle limit by 14707 * virtue of the fact that to get here we must have 14708 * already gotten a command back via sdintr(). This also 14709 * relies on (1) the command on un_retry_bp preventing 14710 * further commands from the waitq from being issued; 14711 * and (2) the code in sd_retry_command checking the 14712 * throttle limit before issuing a delayed or immediate 14713 * retry. This holds even if the throttle limit is 14714 * currently ratcheted down from its maximum value. 
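 *
 * In outline, each pass of the loop selects work in the priority order
 * sketched below (a condensed, self-contained rendering of the selection
 * logic that follows; the types and names are hypothetical):
 *
 *	struct work {
 *		struct work *next;
 *	};
 *
 *	static struct work *
 *	pick_next(struct work *immed, struct work **waitq_headp,
 *	    int ncmds_in_transport, int throttle, int blocked)
 *	{
 *		struct work *w;
 *
 *		if (immed != NULL)
 *			return (immed);		// bypasses throttle checks
 *		if (*waitq_headp == NULL || blocked ||
 *		    ncmds_in_transport >= throttle)
 *			return (NULL);		// nothing transportable now
 *		w = *waitq_headp;		// dequeue the waitq head
 *		*waitq_headp = w->next;
 *		return (w);
 *	}
 *
 * where "blocked" stands for the pending-retry and pending-callback
 * conditions enumerated in the comment below.
 *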
14715 */
14716 statp = kstat_runq_enter;
14717 if (bp == un->un_retry_bp) {
14718 ASSERT((un->un_retry_statp == NULL) ||
14719 (un->un_retry_statp == kstat_waitq_enter) ||
14720 (un->un_retry_statp ==
14721 kstat_runq_back_to_waitq));
14722 /*
14723 * If the waitq kstat was incremented when
14724 * sd_set_retry_bp() queued this bp for a retry,
14725 * then we must set up statp so that the waitq
14726 * count will get decremented correctly below.
14727 * Also we must clear un->un_retry_statp to
14728 * ensure that we do not act on a stale value
14729 * in this field.
14730 */
14731 if ((un->un_retry_statp == kstat_waitq_enter) ||
14732 (un->un_retry_statp ==
14733 kstat_runq_back_to_waitq)) {
14734 statp = kstat_waitq_to_runq;
14735 }
14736 #if defined(__x86) /* DMAFREE for x86 only */
14737 saved_statp = un->un_retry_statp;
14738 #endif
14739 un->un_retry_statp = NULL;
14740
14741 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14742 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
14743 "un_throttle:%d un_ncmds_in_transport:%d\n",
14744 un, un->un_retry_bp, un->un_throttle,
14745 un->un_ncmds_in_transport);
14746 } else {
14747 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
14748 "processing priority bp:0x%p\n", bp);
14749 }
14750
14751 } else if ((bp = un->un_waitq_headp) != NULL) {
14752 /*
14753 * A command on the waitq is ready to go, but do not
14754 * send it if:
14755 *
14756 * (1) the throttle limit has been reached, or
14757 * (2) a retry is pending, or
14758 * (3) a START_STOP_UNIT callback is pending, or
14759 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
14760 * command is pending.
14761 *
14762 * For all of these conditions, IO processing will
14763 * restart after the condition is cleared.
14764 */
14765 if (un->un_ncmds_in_transport >= un->un_throttle) {
14766 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14767 "sd_start_cmds: exiting, "
14768 "throttle limit reached!\n");
14769 goto exit;
14770 }
14771 if (un->un_retry_bp != NULL) {
14772 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14773 "sd_start_cmds: exiting, retry pending!\n");
14774 goto exit;
14775 }
14776 if (un->un_startstop_timeid != NULL) {
14777 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14778 "sd_start_cmds: exiting, "
14779 "START_STOP pending!\n");
14780 goto exit;
14781 }
14782 if (un->un_direct_priority_timeid != NULL) {
14783 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14784 "sd_start_cmds: exiting, "
14785 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
14786 goto exit;
14787 }
14788
14789 /* Dequeue the command */
14790 un->un_waitq_headp = bp->av_forw;
14791 if (un->un_waitq_headp == NULL) {
14792 un->un_waitq_tailp = NULL;
14793 }
14794 bp->av_forw = NULL;
14795 statp = kstat_waitq_to_runq;
14796 SD_TRACE(SD_LOG_IO_CORE, un,
14797 "sd_start_cmds: processing waitq bp:0x%p\n", bp);
14798
14799 } else {
14800 /* No work to do so bail out now */
14801 SD_TRACE(SD_LOG_IO_CORE, un,
14802 "sd_start_cmds: no more work, exiting!\n");
14803 goto exit;
14804 }
14805
14806 /*
14807 * Reset the state to normal. This is the mechanism by which
14808 * the state transitions from either SD_STATE_RWAIT or
14809 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
14810 * If state is SD_STATE_PM_CHANGING then this command is
14811 * part of the device power control and the state must
14812 * not be put back to normal. Doing so would
14813 * allow new commands to proceed when they shouldn't,
14814 * as the device may be going off.
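 *
 * A minimal sketch of this guarded transition (hypothetical enum; the
 * driver's New_state() additionally records the previous state):
 *
 *	enum lun_state {
 *		S_NORMAL, S_RWAIT, S_OFFLINE, S_SUSPENDED, S_PM_CHANGING
 *	};
 *
 *	static enum lun_state
 *	state_for_dispatch(enum lun_state cur)
 *	{
 *		// SUSPENDED and PM_CHANGING must be preserved; other
 *		// states (e.g. RWAIT, OFFLINE) snap back to NORMAL when
 *		// a command is about to be transported.
 *		if (cur == S_SUSPENDED || cur == S_PM_CHANGING)
 *			return (cur);
 *		return (S_NORMAL);
 *	}
 *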
14815 */ 14816 if ((un->un_state != SD_STATE_SUSPENDED) && 14817 (un->un_state != SD_STATE_PM_CHANGING)) { 14818 New_state(un, SD_STATE_NORMAL); 14819 } 14820 14821 xp = SD_GET_XBUF(bp); 14822 ASSERT(xp != NULL); 14823 14824 #if defined(__x86) /* DMAFREE for x86 only */ 14825 /* 14826 * Allocate the scsi_pkt if we need one, or attach DMA 14827 * resources if we have a scsi_pkt that needs them. The 14828 * latter should only occur for commands that are being 14829 * retried. 14830 */ 14831 if ((xp->xb_pktp == NULL) || 14832 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14833 #else 14834 if (xp->xb_pktp == NULL) { 14835 #endif 14836 /* 14837 * There is no scsi_pkt allocated for this buf. Call 14838 * the initpkt function to allocate & init one. 14839 * 14840 * The scsi_init_pkt runout callback functionality is 14841 * implemented as follows: 14842 * 14843 * 1) The initpkt function always calls 14844 * scsi_init_pkt(9F) with sdrunout specified as the 14845 * callback routine. 14846 * 2) A successful packet allocation is initialized and 14847 * the I/O is transported. 14848 * 3) The I/O associated with an allocation resource 14849 * failure is left on its queue to be retried via 14850 * runout or the next I/O. 14851 * 4) The I/O associated with a DMA error is removed 14852 * from the queue and failed with EIO. Processing of 14853 * the transport queues is also halted to be 14854 * restarted via runout or the next I/O. 14855 * 5) The I/O associated with a CDB size or packet 14856 * size error is removed from the queue and failed 14857 * with EIO. Processing of the transport queues is 14858 * continued. 14859 * 14860 * Note: there is no interface for canceling a runout 14861 * callback. To prevent the driver from detaching or 14862 * suspending while a runout is pending the driver 14863 * state is set to SD_STATE_RWAIT 14864 * 14865 * Note: using the scsi_init_pkt callback facility can 14866 * result in an I/O request persisting at the head of 14867 * the list which cannot be satisfied even after 14868 * multiple retries. In the future the driver may 14869 * implement some kind of maximum runout count before 14870 * failing an I/O. 14871 * 14872 * Note: the use of funcp below may seem superfluous, 14873 * but it helps warlock figure out the correct 14874 * initpkt function calls (see [s]sd.wlcmd). 14875 */ 14876 struct scsi_pkt *pktp; 14877 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14878 14879 ASSERT(bp != un->un_rqs_bp); 14880 14881 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14882 switch ((*funcp)(bp, &pktp)) { 14883 case SD_PKT_ALLOC_SUCCESS: 14884 xp->xb_pktp = pktp; 14885 SD_TRACE(SD_LOG_IO_CORE, un, 14886 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14887 pktp); 14888 goto got_pkt; 14889 14890 case SD_PKT_ALLOC_FAILURE: 14891 /* 14892 * Temporary (hopefully) resource depletion. 14893 * Since retries and RQS commands always have a 14894 * scsi_pkt allocated, these cases should never 14895 * get here. So the only cases this needs to 14896 * handle is a bp from the waitq (which we put 14897 * back onto the waitq for sdrunout), or a bp 14898 * sent as an immed_bp (which we just fail). 14899 */ 14900 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14901 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14902 14903 #if defined(__x86) /* DMAFREE for x86 only */ 14904 14905 if (bp == immed_bp) { 14906 /* 14907 * If SD_XB_DMA_FREED is clear, then 14908 * this is a failure to allocate a 14909 * scsi_pkt, and we must fail the 14910 * command. 
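 *
 * (For orientation, a condensed sketch of how each initpkt result is
 * disposed of by this switch; the flag variables here are hypothetical
 * stand-ins for the control flow implemented below:)
 *
 *	switch (result) {
 *	case SD_PKT_ALLOC_SUCCESS:
 *		transport_it = 1;		// send to the target
 *		break;
 *	case SD_PKT_ALLOC_FAILURE:		// resource runout
 *		requeue_at_head = 1;		// sdrunout() restarts later
 *		break;
 *	case SD_PKT_ALLOC_FAILURE_NO_DMA:
 *	case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL:
 *	case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL:
 *		fail_with_eio = 1;		// not retryable here
 *		break;
 *	}
 *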
14911 */ 14912 if ((xp->xb_pkt_flags & 14913 SD_XB_DMA_FREED) == 0) { 14914 break; 14915 } 14916 14917 /* 14918 * If this immediate command is NOT our 14919 * un_retry_bp, then we must fail it. 14920 */ 14921 if (bp != un->un_retry_bp) { 14922 break; 14923 } 14924 14925 /* 14926 * We get here if this cmd is our 14927 * un_retry_bp that was DMAFREED, but 14928 * scsi_init_pkt() failed to reallocate 14929 * DMA resources when we attempted to 14930 * retry it. This can happen when an 14931 * mpxio failover is in progress, but 14932 * we don't want to just fail the 14933 * command in this case. 14934 * 14935 * Use timeout(9F) to restart it after 14936 * a 100ms delay. We don't want to 14937 * let sdrunout() restart it, because 14938 * sdrunout() is just supposed to start 14939 * commands that are sitting on the 14940 * wait queue. The un_retry_bp stays 14941 * set until the command completes, but 14942 * sdrunout can be called many times 14943 * before that happens. Since sdrunout 14944 * cannot tell if the un_retry_bp is 14945 * already in the transport, it could 14946 * end up calling scsi_transport() for 14947 * the un_retry_bp multiple times. 14948 * 14949 * Also: don't schedule the callback 14950 * if some other callback is already 14951 * pending. 14952 */ 14953 if (un->un_retry_statp == NULL) { 14954 /* 14955 * restore the kstat pointer to 14956 * keep kstat counts coherent 14957 * when we do retry the command. 14958 */ 14959 un->un_retry_statp = 14960 saved_statp; 14961 } 14962 14963 if ((un->un_startstop_timeid == NULL) && 14964 (un->un_retry_timeid == NULL) && 14965 (un->un_direct_priority_timeid == 14966 NULL)) { 14967 14968 un->un_retry_timeid = 14969 timeout( 14970 sd_start_retry_command, 14971 un, SD_RESTART_TIMEOUT); 14972 } 14973 goto exit; 14974 } 14975 14976 #else 14977 if (bp == immed_bp) { 14978 break; /* Just fail the command */ 14979 } 14980 #endif 14981 14982 /* Add the buf back to the head of the waitq */ 14983 bp->av_forw = un->un_waitq_headp; 14984 un->un_waitq_headp = bp; 14985 if (un->un_waitq_tailp == NULL) { 14986 un->un_waitq_tailp = bp; 14987 } 14988 goto exit; 14989 14990 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14991 /* 14992 * HBA DMA resource failure. Fail the command 14993 * and continue processing of the queues. 14994 */ 14995 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14996 "sd_start_cmds: " 14997 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14998 break; 14999 15000 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 15001 /* 15002 * Note:x86: Partial DMA mapping not supported 15003 * for USCSI commands, and all the needed DMA 15004 * resources were not allocated. 15005 */ 15006 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15007 "sd_start_cmds: " 15008 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 15009 break; 15010 15011 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 15012 /* 15013 * Note:x86: Request cannot fit into CDB based 15014 * on lba and len. 15015 */ 15016 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15017 "sd_start_cmds: " 15018 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 15019 break; 15020 15021 default: 15022 /* Should NEVER get here! */ 15023 panic("scsi_initpkt error"); 15024 /*NOTREACHED*/ 15025 } 15026 15027 /* 15028 * Fatal error in allocating a scsi_pkt for this buf. 15029 * Update kstats & return the buf with an error code. 15030 * We must use sd_return_failed_command_no_restart() to 15031 * avoid a recursive call back into sd_start_cmds(). 15032 * However this also means that we must keep processing 15033 * the waitq here in order to avoid stalling. 
15034 */ 15035 if (statp == kstat_waitq_to_runq) { 15036 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 15037 } 15038 sd_return_failed_command_no_restart(un, bp, EIO); 15039 if (bp == immed_bp) { 15040 /* immed_bp is gone by now, so clear this */ 15041 immed_bp = NULL; 15042 } 15043 continue; 15044 } 15045 got_pkt: 15046 if (bp == immed_bp) { 15047 /* goto the head of the class.... */ 15048 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15049 } 15050 15051 un->un_ncmds_in_transport++; 15052 SD_UPDATE_KSTATS(un, statp, bp); 15053 15054 /* 15055 * Call scsi_transport() to send the command to the target. 15056 * According to SCSA architecture, we must drop the mutex here 15057 * before calling scsi_transport() in order to avoid deadlock. 15058 * Note that the scsi_pkt's completion routine can be executed 15059 * (from interrupt context) even before the call to 15060 * scsi_transport() returns. 15061 */ 15062 SD_TRACE(SD_LOG_IO_CORE, un, 15063 "sd_start_cmds: calling scsi_transport()\n"); 15064 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 15065 15066 mutex_exit(SD_MUTEX(un)); 15067 rval = scsi_transport(xp->xb_pktp); 15068 mutex_enter(SD_MUTEX(un)); 15069 15070 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15071 "sd_start_cmds: scsi_transport() returned %d\n", rval); 15072 15073 switch (rval) { 15074 case TRAN_ACCEPT: 15075 /* Clear this with every pkt accepted by the HBA */ 15076 un->un_tran_fatal_count = 0; 15077 break; /* Success; try the next cmd (if any) */ 15078 15079 case TRAN_BUSY: 15080 un->un_ncmds_in_transport--; 15081 ASSERT(un->un_ncmds_in_transport >= 0); 15082 15083 /* 15084 * Don't retry request sense, the sense data 15085 * is lost when another request is sent. 15086 * Free up the rqs buf and retry 15087 * the original failed cmd. Update kstat. 15088 */ 15089 if (bp == un->un_rqs_bp) { 15090 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15091 bp = sd_mark_rqs_idle(un, xp); 15092 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 15093 NULL, NULL, EIO, un->un_busy_timeout / 500, 15094 kstat_waitq_enter); 15095 goto exit; 15096 } 15097 15098 #if defined(__x86) /* DMAFREE for x86 only */ 15099 /* 15100 * Free the DMA resources for the scsi_pkt. This will 15101 * allow mpxio to select another path the next time 15102 * we call scsi_transport() with this scsi_pkt. 15103 * See sdintr() for the rationalization behind this. 15104 */ 15105 if ((un->un_f_is_fibre == TRUE) && 15106 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15107 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 15108 scsi_dmafree(xp->xb_pktp); 15109 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15110 } 15111 #endif 15112 15113 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 15114 /* 15115 * Commands that are SD_PATH_DIRECT_PRIORITY 15116 * are for error recovery situations. These do 15117 * not use the normal command waitq, so if they 15118 * get a TRAN_BUSY we cannot put them back onto 15119 * the waitq for later retry. One possible 15120 * problem is that there could already be some 15121 * other command on un_retry_bp that is waiting 15122 * for this one to complete, so we would be 15123 * deadlocked if we put this command back onto 15124 * the waitq for later retry (since un_retry_bp 15125 * must complete before the driver gets back to 15126 * commands on the waitq). 15127 * 15128 * To avoid deadlock we must schedule a callback 15129 * that will restart this command after a set 15130 * interval. 
This should keep retrying for as 15131 * long as the underlying transport keeps 15132 * returning TRAN_BUSY (just like for other 15133 * commands). Use the same timeout interval as 15134 * for the ordinary TRAN_BUSY retry. 15135 */ 15136 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15137 "sd_start_cmds: scsi_transport() returned " 15138 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 15139 15140 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15141 un->un_direct_priority_timeid = 15142 timeout(sd_start_direct_priority_command, 15143 bp, un->un_busy_timeout / 500); 15144 15145 goto exit; 15146 } 15147 15148 /* 15149 * For TRAN_BUSY, we want to reduce the throttle value, 15150 * unless we are retrying a command. 15151 */ 15152 if (bp != un->un_retry_bp) { 15153 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 15154 } 15155 15156 /* 15157 * Set up the bp to be tried again 10 ms later. 15158 * Note:x86: Is there a timeout value in the sd_lun 15159 * for this condition? 15160 */ 15161 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 15162 kstat_runq_back_to_waitq); 15163 goto exit; 15164 15165 case TRAN_FATAL_ERROR: 15166 un->un_tran_fatal_count++; 15167 /* FALLTHRU */ 15168 15169 case TRAN_BADPKT: 15170 default: 15171 un->un_ncmds_in_transport--; 15172 ASSERT(un->un_ncmds_in_transport >= 0); 15173 15174 /* 15175 * If this is our REQUEST SENSE command with a 15176 * transport error, we must get back the pointers 15177 * to the original buf, and mark the REQUEST 15178 * SENSE command as "available". 15179 */ 15180 if (bp == un->un_rqs_bp) { 15181 bp = sd_mark_rqs_idle(un, xp); 15182 xp = SD_GET_XBUF(bp); 15183 } else { 15184 /* 15185 * Legacy behavior: do not update transport 15186 * error count for request sense commands. 15187 */ 15188 SD_UPDATE_ERRSTATS(un, sd_transerrs); 15189 } 15190 15191 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15192 sd_print_transport_rejected_message(un, xp, rval); 15193 15194 /* 15195 * This command will be terminated by SD driver due 15196 * to a fatal transport error. We should post 15197 * ereport.io.scsi.cmd.disk.tran with driver-assessment 15198 * of "fail" for any command to indicate this 15199 * situation. 15200 */ 15201 if (xp->xb_ena > 0) { 15202 ASSERT(un->un_fm_private != NULL); 15203 sfip = un->un_fm_private; 15204 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT; 15205 sd_ssc_extract_info(&sfip->fm_ssc, un, 15206 xp->xb_pktp, bp, xp); 15207 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 15208 } 15209 15210 /* 15211 * We must use sd_return_failed_command_no_restart() to 15212 * avoid a recursive call back into sd_start_cmds(). 15213 * However this also means that we must keep processing 15214 * the waitq here in order to avoid stalling. 15215 */ 15216 sd_return_failed_command_no_restart(un, bp, EIO); 15217 15218 /* 15219 * Notify any threads waiting in sd_ddi_suspend() that 15220 * a command completion has occurred. 15221 */ 15222 if (un->un_state == SD_STATE_SUSPENDED) { 15223 cv_broadcast(&un->un_disk_busy_cv); 15224 } 15225 15226 if (bp == immed_bp) { 15227 /* immed_bp is gone by now, so clear this */ 15228 immed_bp = NULL; 15229 } 15230 break; 15231 } 15232 15233 } while (immed_bp == NULL); 15234 15235 exit: 15236 ASSERT(mutex_owned(SD_MUTEX(un))); 15237 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 15238 } 15239 15240 15241 /* 15242 * Function: sd_return_command 15243 * 15244 * Description: Returns a command to its originator (with or without an 15245 * error). Also starts commands waiting to be transported 15246 * to the target. 
15247 * 15248 * Context: May be called from interrupt, kernel, or timeout context 15249 */ 15250 15251 static void 15252 sd_return_command(struct sd_lun *un, struct buf *bp) 15253 { 15254 struct sd_xbuf *xp; 15255 struct scsi_pkt *pktp; 15256 struct sd_fm_internal *sfip; 15257 15258 ASSERT(bp != NULL); 15259 ASSERT(un != NULL); 15260 ASSERT(mutex_owned(SD_MUTEX(un))); 15261 ASSERT(bp != un->un_rqs_bp); 15262 xp = SD_GET_XBUF(bp); 15263 ASSERT(xp != NULL); 15264 15265 pktp = SD_GET_PKTP(bp); 15266 sfip = (struct sd_fm_internal *)un->un_fm_private; 15267 ASSERT(sfip != NULL); 15268 15269 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 15270 15271 /* 15272 * Note: check for the "sdrestart failed" case. 15273 */ 15274 if ((un->un_partial_dma_supported == 1) && 15275 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 15276 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 15277 (xp->xb_pktp->pkt_resid == 0)) { 15278 15279 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 15280 /* 15281 * Successfully set up next portion of cmd 15282 * transfer, try sending it 15283 */ 15284 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 15285 NULL, NULL, 0, (clock_t)0, NULL); 15286 sd_start_cmds(un, NULL); 15287 return; /* Note:x86: need a return here? */ 15288 } 15289 } 15290 15291 /* 15292 * If this is the failfast bp, clear it from un_failfast_bp. This 15293 * can happen if upon being re-tried the failfast bp either 15294 * succeeded or encountered another error (possibly even a different 15295 * error than the one that precipitated the failfast state, but in 15296 * that case it would have had to exhaust retries as well). Regardless, 15297 * this should not occur whenever the instance is in the active 15298 * failfast state. 15299 */ 15300 if (bp == un->un_failfast_bp) { 15301 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 15302 un->un_failfast_bp = NULL; 15303 } 15304 15305 /* 15306 * Clear the failfast state upon successful completion of ANY cmd. 15307 */ 15308 if (bp->b_error == 0) { 15309 un->un_failfast_state = SD_FAILFAST_INACTIVE; 15310 /* 15311 * If this is a successful command, but used to be retried, 15312 * we will take it as a recovered command and post an 15313 * ereport with driver-assessment of "recovered". 15314 */ 15315 if (xp->xb_ena > 0) { 15316 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15317 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY); 15318 } 15319 } else { 15320 /* 15321 * If this is a failed non-USCSI command we will post an 15322 * ereport with driver-assessment set accordingly("fail" or 15323 * "fatal"). 15324 */ 15325 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15326 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15327 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 15328 } 15329 } 15330 15331 /* 15332 * This is used if the command was retried one or more times. Show that 15333 * we are done with it, and allow processing of the waitq to resume. 15334 */ 15335 if (bp == un->un_retry_bp) { 15336 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15337 "sd_return_command: un:0x%p: " 15338 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 15339 un->un_retry_bp = NULL; 15340 un->un_retry_statp = NULL; 15341 } 15342 15343 SD_UPDATE_RDWR_STATS(un, bp); 15344 SD_UPDATE_PARTITION_STATS(un, bp); 15345 15346 switch (un->un_state) { 15347 case SD_STATE_SUSPENDED: 15348 /* 15349 * Notify any threads waiting in sd_ddi_suspend() that 15350 * a command completion has occurred. 
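 *
 * A minimal user-space analogue of this wait/notify pairing, with POSIX
 * threads standing in for kcondvar(9F)/mutex(9F) (names hypothetical):
 *
 *	#include <pthread.h>
 *
 *	static pthread_mutex_t	m = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t	busy_cv = PTHREAD_COND_INITIALIZER;
 *	static int		ncmds_in_transport;
 *
 *	static void
 *	cmd_done(void)			// completion side (this path)
 *	{
 *		pthread_mutex_lock(&m);
 *		ncmds_in_transport--;
 *		pthread_cond_broadcast(&busy_cv);
 *		pthread_mutex_unlock(&m);
 *	}
 *
 *	static void
 *	wait_until_idle(void)		// suspend side (sd_ddi_suspend)
 *	{
 *		pthread_mutex_lock(&m);
 *		while (ncmds_in_transport > 0)
 *			pthread_cond_wait(&busy_cv, &m);
 *		pthread_mutex_unlock(&m);
 *	}
 *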
15351 */ 15352 cv_broadcast(&un->un_disk_busy_cv); 15353 break; 15354 default: 15355 sd_start_cmds(un, NULL); 15356 break; 15357 } 15358 15359 /* Return this command up the iodone chain to its originator. */ 15360 mutex_exit(SD_MUTEX(un)); 15361 15362 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 15363 xp->xb_pktp = NULL; 15364 15365 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 15366 15367 ASSERT(!mutex_owned(SD_MUTEX(un))); 15368 mutex_enter(SD_MUTEX(un)); 15369 15370 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 15371 } 15372 15373 15374 /* 15375 * Function: sd_return_failed_command 15376 * 15377 * Description: Command completion when an error occurred. 15378 * 15379 * Context: May be called from interrupt context 15380 */ 15381 15382 static void 15383 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 15384 { 15385 ASSERT(bp != NULL); 15386 ASSERT(un != NULL); 15387 ASSERT(mutex_owned(SD_MUTEX(un))); 15388 15389 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15390 "sd_return_failed_command: entry\n"); 15391 15392 /* 15393 * b_resid could already be nonzero due to a partial data 15394 * transfer, so do not change it here. 15395 */ 15396 SD_BIOERROR(bp, errcode); 15397 15398 sd_return_command(un, bp); 15399 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15400 "sd_return_failed_command: exit\n"); 15401 } 15402 15403 15404 /* 15405 * Function: sd_return_failed_command_no_restart 15406 * 15407 * Description: Same as sd_return_failed_command, but ensures that no 15408 * call back into sd_start_cmds will be issued. 15409 * 15410 * Context: May be called from interrupt context 15411 */ 15412 15413 static void 15414 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 15415 int errcode) 15416 { 15417 struct sd_xbuf *xp; 15418 15419 ASSERT(bp != NULL); 15420 ASSERT(un != NULL); 15421 ASSERT(mutex_owned(SD_MUTEX(un))); 15422 xp = SD_GET_XBUF(bp); 15423 ASSERT(xp != NULL); 15424 ASSERT(errcode != 0); 15425 15426 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15427 "sd_return_failed_command_no_restart: entry\n"); 15428 15429 /* 15430 * b_resid could already be nonzero due to a partial data 15431 * transfer, so do not change it here. 15432 */ 15433 SD_BIOERROR(bp, errcode); 15434 15435 /* 15436 * If this is the failfast bp, clear it. This can happen if the 15437 * failfast bp encounterd a fatal error when we attempted to 15438 * re-try it (such as a scsi_transport(9F) failure). However 15439 * we should NOT be in an active failfast state if the failfast 15440 * bp is not NULL. 15441 */ 15442 if (bp == un->un_failfast_bp) { 15443 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 15444 un->un_failfast_bp = NULL; 15445 } 15446 15447 if (bp == un->un_retry_bp) { 15448 /* 15449 * This command was retried one or more times. Show that we are 15450 * done with it, and allow processing of the waitq to resume. 
15451 */
15452 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15453 "sd_return_failed_command_no_restart: "
15454 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
15455 un->un_retry_bp = NULL;
15456 un->un_retry_statp = NULL;
15457 }
15458
15459 SD_UPDATE_RDWR_STATS(un, bp);
15460 SD_UPDATE_PARTITION_STATS(un, bp);
15461
15462 mutex_exit(SD_MUTEX(un));
15463
15464 if (xp->xb_pktp != NULL) {
15465 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
15466 xp->xb_pktp = NULL;
15467 }
15468
15469 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
15470
15471 mutex_enter(SD_MUTEX(un));
15472
15473 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15474 "sd_return_failed_command_no_restart: exit\n");
15475 }
15476
15477
15478 /*
15479 * Function: sd_retry_command
15480 *
15481 * Description: Queue up a command for retry, or (optionally) fail it
15482 * if retry counts are exhausted.
15483 *
15484 * Arguments: un - Pointer to the sd_lun struct for the target.
15485 *
15486 * bp - Pointer to the buf for the command to be retried.
15487 *
15488 * retry_check_flag - Flag to see which (if any) of the retry
15489 * counts should be decremented/checked. If the indicated
15490 * retry count is exhausted, then the command will not be
15491 * retried; it will be failed instead. This should use a
15492 * value equal to one of the following:
15493 *
15494 * SD_RETRIES_NOCHECK
15495 * SD_RETRIES_STANDARD
15496 * SD_RETRIES_VICTIM
15497 *
15498 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
15499 * if the check should be made to see if FLAG_ISOLATE is set
15500 * in the pkt. If FLAG_ISOLATE is set, then the command is
15501 * not retried; it is simply failed.
15502 *
15503 * user_funcp - Ptr to function to call before dispatching the
15504 * command. May be NULL if no action needs to be performed.
15505 * (Primarily intended for printing messages.)
15506 *
15507 * user_arg - Optional argument to be passed along to
15508 * the user_funcp call.
15509 *
15510 * failure_code - errno return code to set in the bp if the
15511 * command is going to be failed.
15512 *
15513 * retry_delay - Retry delay interval in (clock_t) units. May
15514 * be zero, which indicates that the retry should be attempted
15515 * immediately (i.e., without an intervening delay).
15516 *
15517 * statp - Ptr to kstat function to be updated if the command
15518 * is queued for a delayed retry. May be NULL if no kstat
15519 * update is desired.
15520 *
15521 * Context: May be called from interrupt context.
15522 */
15523
15524 static void
15525 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
15526 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int code),
15527 void *user_arg, int failure_code, clock_t retry_delay,
15528 void (*statp)(kstat_io_t *))
15529 {
15530 struct sd_xbuf *xp;
15531 struct scsi_pkt *pktp;
15532 struct sd_fm_internal *sfip;
15533
15534 ASSERT(un != NULL);
15535 ASSERT(mutex_owned(SD_MUTEX(un)));
15536 ASSERT(bp != NULL);
15537 xp = SD_GET_XBUF(bp);
15538 ASSERT(xp != NULL);
15539 pktp = SD_GET_PKTP(bp);
15540 ASSERT(pktp != NULL);
15541
15542 sfip = (struct sd_fm_internal *)un->un_fm_private;
15543 ASSERT(sfip != NULL);
15544
15545 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
15546 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);
15547
15548 /*
15549 * If we are syncing or dumping, fail the command to avoid
15550 * recursively calling back into scsi_transport().
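 *
 * (Referring back to the argument list above: a representative
 * invocation, as used for the TRAN_BUSY case in sd_start_cmds() -
 * standard retry accounting, no user callback, EIO on exhaustion, a
 * busy-timeout based delay, and a waitq kstat update:)
 *
 *	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
 *	    NULL, NULL, EIO, un->un_busy_timeout / 500,
 *	    kstat_waitq_enter);
 *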
15551 */
15552 if (ddi_in_panic()) {
15553 goto fail_command_no_log;
15554 }
15555
15556 /*
15557 * We should never be retrying a command with FLAG_DIAGNOSE set, so
15558 * log an error and fail the command.
15559 */
15560 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
15561 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
15562 "ERROR, retrying FLAG_DIAGNOSE command.\n");
15563 sd_dump_memory(un, SD_LOG_IO, "CDB",
15564 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
15565 sd_dump_memory(un, SD_LOG_IO, "Sense Data",
15566 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
15567 goto fail_command;
15568 }
15569
15570 /*
15571 * If we are suspended, then put the command onto the head of the
15572 * wait queue since we don't want to start any more commands, and
15573 * clear un_retry_bp. When we are resumed, we will handle the
15574 * commands in the wait queue.
15575 */
15576 switch (un->un_state) {
15577 case SD_STATE_SUSPENDED:
15578 case SD_STATE_DUMPING:
15579 bp->av_forw = un->un_waitq_headp;
15580 un->un_waitq_headp = bp;
15581 if (un->un_waitq_tailp == NULL) {
15582 un->un_waitq_tailp = bp;
15583 }
15584 if (bp == un->un_retry_bp) {
15585 un->un_retry_bp = NULL;
15586 un->un_retry_statp = NULL;
15587 }
15588 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
15589 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
15590 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
15591 return;
15592 default:
15593 break;
15594 }
15595
15596 /*
15597 * If the caller wants us to check FLAG_ISOLATE, then see if that
15598 * is set; if it is then we do not want to retry the command.
15599 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
15600 */
15601 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
15602 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
15603 goto fail_command;
15604 }
15605 }
15606
15607
15608 /*
15609 * If SD_RETRIES_FAILFAST is set, it indicates that either a
15610 * command timeout or a selection timeout has occurred. This means
15611 * that we were unable to establish any kind of communication with
15612 * the target, and subsequent retries and/or commands are likely
15613 * to encounter similar results and take a long time to complete.
15614 *
15615 * If this is a failfast error condition, we need to update the
15616 * failfast state, even if this bp does not have B_FAILFAST set.
15617 */
15618 if (retry_check_flag & SD_RETRIES_FAILFAST) {
15619 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
15620 ASSERT(un->un_failfast_bp == NULL);
15621 /*
15622 * If we are already in the active failfast state, and
15623 * another failfast error condition has been detected,
15624 * then fail this command if it has B_FAILFAST set.
15625 * If B_FAILFAST is clear, then maintain the legacy
15626 * behavior of retrying heroically, even though this will
15627 * take a lot more time to fail the command.
15628 */
15629 if (bp->b_flags & B_FAILFAST) {
15630 goto fail_command;
15631 }
15632 } else {
15633 /*
15634 * We're not in the active failfast state, but we
15635 * have a failfast error condition, so we must begin
15636 * transition to the next state. We do this regardless
15637 * of whether or not this bp has B_FAILFAST set.
15638 */
15639 if (un->un_failfast_bp == NULL) {
15640 /*
15641 * This is the first bp to meet a failfast
15642 * condition so save it on un_failfast_bp &
15643 * do normal retry processing. Do not enter
15644 * active failfast state yet. This marks
15645 * entry into the "failfast pending" state.
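 *
 * The overall failfast progression can be sketched as a tiny state
 * machine (hypothetical encoding; the driver represents "pending"
 * implicitly as un_failfast_bp != NULL rather than as a state value):
 *
 *	enum ff { FF_INACTIVE, FF_PENDING, FF_ACTIVE };
 *
 *	static enum ff
 *	ff_on_error(enum ff cur, int same_bp_as_pending)
 *	{
 *		if (cur == FF_INACTIVE)
 *			return (FF_PENDING);	// remember this bp
 *		if (cur == FF_PENDING && same_bp_as_pending)
 *			return (FF_ACTIVE);	// flush queues; B_FAILFAST
 *						// bps now fail immediately
 *		return (cur);			// other bp: retry normally
 *	}
 *
 * with the state returning to FF_INACTIVE on any non-failfast error or
 * any successful completion, as handled elsewhere in this file.
 *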
15646 */
15647 un->un_failfast_bp = bp;
15648
15649 } else if (un->un_failfast_bp == bp) {
15650 /*
15651 * This is the second time *this* bp has
15652 * encountered a failfast error condition,
15653 * so enter active failfast state & flush
15654 * queues as appropriate.
15655 */
15656 un->un_failfast_state = SD_FAILFAST_ACTIVE;
15657 un->un_failfast_bp = NULL;
15658 sd_failfast_flushq(un);
15659
15660 /*
15661 * Fail this bp now if B_FAILFAST set;
15662 * otherwise continue with retries. (It would
15663 * be pretty ironic if this bp succeeded on a
15664 * subsequent retry after we just flushed all
15665 * the queues).
15666 */
15667 if (bp->b_flags & B_FAILFAST) {
15668 goto fail_command;
15669 }
15670
15671 #if !defined(lint) && !defined(__lint)
15672 } else {
15673 /*
15674 * If neither of the preceding conditionals
15675 * was true, it means that there is some
15676 * *other* bp that has met an initial failfast
15677 * condition and is currently either being
15678 * retried or is waiting to be retried. In
15679 * that case we should perform normal retry
15680 * processing on *this* bp, since there is a
15681 * chance that the current failfast condition
15682 * is transient and recoverable. If that does
15683 * not turn out to be the case, then retries
15684 * will be cleared when the wait queue is
15685 * flushed anyway.
15686 */
15687 #endif
15688 }
15689 }
15690 } else {
15691 /*
15692 * SD_RETRIES_FAILFAST is clear, which indicates that we
15693 * likely were able to at least establish some level of
15694 * communication with the target, and subsequent commands
15695 * and/or retries are likely to get through to the target.
15696 * In this case we want to be aggressive about clearing
15697 * the failfast state. Note that this does not affect
15698 * the "failfast pending" condition.
15699 */
15700 un->un_failfast_state = SD_FAILFAST_INACTIVE;
15701 }
15702
15703
15704 /*
15705 * Check the specified retry count to see if we can still do
15706 * any retries with this pkt before we should fail it.
15707 */
15708 switch (retry_check_flag & SD_RETRIES_MASK) {
15709 case SD_RETRIES_VICTIM:
15710 /*
15711 * Check the victim retry count. If exhausted, then fall
15712 * thru & check against the standard retry count.
15713 */
15714 if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
15715 /* Increment count & proceed with the retry */
15716 xp->xb_victim_retry_count++;
15717 break;
15718 }
15719 /* Victim retries exhausted, fall back to std. retries... */
15720 /* FALLTHRU */
15721
15722 case SD_RETRIES_STANDARD:
15723 if (xp->xb_retry_count >= un->un_retry_count) {
15724 /* Retries exhausted, fail the command */
15725 SD_TRACE(SD_LOG_IO_CORE, un,
15726 "sd_retry_command: retries exhausted!\n");
15727 /*
15728 * Update b_resid for failed SCMD_READ & SCMD_WRITE
15729 * commands with nonzero pkt_resid.
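 *
 * (Recapping the victim/standard fallthrough above as a condensed,
 * self-contained sketch; the counters and limits are hypothetical
 * parameters rather than the xb_*/un_* fields used here:)
 *
 *	// Returns 1 if another retry may be issued, 0 if exhausted.
 *	static int
 *	retry_allowed(int check_victim, int *victim_cnt, int victim_lim,
 *	    int *std_cnt, int std_lim)
 *	{
 *		if (check_victim && *victim_cnt < victim_lim) {
 *			(*victim_cnt)++;	// spend victim budget first
 *			return (1);
 *		}
 *		if (*std_cnt >= std_lim)	// then the standard budget
 *			return (0);
 *		(*std_cnt)++;
 *		return (1);
 *	}
 *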
15730 */ 15731 if ((pktp->pkt_reason == CMD_CMPLT) && 15732 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 15733 (pktp->pkt_resid != 0)) { 15734 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 15735 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 15736 SD_UPDATE_B_RESID(bp, pktp); 15737 } 15738 } 15739 goto fail_command; 15740 } 15741 xp->xb_retry_count++; 15742 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15743 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15744 break; 15745 15746 case SD_RETRIES_UA: 15747 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 15748 /* Retries exhausted, fail the command */ 15749 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15750 "Unit Attention retries exhausted. " 15751 "Check the target.\n"); 15752 goto fail_command; 15753 } 15754 xp->xb_ua_retry_count++; 15755 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15756 "sd_retry_command: retry count:%d\n", 15757 xp->xb_ua_retry_count); 15758 break; 15759 15760 case SD_RETRIES_BUSY: 15761 if (xp->xb_retry_count >= un->un_busy_retry_count) { 15762 /* Retries exhausted, fail the command */ 15763 SD_TRACE(SD_LOG_IO_CORE, un, 15764 "sd_retry_command: retries exhausted!\n"); 15765 goto fail_command; 15766 } 15767 xp->xb_retry_count++; 15768 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15769 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15770 break; 15771 15772 case SD_RETRIES_NOCHECK: 15773 default: 15774 /* No retry count to check. Just proceed with the retry */ 15775 break; 15776 } 15777 15778 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15779 15780 /* 15781 * If this is a non-USCSI command being retried 15782 * during execution last time, we should post an ereport with 15783 * driver-assessment of the value "retry". 15784 * For partial DMA, request sense and STATUS_QFULL, there are no 15785 * hardware errors, we bypass ereport posting. 15786 */ 15787 if (failure_code != 0) { 15788 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15789 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15790 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY); 15791 } 15792 } 15793 15794 /* 15795 * If we were given a zero timeout, we must attempt to retry the 15796 * command immediately (ie, without a delay). 15797 */ 15798 if (retry_delay == 0) { 15799 /* 15800 * Check some limiting conditions to see if we can actually 15801 * do the immediate retry. If we cannot, then we must 15802 * fall back to queueing up a delayed retry. 15803 */ 15804 if (un->un_ncmds_in_transport >= un->un_throttle) { 15805 /* 15806 * We are at the throttle limit for the target, 15807 * fall back to delayed retry. 15808 */ 15809 retry_delay = un->un_busy_timeout; 15810 statp = kstat_waitq_enter; 15811 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15812 "sd_retry_command: immed. retry hit " 15813 "throttle!\n"); 15814 } else { 15815 /* 15816 * We're clear to proceed with the immediate retry. 15817 * First call the user-provided function (if any) 15818 */ 15819 if (user_funcp != NULL) { 15820 (*user_funcp)(un, bp, user_arg, 15821 SD_IMMEDIATE_RETRY_ISSUED); 15822 #ifdef __lock_lint 15823 sd_print_incomplete_msg(un, bp, user_arg, 15824 SD_IMMEDIATE_RETRY_ISSUED); 15825 sd_print_cmd_incomplete_msg(un, bp, user_arg, 15826 SD_IMMEDIATE_RETRY_ISSUED); 15827 sd_print_sense_failed_msg(un, bp, user_arg, 15828 SD_IMMEDIATE_RETRY_ISSUED); 15829 #endif 15830 } 15831 15832 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15833 "sd_retry_command: issuing immediate retry\n"); 15834 15835 /* 15836 * Call sd_start_cmds() to transport the command to 15837 * the target. 
15838 */ 15839 sd_start_cmds(un, bp); 15840 15841 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15842 "sd_retry_command exit\n"); 15843 return; 15844 } 15845 } 15846 15847 /* 15848 * Set up to retry the command after a delay. 15849 * First call the user-provided function (if any) 15850 */ 15851 if (user_funcp != NULL) { 15852 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 15853 } 15854 15855 sd_set_retry_bp(un, bp, retry_delay, statp); 15856 15857 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15858 return; 15859 15860 fail_command: 15861 15862 if (user_funcp != NULL) { 15863 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 15864 } 15865 15866 fail_command_no_log: 15867 15868 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15869 "sd_retry_command: returning failed command\n"); 15870 15871 sd_return_failed_command(un, bp, failure_code); 15872 15873 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15874 } 15875 15876 15877 /* 15878 * Function: sd_set_retry_bp 15879 * 15880 * Description: Set up the given bp for retry. 15881 * 15882 * Arguments: un - ptr to associated softstate 15883 * bp - ptr to buf(9S) for the command 15884 * retry_delay - time interval before issuing retry (may be 0) 15885 * statp - optional pointer to kstat function 15886 * 15887 * Context: May be called under interrupt context 15888 */ 15889 15890 static void 15891 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 15892 void (*statp)(kstat_io_t *)) 15893 { 15894 ASSERT(un != NULL); 15895 ASSERT(mutex_owned(SD_MUTEX(un))); 15896 ASSERT(bp != NULL); 15897 15898 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15899 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 15900 15901 /* 15902 * Indicate that the command is being retried. This will not allow any 15903 * other commands on the wait queue to be transported to the target 15904 * until this command has been completed (success or failure). The 15905 * "retry command" is not transported to the target until the given 15906 * time delay expires, unless the user specified a 0 retry_delay. 15907 * 15908 * Note: the timeout(9F) callback routine is what actually calls 15909 * sd_start_cmds() to transport the command, with the exception of a 15910 * zero retry_delay. The only current implementor of a zero retry delay 15911 * is the case where a START_STOP_UNIT is sent to spin-up a device. 15912 */ 15913 if (un->un_retry_bp == NULL) { 15914 ASSERT(un->un_retry_statp == NULL); 15915 un->un_retry_bp = bp; 15916 15917 /* 15918 * If the user has not specified a delay the command should 15919 * be queued and no timeout should be scheduled. 15920 */ 15921 if (retry_delay == 0) { 15922 /* 15923 * Save the kstat pointer that will be used in the 15924 * call to SD_UPDATE_KSTATS() below, so that 15925 * sd_start_cmds() can correctly decrement the waitq 15926 * count when it is time to transport this command. 15927 */ 15928 un->un_retry_statp = statp; 15929 goto done; 15930 } 15931 } 15932 15933 if (un->un_retry_bp == bp) { 15934 /* 15935 * Save the kstat pointer that will be used in the call to 15936 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 15937 * correctly decrement the waitq count when it is time to 15938 * transport this command. 15939 */ 15940 un->un_retry_statp = statp; 15941 15942 /* 15943 * Schedule a timeout if: 15944 * 1) The user has specified a delay. 15945 * 2) There is not a START_STOP_UNIT callback pending. 
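 *
 * (A condensed sketch of the "single retry slot" policy implemented by
 * this routine, with hypothetical names; the special handling of a
 * failfast bp at the head of the queue is shown in the code below:)
 *
 *	struct work {
 *		struct work *next;
 *	};
 *
 *	static void
 *	set_retry(struct work **slotp, struct work *w,
 *	    struct work **waitq_headp)
 *	{
 *		if (*slotp == NULL || *slotp == w) {
 *			*slotp = w;		// claim (or keep) the slot;
 *			return;			// timeout scheduled by caller
 *		}
 *		w->next = *waitq_headp;		// slot busy: wait at the
 *		*waitq_headp = w;		// head of the wait queue
 *	}
 *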
15946 * 15947 * If no delay has been specified, then it is up to the caller 15948 * to ensure that IO processing continues without stalling. 15949 * Effectively, this means that the caller will issue the 15950 * required call to sd_start_cmds(). The START_STOP_UNIT 15951 * callback does this after the START STOP UNIT command has 15952 * completed. In either of these cases we should not schedule 15953 * a timeout callback here. Also don't schedule the timeout if 15954 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 15955 */ 15956 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 15957 (un->un_direct_priority_timeid == NULL)) { 15958 un->un_retry_timeid = 15959 timeout(sd_start_retry_command, un, retry_delay); 15960 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15961 "sd_set_retry_bp: setting timeout: un: 0x%p" 15962 " bp:0x%p un_retry_timeid:0x%p\n", 15963 un, bp, un->un_retry_timeid); 15964 } 15965 } else { 15966 /* 15967 * We only get in here if there is already another command 15968 * waiting to be retried. In this case, we just put the 15969 * given command onto the wait queue, so it can be transported 15970 * after the current retry command has completed. 15971 * 15972 * Also we have to make sure that if the command at the head 15973 * of the wait queue is the un_failfast_bp, that we do not 15974 * put ahead of it any other commands that are to be retried. 15975 */ 15976 if ((un->un_failfast_bp != NULL) && 15977 (un->un_failfast_bp == un->un_waitq_headp)) { 15978 /* 15979 * Enqueue this command AFTER the first command on 15980 * the wait queue (which is also un_failfast_bp). 15981 */ 15982 bp->av_forw = un->un_waitq_headp->av_forw; 15983 un->un_waitq_headp->av_forw = bp; 15984 if (un->un_waitq_headp == un->un_waitq_tailp) { 15985 un->un_waitq_tailp = bp; 15986 } 15987 } else { 15988 /* Enqueue this command at the head of the waitq. */ 15989 bp->av_forw = un->un_waitq_headp; 15990 un->un_waitq_headp = bp; 15991 if (un->un_waitq_tailp == NULL) { 15992 un->un_waitq_tailp = bp; 15993 } 15994 } 15995 15996 if (statp == NULL) { 15997 statp = kstat_waitq_enter; 15998 } 15999 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16000 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 16001 } 16002 16003 done: 16004 if (statp != NULL) { 16005 SD_UPDATE_KSTATS(un, statp, bp); 16006 } 16007 16008 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16009 "sd_set_retry_bp: exit un:0x%p\n", un); 16010 } 16011 16012 16013 /* 16014 * Function: sd_start_retry_command 16015 * 16016 * Description: Start the command that has been waiting on the target's 16017 * retry queue. Called from timeout(9F) context after the 16018 * retry delay interval has expired. 16019 * 16020 * Arguments: arg - pointer to associated softstate for the device. 16021 * 16022 * Context: timeout(9F) thread context. May not sleep. 
16023 */ 16024 16025 static void 16026 sd_start_retry_command(void *arg) 16027 { 16028 struct sd_lun *un = arg; 16029 16030 ASSERT(un != NULL); 16031 ASSERT(!mutex_owned(SD_MUTEX(un))); 16032 16033 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16034 "sd_start_retry_command: entry\n"); 16035 16036 mutex_enter(SD_MUTEX(un)); 16037 16038 un->un_retry_timeid = NULL; 16039 16040 if (un->un_retry_bp != NULL) { 16041 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16042 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 16043 un, un->un_retry_bp); 16044 sd_start_cmds(un, un->un_retry_bp); 16045 } 16046 16047 mutex_exit(SD_MUTEX(un)); 16048 16049 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16050 "sd_start_retry_command: exit\n"); 16051 } 16052 16053 /* 16054 * Function: sd_rmw_msg_print_handler 16055 * 16056 * Description: If RMW mode is enabled and warning message is triggered 16057 * print I/O count during a fixed interval. 16058 * 16059 * Arguments: arg - pointer to associated softstate for the device. 16060 * 16061 * Context: timeout(9F) thread context. May not sleep. 16062 */ 16063 static void 16064 sd_rmw_msg_print_handler(void *arg) 16065 { 16066 struct sd_lun *un = arg; 16067 16068 ASSERT(un != NULL); 16069 ASSERT(!mutex_owned(SD_MUTEX(un))); 16070 16071 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16072 "sd_rmw_msg_print_handler: entry\n"); 16073 16074 mutex_enter(SD_MUTEX(un)); 16075 16076 if (un->un_rmw_incre_count > 0) { 16077 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16078 "%"PRIu64" I/O requests are not aligned with %d disk " 16079 "sector size in %ld seconds. They are handled through " 16080 "Read Modify Write but the performance is very low!\n", 16081 un->un_rmw_incre_count, un->un_tgt_blocksize, 16082 drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000); 16083 un->un_rmw_incre_count = 0; 16084 un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler, 16085 un, SD_RMW_MSG_PRINT_TIMEOUT); 16086 } else { 16087 un->un_rmw_msg_timeid = NULL; 16088 } 16089 16090 mutex_exit(SD_MUTEX(un)); 16091 16092 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16093 "sd_rmw_msg_print_handler: exit\n"); 16094 } 16095 16096 /* 16097 * Function: sd_start_direct_priority_command 16098 * 16099 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 16100 * received TRAN_BUSY when we called scsi_transport() to send it 16101 * to the underlying HBA. This function is called from timeout(9F) 16102 * context after the delay interval has expired. 16103 * 16104 * Arguments: arg - pointer to associated buf(9S) to be restarted. 16105 * 16106 * Context: timeout(9F) thread context. May not sleep. 16107 */ 16108 16109 static void 16110 sd_start_direct_priority_command(void *arg) 16111 { 16112 struct buf *priority_bp = arg; 16113 struct sd_lun *un; 16114 16115 ASSERT(priority_bp != NULL); 16116 un = SD_GET_UN(priority_bp); 16117 ASSERT(un != NULL); 16118 ASSERT(!mutex_owned(SD_MUTEX(un))); 16119 16120 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16121 "sd_start_direct_priority_command: entry\n"); 16122 16123 mutex_enter(SD_MUTEX(un)); 16124 un->un_direct_priority_timeid = NULL; 16125 sd_start_cmds(un, priority_bp); 16126 mutex_exit(SD_MUTEX(un)); 16127 16128 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16129 "sd_start_direct_priority_command: exit\n"); 16130 } 16131 16132 16133 /* 16134 * Function: sd_send_request_sense_command 16135 * 16136 * Description: Sends a REQUEST SENSE command to the target 16137 * 16138 * Context: May be called from interrupt context. 
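 *
 * (For reference, the REQUEST SENSE command issued through un_rqs_bp is
 * a 6-byte GROUP0 CDB, set up once in sd_alloc_rqs() below. A sketch of
 * its layout - the LUN bits are only filled in for legacy SCSI-1
 * targets, via SD_FILL_SCSI1_LUN():)
 *
 *	uchar_t cdb[CDB_GROUP0] = {
 *		SCMD_REQUEST_SENSE,	// opcode 0x03
 *		0,			// LUN bits (SCSI-1 only)
 *		0, 0,			// reserved
 *		MAX_SENSE_LENGTH,	// allocation length
 *		0			// control byte
 *	};
 *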
16139 */ 16140 16141 static void 16142 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 16143 struct scsi_pkt *pktp) 16144 { 16145 ASSERT(bp != NULL); 16146 ASSERT(un != NULL); 16147 ASSERT(mutex_owned(SD_MUTEX(un))); 16148 16149 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 16150 "entry: buf:0x%p\n", bp); 16151 16152 /* 16153 * If we are syncing or dumping, then fail the command to avoid a 16154 * recursive callback into scsi_transport(). Also fail the command 16155 * if we are suspended (legacy behavior). 16156 */ 16157 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 16158 (un->un_state == SD_STATE_DUMPING)) { 16159 sd_return_failed_command(un, bp, EIO); 16160 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16161 "sd_send_request_sense_command: syncing/dumping, exit\n"); 16162 return; 16163 } 16164 16165 /* 16166 * Retry the failed command and don't issue the request sense if: 16167 * 1) the sense buf is busy 16168 * 2) we have 1 or more outstanding commands on the target 16169 * (the sense data will be cleared or invalidated any way) 16170 * 16171 * Note: There could be an issue with not checking a retry limit here, 16172 * the problem is determining which retry limit to check. 16173 */ 16174 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 16175 /* Don't retry if the command is flagged as non-retryable */ 16176 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16177 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 16178 NULL, NULL, 0, un->un_busy_timeout, 16179 kstat_waitq_enter); 16180 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16181 "sd_send_request_sense_command: " 16182 "at full throttle, retrying exit\n"); 16183 } else { 16184 sd_return_failed_command(un, bp, EIO); 16185 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16186 "sd_send_request_sense_command: " 16187 "at full throttle, non-retryable exit\n"); 16188 } 16189 return; 16190 } 16191 16192 sd_mark_rqs_busy(un, bp); 16193 sd_start_cmds(un, un->un_rqs_bp); 16194 16195 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16196 "sd_send_request_sense_command: exit\n"); 16197 } 16198 16199 16200 /* 16201 * Function: sd_mark_rqs_busy 16202 * 16203 * Description: Indicate that the request sense bp for this instance is 16204 * in use. 16205 * 16206 * Context: May be called under interrupt context 16207 */ 16208 16209 static void 16210 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 16211 { 16212 struct sd_xbuf *sense_xp; 16213 16214 ASSERT(un != NULL); 16215 ASSERT(bp != NULL); 16216 ASSERT(mutex_owned(SD_MUTEX(un))); 16217 ASSERT(un->un_sense_isbusy == 0); 16218 16219 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 16220 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 16221 16222 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 16223 ASSERT(sense_xp != NULL); 16224 16225 SD_INFO(SD_LOG_IO, un, 16226 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 16227 16228 ASSERT(sense_xp->xb_pktp != NULL); 16229 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 16230 == (FLAG_SENSING | FLAG_HEAD)); 16231 16232 un->un_sense_isbusy = 1; 16233 un->un_rqs_bp->b_resid = 0; 16234 sense_xp->xb_pktp->pkt_resid = 0; 16235 sense_xp->xb_pktp->pkt_reason = 0; 16236 16237 /* So we can get back the bp at interrupt time! */ 16238 sense_xp->xb_sense_bp = bp; 16239 16240 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 16241 16242 /* 16243 * Mark this buf as awaiting sense data. (This is already set in 16244 * the pkt_flags for the RQS packet.) 
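 *
 * The busy/idle pairing (sd_mark_rqs_busy() here, sd_mark_rqs_idle()
 * below) amounts to a single-owner resource protocol under SD_MUTEX; a
 * condensed sketch with hypothetical names (the mutex is held by the
 * caller across both operations):
 *
 *	static void	*rqs_owner;	// bp whose sense is being fetched
 *	static int	rqs_busy;
 *
 *	static void
 *	mark_busy(void *bp)
 *	{
 *		// only one sense fetch can be outstanding at a time
 *		rqs_busy = 1;
 *		rqs_owner = bp;		// so the interrupt handler can
 *	}				// recover the original command
 *
 *	static void *
 *	mark_idle(void)
 *	{
 *		void *bp = rqs_owner;
 *		rqs_busy = 0;
 *		rqs_owner = NULL;
 *		return (bp);		// hand the original command back
 *	}
 *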
16245 */ 16246 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 16247 16248 /* Request sense down same path */ 16249 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) && 16250 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance) 16251 sense_xp->xb_pktp->pkt_path_instance = 16252 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance; 16253 16254 sense_xp->xb_retry_count = 0; 16255 sense_xp->xb_victim_retry_count = 0; 16256 sense_xp->xb_ua_retry_count = 0; 16257 sense_xp->xb_nr_retry_count = 0; 16258 sense_xp->xb_dma_resid = 0; 16259 16260 /* Clean up the fields for auto-request sense */ 16261 sense_xp->xb_sense_status = 0; 16262 sense_xp->xb_sense_state = 0; 16263 sense_xp->xb_sense_resid = 0; 16264 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 16265 16266 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 16267 } 16268 16269 16270 /* 16271 * Function: sd_mark_rqs_idle 16272 * 16273 * Description: SD_MUTEX must be held continuously through this routine 16274 * to prevent reuse of the rqs struct before the caller can 16275 * complete it's processing. 16276 * 16277 * Return Code: Pointer to the RQS buf 16278 * 16279 * Context: May be called under interrupt context 16280 */ 16281 16282 static struct buf * 16283 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 16284 { 16285 struct buf *bp; 16286 ASSERT(un != NULL); 16287 ASSERT(sense_xp != NULL); 16288 ASSERT(mutex_owned(SD_MUTEX(un))); 16289 ASSERT(un->un_sense_isbusy != 0); 16290 16291 un->un_sense_isbusy = 0; 16292 bp = sense_xp->xb_sense_bp; 16293 sense_xp->xb_sense_bp = NULL; 16294 16295 /* This pkt is no longer interested in getting sense data */ 16296 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 16297 16298 return (bp); 16299 } 16300 16301 16302 16303 /* 16304 * Function: sd_alloc_rqs 16305 * 16306 * Description: Set up the unit to receive auto request sense data 16307 * 16308 * Return Code: DDI_SUCCESS or DDI_FAILURE 16309 * 16310 * Context: Called under attach(9E) context 16311 */ 16312 16313 static int 16314 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 16315 { 16316 struct sd_xbuf *xp; 16317 16318 ASSERT(un != NULL); 16319 ASSERT(!mutex_owned(SD_MUTEX(un))); 16320 ASSERT(un->un_rqs_bp == NULL); 16321 ASSERT(un->un_rqs_pktp == NULL); 16322 16323 /* 16324 * First allocate the required buf and scsi_pkt structs, then set up 16325 * the CDB in the scsi_pkt for a REQUEST SENSE command. 16326 */ 16327 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 16328 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 16329 if (un->un_rqs_bp == NULL) { 16330 return (DDI_FAILURE); 16331 } 16332 16333 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 16334 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 16335 16336 if (un->un_rqs_pktp == NULL) { 16337 sd_free_rqs(un); 16338 return (DDI_FAILURE); 16339 } 16340 16341 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 16342 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 16343 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 16344 16345 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 16346 16347 /* Set up the other needed members in the ARQ scsi_pkt. */ 16348 un->un_rqs_pktp->pkt_comp = sdintr; 16349 un->un_rqs_pktp->pkt_time = sd_io_time; 16350 un->un_rqs_pktp->pkt_flags |= 16351 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 16352 16353 /* 16354 * Allocate & init the sd_xbuf struct for the RQS command. 
Do not
16355	 * provide any intpkt, destroypkt routines as we take care of
16356	 * scsi_pkt allocation/freeing here and in sd_free_rqs().
16357	 */
16358	xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
16359	sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
16360	xp->xb_pktp = un->un_rqs_pktp;
16361	SD_INFO(SD_LOG_ATTACH_DETACH, un,
16362	    "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
16363	    un, xp, un->un_rqs_pktp, un->un_rqs_bp);
16364
16365	/*
16366	 * Save the pointer to the request sense private bp so it can
16367	 * be retrieved in sdintr.
16368	 */
16369	un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
16370	ASSERT(un->un_rqs_bp->b_private == xp);
16371
16372	/*
16373	 * See if the HBA supports auto-request sense for the specified
16374	 * target/lun. If it does, then try to enable it (if not already
16375	 * enabled).
16376	 *
16377	 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
16378	 * failure, while for other HBAs (pln) scsi_ifsetcap will always
16379	 * return success. However, in both of these cases ARQ is always
16380	 * enabled and scsi_ifgetcap will always return true. The best approach
16381	 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
16382	 *
16383	 * The third case is an HBA (adp) that always returns enabled on
16384	 * scsi_ifgetcap even when ARQ is not enabled; the best approach
16385	 * there is to issue a scsi_ifsetcap and then a scsi_ifgetcap.
16386	 * Note: this case is to circumvent the Adaptec bug. (x86 only)
16387	 */
16388
16389	if (un->un_f_is_fibre == TRUE) {
16390		un->un_f_arq_enabled = TRUE;
16391	} else {
16392 #if defined(__x86)
16393		/*
16394		 * Circumvent the Adaptec bug; remove this code when
16395		 * the bug is fixed.
16396		 */
16397		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
16398 #endif
16399		switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
16400		case 0:
16401			SD_INFO(SD_LOG_ATTACH_DETACH, un,
16402			    "sd_alloc_rqs: HBA supports ARQ\n");
16403			/*
16404			 * ARQ is supported by this HBA but currently is not
16405			 * enabled. Attempt to enable it and if successful then
16406			 * mark this instance as ARQ enabled.
16407			 */
16408			if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
16409			    == 1) {
16410				/* Successfully enabled ARQ in the HBA */
16411				SD_INFO(SD_LOG_ATTACH_DETACH, un,
16412				    "sd_alloc_rqs: ARQ enabled\n");
16413				un->un_f_arq_enabled = TRUE;
16414			} else {
16415				/* Could not enable ARQ in the HBA */
16416				SD_INFO(SD_LOG_ATTACH_DETACH, un,
16417				    "sd_alloc_rqs: failed ARQ enable\n");
16418				un->un_f_arq_enabled = FALSE;
16419			}
16420			break;
16421		case 1:
16422			/*
16423			 * ARQ is supported by this HBA and is already enabled.
16424			 * Just mark ARQ as enabled for this instance.
16425			 */
16426			SD_INFO(SD_LOG_ATTACH_DETACH, un,
16427			    "sd_alloc_rqs: ARQ already enabled\n");
16428			un->un_f_arq_enabled = TRUE;
16429			break;
16430		default:
16431			/*
16432			 * ARQ is not supported by this HBA; disable it for this
16433			 * instance.
16434			 */
16435			SD_INFO(SD_LOG_ATTACH_DETACH, un,
16436			    "sd_alloc_rqs: HBA does not support ARQ\n");
16437			un->un_f_arq_enabled = FALSE;
16438			break;
16439		}
16440	}
16441
16442	return (DDI_SUCCESS);
16443 }
16444
16445
16446 /*
16447  * Function: sd_free_rqs
16448  *
16449  * Description: Cleanup for the per-instance RQS command.
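 */

/*
 * Illustrative sketch (hypothetical helper, not in the driver): the
 * get-then-set capability negotiation that sd_alloc_rqs() above
 * performs with scsi_ifgetcap(9F)/scsi_ifsetcap(9F). Returns B_TRUE
 * when auto request sense is, or becomes, enabled.
 */
static boolean_t
example_enable_arq(struct scsi_address *ap)
{
	switch (scsi_ifgetcap(ap, "auto-rqsense", 1)) {
	case 0:
		/* Supported but currently disabled; try to enable it. */
		return (scsi_ifsetcap(ap, "auto-rqsense", 1, 1) == 1 ?
		    B_TRUE : B_FALSE);
	case 1:
		/* Already enabled. */
		return (B_TRUE);
	default:
		/* The capability is not supported by this HBA. */
		return (B_FALSE);
	}
}

/*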
16450  *
16451  * Context: Kernel thread context
16452  */
16453
16454 static void
16455 sd_free_rqs(struct sd_lun *un)
16456 {
16457	ASSERT(un != NULL);
16458
16459	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");
16460
16461	/*
16462	 * If consistent memory is bound to a scsi_pkt, the pkt
16463	 * has to be destroyed *before* freeing the consistent memory.
16464	 * Don't change the sequence of these operations.
16465	 * scsi_destroy_pkt() might access memory, which isn't allowed,
16466	 * after it was freed in scsi_free_consistent_buf().
16467	 */
16468	if (un->un_rqs_pktp != NULL) {
16469		scsi_destroy_pkt(un->un_rqs_pktp);
16470		un->un_rqs_pktp = NULL;
16471	}
16472
16473	if (un->un_rqs_bp != NULL) {
16474		struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp);
16475		if (xp != NULL) {
16476			kmem_free(xp, sizeof (struct sd_xbuf));
16477		}
16478		scsi_free_consistent_buf(un->un_rqs_bp);
16479		un->un_rqs_bp = NULL;
16480	}
16481	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
16482 }
16483
16484
16485
16486 /*
16487  * Function: sd_reduce_throttle
16488  *
16489  * Description: Reduces the maximum # of outstanding commands on a
16490  *		target to the current number of outstanding commands.
16491  *		Queues a timeout(9F) callback to restore the limit
16492  *		after a specified interval has elapsed.
16493  *		Typically used when we get a TRAN_BUSY return code
16494  *		back from scsi_transport().
16495  *
16496  * Arguments:	un - ptr to the sd_lun softstate struct
16497  *		throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
16498  *
16499  * Context: May be called from interrupt context
16500  */
16501
16502 static void
16503 sd_reduce_throttle(struct sd_lun *un, int throttle_type)
16504 {
16505	ASSERT(un != NULL);
16506	ASSERT(mutex_owned(SD_MUTEX(un)));
16507	ASSERT(un->un_ncmds_in_transport >= 0);
16508
16509	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16510	    "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
16511	    un, un->un_throttle, un->un_ncmds_in_transport);
16512
16513	if (un->un_throttle > 1) {
16514		if (un->un_f_use_adaptive_throttle == TRUE) {
16515			switch (throttle_type) {
16516			case SD_THROTTLE_TRAN_BUSY:
16517				if (un->un_busy_throttle == 0) {
16518					un->un_busy_throttle = un->un_throttle;
16519				}
16520				break;
16521			case SD_THROTTLE_QFULL:
16522				un->un_busy_throttle = 0;
16523				break;
16524			default:
16525				ASSERT(FALSE);
16526			}
16527
16528			if (un->un_ncmds_in_transport > 0) {
16529				un->un_throttle = un->un_ncmds_in_transport;
16530			}
16531
16532		} else {
16533			if (un->un_ncmds_in_transport == 0) {
16534				un->un_throttle = 1;
16535			} else {
16536				un->un_throttle = un->un_ncmds_in_transport;
16537			}
16538		}
16539	}
16540
16541	/* Reschedule the timeout if none is currently active */
16542	if (un->un_reset_throttle_timeid == NULL) {
16543		un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
16544		    un, SD_THROTTLE_RESET_INTERVAL);
16545		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16546		    "sd_reduce_throttle: timeout scheduled!\n");
16547	}
16548
16549	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16550	    "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16551 }
16552
16553
16554
16555 /*
16556  * Function: sd_restore_throttle
16557  *
16558  * Description: Callback function for timeout(9F). Resets the current
16559  *		value of un->un_throttle to its default.
16560  *
16561  * Arguments: arg - pointer to associated softstate for the device.
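 */

/*
 * Illustrative sketch (hypothetical helper): the clamping rule that
 * sd_reduce_throttle() above applies to the throttle limit, i.e. drop
 * it to the number of commands currently in transport, but never
 * below one.
 */
static int
example_reduced_throttle(int cur_throttle, int ncmds_in_transport)
{
	if (cur_throttle <= 1)
		return (cur_throttle);
	return ((ncmds_in_transport > 0) ? ncmds_in_transport : 1);
}

/*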
16562 * 16563 * Context: May be called from interrupt context 16564 */ 16565 16566 static void 16567 sd_restore_throttle(void *arg) 16568 { 16569 struct sd_lun *un = arg; 16570 16571 ASSERT(un != NULL); 16572 ASSERT(!mutex_owned(SD_MUTEX(un))); 16573 16574 mutex_enter(SD_MUTEX(un)); 16575 16576 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 16577 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 16578 16579 un->un_reset_throttle_timeid = NULL; 16580 16581 if (un->un_f_use_adaptive_throttle == TRUE) { 16582 /* 16583 * If un_busy_throttle is nonzero, then it contains the 16584 * value that un_throttle was when we got a TRAN_BUSY back 16585 * from scsi_transport(). We want to revert back to this 16586 * value. 16587 * 16588 * In the QFULL case, the throttle limit will incrementally 16589 * increase until it reaches max throttle. 16590 */ 16591 if (un->un_busy_throttle > 0) { 16592 un->un_throttle = un->un_busy_throttle; 16593 un->un_busy_throttle = 0; 16594 } else { 16595 /* 16596 * increase throttle by 10% open gate slowly, schedule 16597 * another restore if saved throttle has not been 16598 * reached 16599 */ 16600 short throttle; 16601 if (sd_qfull_throttle_enable) { 16602 throttle = un->un_throttle + 16603 max((un->un_throttle / 10), 1); 16604 un->un_throttle = 16605 (throttle < un->un_saved_throttle) ? 16606 throttle : un->un_saved_throttle; 16607 if (un->un_throttle < un->un_saved_throttle) { 16608 un->un_reset_throttle_timeid = 16609 timeout(sd_restore_throttle, 16610 un, 16611 SD_QFULL_THROTTLE_RESET_INTERVAL); 16612 } 16613 } 16614 } 16615 16616 /* 16617 * If un_throttle has fallen below the low-water mark, we 16618 * restore the maximum value here (and allow it to ratchet 16619 * down again if necessary). 16620 */ 16621 if (un->un_throttle < un->un_min_throttle) { 16622 un->un_throttle = un->un_saved_throttle; 16623 } 16624 } else { 16625 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 16626 "restoring limit from 0x%x to 0x%x\n", 16627 un->un_throttle, un->un_saved_throttle); 16628 un->un_throttle = un->un_saved_throttle; 16629 } 16630 16631 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16632 "sd_restore_throttle: calling sd_start_cmds!\n"); 16633 16634 sd_start_cmds(un, NULL); 16635 16636 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16637 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 16638 un, un->un_throttle); 16639 16640 mutex_exit(SD_MUTEX(un)); 16641 16642 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 16643 } 16644 16645 /* 16646 * Function: sdrunout 16647 * 16648 * Description: Callback routine for scsi_init_pkt when a resource allocation 16649 * fails. 16650 * 16651 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 16652 * soft state instance. 16653 * 16654 * Return Code: The scsi_init_pkt routine allows for the callback function to 16655 * return a 0 indicating the callback should be rescheduled or a 1 16656 * indicating not to reschedule. This routine always returns 1 16657 * because the driver always provides a callback function to 16658 * scsi_init_pkt. This results in a callback always being scheduled 16659 * (via the scsi_init_pkt callback implementation) if a resource 16660 * failure occurs. 16661 * 16662 * Context: This callback function may not block or call routines that block 16663 * 16664 * Note: Using the scsi_init_pkt callback facility can result in an I/O 16665 * request persisting at the head of the list which cannot be 16666 * satisfied even after multiple retries. 
In the future the driver 16667 * may implement some time of maximum runout count before failing 16668 * an I/O. 16669 */ 16670 16671 static int 16672 sdrunout(caddr_t arg) 16673 { 16674 struct sd_lun *un = (struct sd_lun *)arg; 16675 16676 ASSERT(un != NULL); 16677 ASSERT(!mutex_owned(SD_MUTEX(un))); 16678 16679 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 16680 16681 mutex_enter(SD_MUTEX(un)); 16682 sd_start_cmds(un, NULL); 16683 mutex_exit(SD_MUTEX(un)); 16684 /* 16685 * This callback routine always returns 1 (i.e. do not reschedule) 16686 * because we always specify sdrunout as the callback handler for 16687 * scsi_init_pkt inside the call to sd_start_cmds. 16688 */ 16689 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 16690 return (1); 16691 } 16692 16693 16694 /* 16695 * Function: sdintr 16696 * 16697 * Description: Completion callback routine for scsi_pkt(9S) structs 16698 * sent to the HBA driver via scsi_transport(9F). 16699 * 16700 * Context: Interrupt context 16701 */ 16702 16703 static void 16704 sdintr(struct scsi_pkt *pktp) 16705 { 16706 struct buf *bp; 16707 struct sd_xbuf *xp; 16708 struct sd_lun *un; 16709 size_t actual_len; 16710 sd_ssc_t *sscp; 16711 16712 ASSERT(pktp != NULL); 16713 bp = (struct buf *)pktp->pkt_private; 16714 ASSERT(bp != NULL); 16715 xp = SD_GET_XBUF(bp); 16716 ASSERT(xp != NULL); 16717 ASSERT(xp->xb_pktp != NULL); 16718 un = SD_GET_UN(bp); 16719 ASSERT(un != NULL); 16720 ASSERT(!mutex_owned(SD_MUTEX(un))); 16721 16722 #ifdef SD_FAULT_INJECTION 16723 16724 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 16725 /* SD FaultInjection */ 16726 sd_faultinjection(pktp); 16727 16728 #endif /* SD_FAULT_INJECTION */ 16729 16730 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 16731 " xp:0x%p, un:0x%p\n", bp, xp, un); 16732 16733 mutex_enter(SD_MUTEX(un)); 16734 16735 ASSERT(un->un_fm_private != NULL); 16736 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16737 ASSERT(sscp != NULL); 16738 16739 /* Reduce the count of the #commands currently in transport */ 16740 un->un_ncmds_in_transport--; 16741 ASSERT(un->un_ncmds_in_transport >= 0); 16742 16743 /* Increment counter to indicate that the callback routine is active */ 16744 un->un_in_callback++; 16745 16746 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 16747 16748 #ifdef SDDEBUG 16749 if (bp == un->un_retry_bp) { 16750 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 16751 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 16752 un, un->un_retry_bp, un->un_ncmds_in_transport); 16753 } 16754 #endif 16755 16756 /* 16757 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 16758 * state if needed. 16759 */ 16760 if (pktp->pkt_reason == CMD_DEV_GONE) { 16761 /* Prevent multiple console messages for the same failure. */ 16762 if (un->un_last_pkt_reason != CMD_DEV_GONE) { 16763 un->un_last_pkt_reason = CMD_DEV_GONE; 16764 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16765 "Command failed to complete...Device is gone\n"); 16766 } 16767 if (un->un_mediastate != DKIO_DEV_GONE) { 16768 un->un_mediastate = DKIO_DEV_GONE; 16769 cv_broadcast(&un->un_state_cv); 16770 } 16771 /* 16772 * If the command happens to be the REQUEST SENSE command, 16773 * free up the rqs buf and fail the original command. 
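 * (sd_mark_rqs_idle() returns the original command's bp, so the
 * failure below is charged to the caller's request rather than to
 * the driver's internal RQS buf.)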
16774 */ 16775 if (bp == un->un_rqs_bp) { 16776 bp = sd_mark_rqs_idle(un, xp); 16777 } 16778 sd_return_failed_command(un, bp, EIO); 16779 goto exit; 16780 } 16781 16782 if (pktp->pkt_state & STATE_XARQ_DONE) { 16783 SD_TRACE(SD_LOG_COMMON, un, 16784 "sdintr: extra sense data received. pkt=%p\n", pktp); 16785 } 16786 16787 /* 16788 * First see if the pkt has auto-request sense data with it.... 16789 * Look at the packet state first so we don't take a performance 16790 * hit looking at the arq enabled flag unless absolutely necessary. 16791 */ 16792 if ((pktp->pkt_state & STATE_ARQ_DONE) && 16793 (un->un_f_arq_enabled == TRUE)) { 16794 /* 16795 * The HBA did an auto request sense for this command so check 16796 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16797 * driver command that should not be retried. 16798 */ 16799 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16800 /* 16801 * Save the relevant sense info into the xp for the 16802 * original cmd. 16803 */ 16804 struct scsi_arq_status *asp; 16805 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16806 xp->xb_sense_status = 16807 *((uchar_t *)(&(asp->sts_rqpkt_status))); 16808 xp->xb_sense_state = asp->sts_rqpkt_state; 16809 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16810 if (pktp->pkt_state & STATE_XARQ_DONE) { 16811 actual_len = MAX_SENSE_LENGTH - 16812 xp->xb_sense_resid; 16813 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16814 MAX_SENSE_LENGTH); 16815 } else { 16816 if (xp->xb_sense_resid > SENSE_LENGTH) { 16817 actual_len = MAX_SENSE_LENGTH - 16818 xp->xb_sense_resid; 16819 } else { 16820 actual_len = SENSE_LENGTH - 16821 xp->xb_sense_resid; 16822 } 16823 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16824 if ((((struct uscsi_cmd *) 16825 (xp->xb_pktinfo))->uscsi_rqlen) > 16826 actual_len) { 16827 xp->xb_sense_resid = 16828 (((struct uscsi_cmd *) 16829 (xp->xb_pktinfo))-> 16830 uscsi_rqlen) - actual_len; 16831 } else { 16832 xp->xb_sense_resid = 0; 16833 } 16834 } 16835 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16836 SENSE_LENGTH); 16837 } 16838 16839 /* fail the command */ 16840 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16841 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 16842 sd_return_failed_command(un, bp, EIO); 16843 goto exit; 16844 } 16845 16846 #if (defined(__x86)) /* DMAFREE for x86 only */ 16847 /* 16848 * We want to either retry or fail this command, so free 16849 * the DMA resources here. If we retry the command then 16850 * the DMA resources will be reallocated in sd_start_cmds(). 16851 * Note that when PKT_DMA_PARTIAL is used, this reallocation 16852 * causes the *entire* transfer to start over again from the 16853 * beginning of the request, even for PARTIAL chunks that 16854 * have already transferred successfully. 
16855 */ 16856 if ((un->un_f_is_fibre == TRUE) && 16857 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16858 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16859 scsi_dmafree(pktp); 16860 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16861 } 16862 #endif 16863 16864 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16865 "sdintr: arq done, sd_handle_auto_request_sense\n"); 16866 16867 sd_handle_auto_request_sense(un, bp, xp, pktp); 16868 goto exit; 16869 } 16870 16871 /* Next see if this is the REQUEST SENSE pkt for the instance */ 16872 if (pktp->pkt_flags & FLAG_SENSING) { 16873 /* This pktp is from the unit's REQUEST_SENSE command */ 16874 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16875 "sdintr: sd_handle_request_sense\n"); 16876 sd_handle_request_sense(un, bp, xp, pktp); 16877 goto exit; 16878 } 16879 16880 /* 16881 * Check to see if the command successfully completed as requested; 16882 * this is the most common case (and also the hot performance path). 16883 * 16884 * Requirements for successful completion are: 16885 * pkt_reason is CMD_CMPLT and packet status is status good. 16886 * In addition: 16887 * - A residual of zero indicates successful completion no matter what 16888 * the command is. 16889 * - If the residual is not zero and the command is not a read or 16890 * write, then it's still defined as successful completion. In other 16891 * words, if the command is a read or write the residual must be 16892 * zero for successful completion. 16893 * - If the residual is not zero and the command is a read or 16894 * write, and it's a USCSICMD, then it's still defined as 16895 * successful completion. 16896 */ 16897 if ((pktp->pkt_reason == CMD_CMPLT) && 16898 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 16899 16900 /* 16901 * Since this command is returned with a good status, we 16902 * can reset the count for Sonoma failover. 16903 */ 16904 un->un_sonoma_failure_count = 0; 16905 16906 /* 16907 * Return all USCSI commands on good status 16908 */ 16909 if (pktp->pkt_resid == 0) { 16910 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16911 "sdintr: returning command for resid == 0\n"); 16912 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 16913 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 16914 SD_UPDATE_B_RESID(bp, pktp); 16915 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16916 "sdintr: returning command for resid != 0\n"); 16917 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16918 SD_UPDATE_B_RESID(bp, pktp); 16919 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16920 "sdintr: returning uscsi command\n"); 16921 } else { 16922 goto not_successful; 16923 } 16924 sd_return_command(un, bp); 16925 16926 /* 16927 * Decrement counter to indicate that the callback routine 16928 * is done. 16929 */ 16930 un->un_in_callback--; 16931 ASSERT(un->un_in_callback >= 0); 16932 mutex_exit(SD_MUTEX(un)); 16933 16934 return; 16935 } 16936 16937 not_successful: 16938 16939 #if (defined(__x86)) /* DMAFREE for x86 only */ 16940 /* 16941 * The following is based upon knowledge of the underlying transport 16942 * and its use of DMA resources. This code should be removed when 16943 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 16944 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 16945 * and sd_start_cmds(). 16946 * 16947 * Free any DMA resources associated with this command if there 16948 * is a chance it could be retried or enqueued for later retry. 
16949 * If we keep the DMA binding then mpxio cannot reissue the 16950 * command on another path whenever a path failure occurs. 16951 * 16952 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 16953 * causes the *entire* transfer to start over again from the 16954 * beginning of the request, even for PARTIAL chunks that 16955 * have already transferred successfully. 16956 * 16957 * This is only done for non-uscsi commands (and also skipped for the 16958 * driver's internal RQS command). Also just do this for Fibre Channel 16959 * devices as these are the only ones that support mpxio. 16960 */ 16961 if ((un->un_f_is_fibre == TRUE) && 16962 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16963 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16964 scsi_dmafree(pktp); 16965 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16966 } 16967 #endif 16968 16969 /* 16970 * The command did not successfully complete as requested so check 16971 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16972 * driver command that should not be retried so just return. If 16973 * FLAG_DIAGNOSE is not set the error will be processed below. 16974 */ 16975 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16976 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16977 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 16978 /* 16979 * Issue a request sense if a check condition caused the error 16980 * (we handle the auto request sense case above), otherwise 16981 * just fail the command. 16982 */ 16983 if ((pktp->pkt_reason == CMD_CMPLT) && 16984 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 16985 sd_send_request_sense_command(un, bp, pktp); 16986 } else { 16987 sd_return_failed_command(un, bp, EIO); 16988 } 16989 goto exit; 16990 } 16991 16992 /* 16993 * The command did not successfully complete as requested so process 16994 * the error, retry, and/or attempt recovery. 16995 */ 16996 switch (pktp->pkt_reason) { 16997 case CMD_CMPLT: 16998 switch (SD_GET_PKT_STATUS(pktp)) { 16999 case STATUS_GOOD: 17000 /* 17001 * The command completed successfully with a non-zero 17002 * residual 17003 */ 17004 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17005 "sdintr: STATUS_GOOD \n"); 17006 sd_pkt_status_good(un, bp, xp, pktp); 17007 break; 17008 17009 case STATUS_CHECK: 17010 case STATUS_TERMINATED: 17011 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17012 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 17013 sd_pkt_status_check_condition(un, bp, xp, pktp); 17014 break; 17015 17016 case STATUS_BUSY: 17017 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17018 "sdintr: STATUS_BUSY\n"); 17019 sd_pkt_status_busy(un, bp, xp, pktp); 17020 break; 17021 17022 case STATUS_RESERVATION_CONFLICT: 17023 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17024 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 17025 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 17026 break; 17027 17028 case STATUS_QFULL: 17029 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 17030 "sdintr: STATUS_QFULL\n"); 17031 sd_pkt_status_qfull(un, bp, xp, pktp); 17032 break; 17033 17034 case STATUS_MET: 17035 case STATUS_INTERMEDIATE: 17036 case STATUS_SCSI2: 17037 case STATUS_INTERMEDIATE_MET: 17038 case STATUS_ACA_ACTIVE: 17039 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17040 "Unexpected SCSI status received: 0x%x\n", 17041 SD_GET_PKT_STATUS(pktp)); 17042 /* 17043 * Mark the ssc_flags when detected invalid status 17044 * code for non-USCSI command. 
17045		 */
17046		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17047			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
17048			    0, "stat-code");
17049		}
17050		sd_return_failed_command(un, bp, EIO);
17051		break;
17052
17053	default:
17054		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17055		    "Invalid SCSI status received: 0x%x\n",
17056		    SD_GET_PKT_STATUS(pktp));
17057		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17058			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
17059			    0, "stat-code");
17060		}
17061		sd_return_failed_command(un, bp, EIO);
17062		break;
17063
17064	}
17065	break;
17066
17067	case CMD_INCOMPLETE:
17068		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17069		    "sdintr: CMD_INCOMPLETE\n");
17070		sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
17071		break;
17072	case CMD_TRAN_ERR:
17073		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17074		    "sdintr: CMD_TRAN_ERR\n");
17075		sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
17076		break;
17077	case CMD_RESET:
17078		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17079		    "sdintr: CMD_RESET \n");
17080		sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
17081		break;
17082	case CMD_ABORTED:
17083		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17084		    "sdintr: CMD_ABORTED \n");
17085		sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
17086		break;
17087	case CMD_TIMEOUT:
17088		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17089		    "sdintr: CMD_TIMEOUT\n");
17090		sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
17091		break;
17092	case CMD_UNX_BUS_FREE:
17093		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17094		    "sdintr: CMD_UNX_BUS_FREE \n");
17095		sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
17096		break;
17097	case CMD_TAG_REJECT:
17098		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17099		    "sdintr: CMD_TAG_REJECT\n");
17100		sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
17101		break;
17102	default:
17103		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17104		    "sdintr: default\n");
17105		/*
17106		 * Mark the ssc_flags for detecting an invalid pkt_reason.
17107		 */
17108		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17109			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON,
17110			    0, "pkt-reason");
17111		}
17112		sd_pkt_reason_default(un, bp, xp, pktp);
17113		break;
17114	}
17115
17116 exit:
17117	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");
17118
17119	/* Decrement counter to indicate that the callback routine is done. */
17120	un->un_in_callback--;
17121	ASSERT(un->un_in_callback >= 0);
17122
17123	/*
17124	 * At this point, the pkt has been dispatched, i.e., it is either
17125	 * being re-tried or has been returned to its caller and should
17126	 * not be referenced.
17127	 */
17128
17129	mutex_exit(SD_MUTEX(un));
17130 }
17131
17132
17133 /*
17134  * Function: sd_print_incomplete_msg
17135  *
17136  * Description: Prints the error message for a CMD_INCOMPLETE error.
17137  *
17138  * Arguments: un - ptr to associated softstate for the device.
17139  *	      bp - ptr to the buf(9S) for the command.
17140  *	      arg - message string ptr
17141  *	      code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
17142  *	      or SD_NO_RETRY_ISSUED.
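 */

/*
 * Illustrative sketch (hypothetical helper): the "successful
 * completion" test that sdintr() above applies on its hot path,
 * i.e. transport completion with a GOOD SCSI status byte.
 */
static int
example_pkt_completed_ok(struct scsi_pkt *pktp)
{
	return ((pktp->pkt_reason == CMD_CMPLT) &&
	    ((*(pktp->pkt_scbp) & STATUS_MASK) == STATUS_GOOD));
}

/*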
17143 * 17144 * Context: May be called under interrupt context 17145 */ 17146 17147 static void 17148 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 17149 { 17150 struct scsi_pkt *pktp; 17151 char *msgp; 17152 char *cmdp = arg; 17153 17154 ASSERT(un != NULL); 17155 ASSERT(mutex_owned(SD_MUTEX(un))); 17156 ASSERT(bp != NULL); 17157 ASSERT(arg != NULL); 17158 pktp = SD_GET_PKTP(bp); 17159 ASSERT(pktp != NULL); 17160 17161 switch (code) { 17162 case SD_DELAYED_RETRY_ISSUED: 17163 case SD_IMMEDIATE_RETRY_ISSUED: 17164 msgp = "retrying"; 17165 break; 17166 case SD_NO_RETRY_ISSUED: 17167 default: 17168 msgp = "giving up"; 17169 break; 17170 } 17171 17172 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17173 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17174 "incomplete %s- %s\n", cmdp, msgp); 17175 } 17176 } 17177 17178 17179 17180 /* 17181 * Function: sd_pkt_status_good 17182 * 17183 * Description: Processing for a STATUS_GOOD code in pkt_status. 17184 * 17185 * Context: May be called under interrupt context 17186 */ 17187 17188 static void 17189 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 17190 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17191 { 17192 char *cmdp; 17193 17194 ASSERT(un != NULL); 17195 ASSERT(mutex_owned(SD_MUTEX(un))); 17196 ASSERT(bp != NULL); 17197 ASSERT(xp != NULL); 17198 ASSERT(pktp != NULL); 17199 ASSERT(pktp->pkt_reason == CMD_CMPLT); 17200 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 17201 ASSERT(pktp->pkt_resid != 0); 17202 17203 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 17204 17205 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17206 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 17207 case SCMD_READ: 17208 cmdp = "read"; 17209 break; 17210 case SCMD_WRITE: 17211 cmdp = "write"; 17212 break; 17213 default: 17214 SD_UPDATE_B_RESID(bp, pktp); 17215 sd_return_command(un, bp); 17216 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 17217 return; 17218 } 17219 17220 /* 17221 * See if we can retry the read/write, preferrably immediately. 17222 * If retries are exhaused, then sd_retry_command() will update 17223 * the b_resid count. 17224 */ 17225 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 17226 cmdp, EIO, (clock_t)0, NULL); 17227 17228 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 17229 } 17230 17231 17232 17233 17234 17235 /* 17236 * Function: sd_handle_request_sense 17237 * 17238 * Description: Processing for non-auto Request Sense command. 17239 * 17240 * Arguments: un - ptr to associated softstate 17241 * sense_bp - ptr to buf(9S) for the RQS command 17242 * sense_xp - ptr to the sd_xbuf for the RQS command 17243 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 17244 * 17245 * Context: May be called under interrupt context 17246 */ 17247 17248 static void 17249 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 17250 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 17251 { 17252 struct buf *cmd_bp; /* buf for the original command */ 17253 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 17254 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 17255 size_t actual_len; /* actual sense data length */ 17256 17257 ASSERT(un != NULL); 17258 ASSERT(mutex_owned(SD_MUTEX(un))); 17259 ASSERT(sense_bp != NULL); 17260 ASSERT(sense_xp != NULL); 17261 ASSERT(sense_pktp != NULL); 17262 17263 /* 17264 * Note the sense_bp, sense_xp, and sense_pktp here are for the 17265 * RQS command and not the original command. 
17266 */ 17267 ASSERT(sense_pktp == un->un_rqs_pktp); 17268 ASSERT(sense_bp == un->un_rqs_bp); 17269 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 17270 (FLAG_SENSING | FLAG_HEAD)); 17271 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 17272 FLAG_SENSING) == FLAG_SENSING); 17273 17274 /* These are the bp, xp, and pktp for the original command */ 17275 cmd_bp = sense_xp->xb_sense_bp; 17276 cmd_xp = SD_GET_XBUF(cmd_bp); 17277 cmd_pktp = SD_GET_PKTP(cmd_bp); 17278 17279 if (sense_pktp->pkt_reason != CMD_CMPLT) { 17280 /* 17281 * The REQUEST SENSE command failed. Release the REQUEST 17282 * SENSE command for re-use, get back the bp for the original 17283 * command, and attempt to re-try the original command if 17284 * FLAG_DIAGNOSE is not set in the original packet. 17285 */ 17286 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17287 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 17288 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 17289 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 17290 NULL, NULL, EIO, (clock_t)0, NULL); 17291 return; 17292 } 17293 } 17294 17295 /* 17296 * Save the relevant sense info into the xp for the original cmd. 17297 * 17298 * Note: if the request sense failed the state info will be zero 17299 * as set in sd_mark_rqs_busy() 17300 */ 17301 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 17302 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 17303 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 17304 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 17305 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 17306 SENSE_LENGTH)) { 17307 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 17308 MAX_SENSE_LENGTH); 17309 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 17310 } else { 17311 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 17312 SENSE_LENGTH); 17313 if (actual_len < SENSE_LENGTH) { 17314 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 17315 } else { 17316 cmd_xp->xb_sense_resid = 0; 17317 } 17318 } 17319 17320 /* 17321 * Free up the RQS command.... 17322 * NOTE: 17323 * Must do this BEFORE calling sd_validate_sense_data! 17324 * sd_validate_sense_data may return the original command in 17325 * which case the pkt will be freed and the flags can no 17326 * longer be touched. 17327 * SD_MUTEX is held through this process until the command 17328 * is dispatched based upon the sense data, so there are 17329 * no race conditions. 17330 */ 17331 (void) sd_mark_rqs_idle(un, sense_xp); 17332 17333 /* 17334 * For a retryable command see if we have valid sense data, if so then 17335 * turn it over to sd_decode_sense() to figure out the right course of 17336 * action. Just fail a non-retryable command. 17337 */ 17338 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 17339 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 17340 SD_SENSE_DATA_IS_VALID) { 17341 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 17342 } 17343 } else { 17344 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 17345 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17346 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 17347 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 17348 sd_return_failed_command(un, cmd_bp, EIO); 17349 } 17350 } 17351 17352 17353 17354 17355 /* 17356 * Function: sd_handle_auto_request_sense 17357 * 17358 * Description: Processing for auto-request sense information. 
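 */

/*
 * Illustrative sketch (hypothetical helper): deriving the number of
 * sense bytes actually transferred from the allocated length and the
 * residual, as sd_handle_request_sense() above does with
 * MAX_SENSE_LENGTH and pkt_resid.
 */
static size_t
example_sense_actual_len(size_t alloc_len, size_t resid)
{
	return ((resid < alloc_len) ? (alloc_len - resid) : 0);
}

/*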
17359 * 17360 * Arguments: un - ptr to associated softstate 17361 * bp - ptr to buf(9S) for the command 17362 * xp - ptr to the sd_xbuf for the command 17363 * pktp - ptr to the scsi_pkt(9S) for the command 17364 * 17365 * Context: May be called under interrupt context 17366 */ 17367 17368 static void 17369 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 17370 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17371 { 17372 struct scsi_arq_status *asp; 17373 size_t actual_len; 17374 17375 ASSERT(un != NULL); 17376 ASSERT(mutex_owned(SD_MUTEX(un))); 17377 ASSERT(bp != NULL); 17378 ASSERT(xp != NULL); 17379 ASSERT(pktp != NULL); 17380 ASSERT(pktp != un->un_rqs_pktp); 17381 ASSERT(bp != un->un_rqs_bp); 17382 17383 /* 17384 * For auto-request sense, we get a scsi_arq_status back from 17385 * the HBA, with the sense data in the sts_sensedata member. 17386 * The pkt_scbp of the packet points to this scsi_arq_status. 17387 */ 17388 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 17389 17390 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 17391 /* 17392 * The auto REQUEST SENSE failed; see if we can re-try 17393 * the original command. 17394 */ 17395 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17396 "auto request sense failed (reason=%s)\n", 17397 scsi_rname(asp->sts_rqpkt_reason)); 17398 17399 sd_reset_target(un, pktp); 17400 17401 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17402 NULL, NULL, EIO, (clock_t)0, NULL); 17403 return; 17404 } 17405 17406 /* Save the relevant sense info into the xp for the original cmd. */ 17407 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 17408 xp->xb_sense_state = asp->sts_rqpkt_state; 17409 xp->xb_sense_resid = asp->sts_rqpkt_resid; 17410 if (xp->xb_sense_state & STATE_XARQ_DONE) { 17411 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 17412 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 17413 MAX_SENSE_LENGTH); 17414 } else { 17415 if (xp->xb_sense_resid > SENSE_LENGTH) { 17416 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 17417 } else { 17418 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 17419 } 17420 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 17421 if ((((struct uscsi_cmd *) 17422 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 17423 xp->xb_sense_resid = (((struct uscsi_cmd *) 17424 (xp->xb_pktinfo))->uscsi_rqlen) - 17425 actual_len; 17426 } else { 17427 xp->xb_sense_resid = 0; 17428 } 17429 } 17430 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 17431 } 17432 17433 /* 17434 * See if we have valid sense data, if so then turn it over to 17435 * sd_decode_sense() to figure out the right course of action. 17436 */ 17437 if (sd_validate_sense_data(un, bp, xp, actual_len) == 17438 SD_SENSE_DATA_IS_VALID) { 17439 sd_decode_sense(un, bp, xp, pktp); 17440 } 17441 } 17442 17443 17444 /* 17445 * Function: sd_print_sense_failed_msg 17446 * 17447 * Description: Print log message when RQS has failed. 
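 */

/*
 * Illustrative sketch (hypothetical helper): how auto request sense
 * data arrives embedded in the scsi_arq_status that pkt_scbp points
 * to, as consumed by sd_handle_auto_request_sense() above. Returns
 * NULL when the ARQ command itself did not complete.
 */
static uint8_t *
example_arq_sense_data(struct scsi_pkt *pktp)
{
	struct scsi_arq_status *asp =
	    (struct scsi_arq_status *)(pktp->pkt_scbp);

	if (asp->sts_rqpkt_reason != CMD_CMPLT)
		return (NULL);
	return ((uint8_t *)&asp->sts_sensedata);
}

/*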
17448 * 17449 * Arguments: un - ptr to associated softstate 17450 * bp - ptr to buf(9S) for the command 17451 * arg - generic message string ptr 17452 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17453 * or SD_NO_RETRY_ISSUED 17454 * 17455 * Context: May be called from interrupt context 17456 */ 17457 17458 static void 17459 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 17460 int code) 17461 { 17462 char *msgp = arg; 17463 17464 ASSERT(un != NULL); 17465 ASSERT(mutex_owned(SD_MUTEX(un))); 17466 ASSERT(bp != NULL); 17467 17468 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 17469 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 17470 } 17471 } 17472 17473 17474 /* 17475 * Function: sd_validate_sense_data 17476 * 17477 * Description: Check the given sense data for validity. 17478 * If the sense data is not valid, the command will 17479 * be either failed or retried! 17480 * 17481 * Return Code: SD_SENSE_DATA_IS_INVALID 17482 * SD_SENSE_DATA_IS_VALID 17483 * 17484 * Context: May be called from interrupt context 17485 */ 17486 17487 static int 17488 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17489 size_t actual_len) 17490 { 17491 struct scsi_extended_sense *esp; 17492 struct scsi_pkt *pktp; 17493 char *msgp = NULL; 17494 sd_ssc_t *sscp; 17495 17496 ASSERT(un != NULL); 17497 ASSERT(mutex_owned(SD_MUTEX(un))); 17498 ASSERT(bp != NULL); 17499 ASSERT(bp != un->un_rqs_bp); 17500 ASSERT(xp != NULL); 17501 ASSERT(un->un_fm_private != NULL); 17502 17503 pktp = SD_GET_PKTP(bp); 17504 ASSERT(pktp != NULL); 17505 17506 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 17507 ASSERT(sscp != NULL); 17508 17509 /* 17510 * Check the status of the RQS command (auto or manual). 17511 */ 17512 switch (xp->xb_sense_status & STATUS_MASK) { 17513 case STATUS_GOOD: 17514 break; 17515 17516 case STATUS_RESERVATION_CONFLICT: 17517 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 17518 return (SD_SENSE_DATA_IS_INVALID); 17519 17520 case STATUS_BUSY: 17521 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17522 "Busy Status on REQUEST SENSE\n"); 17523 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 17524 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 17525 return (SD_SENSE_DATA_IS_INVALID); 17526 17527 case STATUS_QFULL: 17528 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17529 "QFULL Status on REQUEST SENSE\n"); 17530 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 17531 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 17532 return (SD_SENSE_DATA_IS_INVALID); 17533 17534 case STATUS_CHECK: 17535 case STATUS_TERMINATED: 17536 msgp = "Check Condition on REQUEST SENSE\n"; 17537 goto sense_failed; 17538 17539 default: 17540 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 17541 goto sense_failed; 17542 } 17543 17544 /* 17545 * See if we got the minimum required amount of sense data. 17546 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 17547 * or less. 
17548	 */
17549	if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
17550	    (actual_len == 0)) {
17551		msgp = "Request Sense couldn't get sense data\n";
17552		goto sense_failed;
17553	}
17554
17555	if (actual_len < SUN_MIN_SENSE_LENGTH) {
17556		msgp = "Not enough sense information\n";
17557		/* Mark the ssc_flags for detecting invalid sense data */
17558		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17559			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17560			    "sense-data");
17561		}
17562		goto sense_failed;
17563	}
17564
17565	/*
17566	 * We require the extended sense data
17567	 */
17568	esp = (struct scsi_extended_sense *)xp->xb_sense_data;
17569	if (esp->es_class != CLASS_EXTENDED_SENSE) {
17570		if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
17571			static char tmp[8];
17572			static char buf[148];
17573			char *p = (char *)(xp->xb_sense_data);
17574			int i;
17575
17576			mutex_enter(&sd_sense_mutex);
17577			(void) strcpy(buf, "undecodable sense information:");
17578			for (i = 0; i < actual_len; i++) {
17579				(void) sprintf(tmp, " 0x%x", *(p++) & 0xff);
17580				(void) strcpy(&buf[strlen(buf)], tmp);
17581			}
17582			i = strlen(buf);
17583			(void) strcpy(&buf[i], "-(assumed fatal)\n");
17584
17585			if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
17586				scsi_log(SD_DEVINFO(un), sd_label,
17587				    CE_WARN, buf);
17588			}
17589			mutex_exit(&sd_sense_mutex);
17590		}
17591
17592		/* Mark the ssc_flags for detecting invalid sense data */
17593		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17594			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17595			    "sense-data");
17596		}
17597
17598		/* Note: Legacy behavior, fail the command with no retry */
17599		sd_return_failed_command(un, bp, EIO);
17600		return (SD_SENSE_DATA_IS_INVALID);
17601	}
17602
17603	/*
17604	 * Check that es_code is valid (es_class concatenated with es_code
17605	 * makes up the "response code" field). es_class will always be 7,
17606	 * so make sure es_code is 0, 1, 2, 3 or 0xf; es_code indicates the
17607	 * sense data format.
17608	 */
17609	if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
17610	    (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
17611	    (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
17612	    (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
17613	    (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
17614		/* Mark the ssc_flags for detecting invalid sense data */
17615		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17616			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17617			    "sense-data");
17618		}
17619		goto sense_failed;
17620	}
17621
17622	return (SD_SENSE_DATA_IS_VALID);
17623
17624 sense_failed:
17625	/*
17626	 * If the request sense failed (for whatever reason), attempt
17627	 * to retry the original command.
17628	 */
17629 #if defined(__x86)
17630	/*
17631	 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in
17632	 * sddef.h for the SPARC platform, while x86 uses one binary
17633	 * for both SCSI and FC.
17634	 * The SD_RETRY_DELAY value used here needs to be adjusted
17635	 * whenever SD_RETRY_DELAY changes in sddef.h.
17636	 */
17637	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17638	    sd_print_sense_failed_msg, msgp, EIO,
17639	    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);
17640 #else
17641	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17642	    sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
17643 #endif
17644
17645	return (SD_SENSE_DATA_IS_INVALID);
17646 }
17647
17648 /*
17649  * Function: sd_decode_sense
17650  *
17651  * Description: Take recovery action(s) when SCSI Sense Data is received.
17652  *
17653  * Context: Interrupt context.
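 */

/*
 * Illustrative sketch (hypothetical helper): the "response code"
 * validation performed by sd_validate_sense_data() above. es_class
 * must be CLASS_EXTENDED_SENSE (0x7) and es_code one of the known
 * sense data formats.
 */
static boolean_t
example_sense_format_ok(struct scsi_extended_sense *esp)
{
	if (esp->es_class != CLASS_EXTENDED_SENSE)
		return (B_FALSE);

	switch (esp->es_code) {
	case CODE_FMT_FIXED_CURRENT:
	case CODE_FMT_FIXED_DEFERRED:
	case CODE_FMT_DESCR_CURRENT:
	case CODE_FMT_DESCR_DEFERRED:
	case CODE_FMT_VENDOR_SPECIFIC:
		return (B_TRUE);
	default:
		return (B_FALSE);
	}
}

/*
 * (End of illustrative sketch; sd_decode_sense() follows.)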
17654 */ 17655 17656 static void 17657 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17658 struct scsi_pkt *pktp) 17659 { 17660 uint8_t sense_key; 17661 17662 ASSERT(un != NULL); 17663 ASSERT(mutex_owned(SD_MUTEX(un))); 17664 ASSERT(bp != NULL); 17665 ASSERT(bp != un->un_rqs_bp); 17666 ASSERT(xp != NULL); 17667 ASSERT(pktp != NULL); 17668 17669 sense_key = scsi_sense_key(xp->xb_sense_data); 17670 17671 switch (sense_key) { 17672 case KEY_NO_SENSE: 17673 sd_sense_key_no_sense(un, bp, xp, pktp); 17674 break; 17675 case KEY_RECOVERABLE_ERROR: 17676 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 17677 bp, xp, pktp); 17678 break; 17679 case KEY_NOT_READY: 17680 sd_sense_key_not_ready(un, xp->xb_sense_data, 17681 bp, xp, pktp); 17682 break; 17683 case KEY_MEDIUM_ERROR: 17684 case KEY_HARDWARE_ERROR: 17685 sd_sense_key_medium_or_hardware_error(un, 17686 xp->xb_sense_data, bp, xp, pktp); 17687 break; 17688 case KEY_ILLEGAL_REQUEST: 17689 sd_sense_key_illegal_request(un, bp, xp, pktp); 17690 break; 17691 case KEY_UNIT_ATTENTION: 17692 sd_sense_key_unit_attention(un, xp->xb_sense_data, 17693 bp, xp, pktp); 17694 break; 17695 case KEY_WRITE_PROTECT: 17696 case KEY_VOLUME_OVERFLOW: 17697 case KEY_MISCOMPARE: 17698 sd_sense_key_fail_command(un, bp, xp, pktp); 17699 break; 17700 case KEY_BLANK_CHECK: 17701 sd_sense_key_blank_check(un, bp, xp, pktp); 17702 break; 17703 case KEY_ABORTED_COMMAND: 17704 sd_sense_key_aborted_command(un, bp, xp, pktp); 17705 break; 17706 case KEY_VENDOR_UNIQUE: 17707 case KEY_COPY_ABORTED: 17708 case KEY_EQUAL: 17709 case KEY_RESERVED: 17710 default: 17711 sd_sense_key_default(un, xp->xb_sense_data, 17712 bp, xp, pktp); 17713 break; 17714 } 17715 } 17716 17717 17718 /* 17719 * Function: sd_dump_memory 17720 * 17721 * Description: Debug logging routine to print the contents of a user provided 17722 * buffer. The output of the buffer is broken up into 256 byte 17723 * segments due to a size constraint of the scsi_log. 17724 * implementation. 17725 * 17726 * Arguments: un - ptr to softstate 17727 * comp - component mask 17728 * title - "title" string to preceed data when printed 17729 * data - ptr to data block to be printed 17730 * len - size of data block to be printed 17731 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 17732 * 17733 * Context: May be called from interrupt context 17734 */ 17735 17736 #define SD_DUMP_MEMORY_BUF_SIZE 256 17737 17738 static char *sd_dump_format_string[] = { 17739 " 0x%02x", 17740 " %c" 17741 }; 17742 17743 static void 17744 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 17745 int len, int fmt) 17746 { 17747 int i, j; 17748 int avail_count; 17749 int start_offset; 17750 int end_offset; 17751 size_t entry_len; 17752 char *bufp; 17753 char *local_buf; 17754 char *format_string; 17755 17756 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 17757 17758 /* 17759 * In the debug version of the driver, this function is called from a 17760 * number of places which are NOPs in the release driver. 17761 * The debug driver therefore has additional methods of filtering 17762 * debug output. 17763 */ 17764 #ifdef SDDEBUG 17765 /* 17766 * In the debug version of the driver we can reduce the amount of debug 17767 * messages by setting sd_error_level to something other than 17768 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 17769 * sd_component_mask. 
17770	 */
17771	if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
17772	    (sd_error_level != SCSI_ERR_ALL)) {
17773		return;
17774	}
17775	if (((sd_component_mask & comp) == 0) ||
17776	    (sd_error_level != SCSI_ERR_ALL)) {
17777		return;
17778	}
17779 #else
17780	if (sd_error_level != SCSI_ERR_ALL) {
17781		return;
17782	}
17783 #endif
17784
17785	local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
17786	bufp = local_buf;
17787	/*
17788	 * Available length is the length of local_buf[], minus the
17789	 * length of the title string, minus one for the ":", minus
17790	 * one for the newline, minus one for the NULL terminator.
17791	 * This gives the #bytes available for holding the printed
17792	 * values from the given data buffer.
17793	 */
17794	if (fmt == SD_LOG_HEX) {
17795		format_string = sd_dump_format_string[0];
17796	} else /* SD_LOG_CHAR */ {
17797		format_string = sd_dump_format_string[1];
17798	}
17799	/*
17800	 * Available count is the number of elements from the given
17801	 * data buffer that we can fit into the available length.
17802	 * This is based upon the size of the format string used.
17803	 * Make one entry and find its size.
17804	 */
17805	(void) sprintf(bufp, format_string, data[0]);
17806	entry_len = strlen(bufp);
17807	avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;
17808
17809	j = 0;
17810	while (j < len) {
17811		bufp = local_buf;
17812		bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
17813		start_offset = j;
17814
17815		end_offset = start_offset + avail_count;
17816
17817		(void) sprintf(bufp, "%s:", title);
17818		bufp += strlen(bufp);
17819		for (i = start_offset; ((i < end_offset) && (j < len));
17820		    i++, j++) {
17821			(void) sprintf(bufp, format_string, data[i]);
17822			bufp += entry_len;
17823		}
17824		(void) sprintf(bufp, "\n");
17825
17826		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
17827	}
17828	kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
17829 }
17830
17831 /*
17832  * Function: sd_print_sense_msg
17833  *
17834  * Description: Log a message based upon the given sense data.
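 */

/*
 * Worked example of the budget arithmetic in sd_dump_memory() above,
 * using values that appear in this file: with the 256-byte buffer,
 * the 10-character title "Sense Data", and " 0x%02x" entries of five
 * characters each, avail_count = (256 - 10 - 3) / 5 = 48, so up to
 * 48 data bytes are printed per scsi_log() line.
 */

/*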
17835  *
17836  * Arguments: un - ptr to associated softstate
17837  *	      bp - ptr to buf(9S) for the command
17838  *	      arg - ptr to associated sd_sense_info struct
17839  *	      code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
17840  *	      or SD_NO_RETRY_ISSUED
17841  *
17842  * Context: May be called from interrupt context
17843  */
17844
17845 static void
17846 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
17847 {
17848	struct sd_xbuf *xp;
17849	struct scsi_pkt *pktp;
17850	uint8_t *sensep;
17851	daddr_t request_blkno;
17852	diskaddr_t err_blkno;
17853	int severity;
17854	int pfa_flag;
17855	extern struct scsi_key_strings scsi_cmds[];
17856
17857	ASSERT(un != NULL);
17858	ASSERT(mutex_owned(SD_MUTEX(un)));
17859	ASSERT(bp != NULL);
17860	xp = SD_GET_XBUF(bp);
17861	ASSERT(xp != NULL);
17862	pktp = SD_GET_PKTP(bp);
17863	ASSERT(pktp != NULL);
17864	ASSERT(arg != NULL);
17865
17866	severity = ((struct sd_sense_info *)(arg))->ssi_severity;
17867	pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;
17868
17869	if ((code == SD_DELAYED_RETRY_ISSUED) ||
17870	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
17871		severity = SCSI_ERR_RETRYABLE;
17872	}
17873
17874	/* Use absolute block number for the request block number */
17875	request_blkno = xp->xb_blkno;
17876
17877	/*
17878	 * Now try to get the error block number from the sense data
17879	 */
17880	sensep = xp->xb_sense_data;
17881
17882	if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
17883	    (uint64_t *)&err_blkno)) {
17884		/*
17885		 * We retrieved the error block number from the information
17886		 * portion of the sense data.
17887		 *
17888		 * For USCSI commands we are better off using the error
17889		 * block no. as the requested block no. (This is the best
17890		 * we can estimate.)
17891		 */
17892		if ((SD_IS_BUFIO(xp) == FALSE) &&
17893		    ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
17894			request_blkno = err_blkno;
17895		}
17896	} else {
17897		/*
17898		 * Without the es_valid bit set (for fixed format) or an
17899		 * information descriptor (for descriptor format) we cannot
17900		 * be certain of the error blkno, so just use the
17901		 * request_blkno.
17902		 */
17903		err_blkno = (diskaddr_t)request_blkno;
17904	}
17905
17906	/*
17907	 * The following will log the buffer contents for the release driver
17908	 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
17909	 * level is set to verbose.
17910 */ 17911 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 17912 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17913 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 17914 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 17915 17916 if (pfa_flag == FALSE) { 17917 /* This is normally only set for USCSI */ 17918 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 17919 return; 17920 } 17921 17922 if ((SD_IS_BUFIO(xp) == TRUE) && 17923 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 17924 (severity < sd_error_level))) { 17925 return; 17926 } 17927 } 17928 /* 17929 * Check for Sonoma Failover and keep a count of how many failed I/O's 17930 */ 17931 if ((SD_IS_LSI(un)) && 17932 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 17933 (scsi_sense_asc(sensep) == 0x94) && 17934 (scsi_sense_ascq(sensep) == 0x01)) { 17935 un->un_sonoma_failure_count++; 17936 if (un->un_sonoma_failure_count > 1) { 17937 return; 17938 } 17939 } 17940 17941 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP || 17942 ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) && 17943 (pktp->pkt_resid == 0))) { 17944 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 17945 request_blkno, err_blkno, scsi_cmds, 17946 (struct scsi_extended_sense *)sensep, 17947 un->un_additional_codes, NULL); 17948 } 17949 } 17950 17951 /* 17952 * Function: sd_sense_key_no_sense 17953 * 17954 * Description: Recovery action when sense data was not received. 17955 * 17956 * Context: May be called from interrupt context 17957 */ 17958 17959 static void 17960 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17961 struct scsi_pkt *pktp) 17962 { 17963 struct sd_sense_info si; 17964 17965 ASSERT(un != NULL); 17966 ASSERT(mutex_owned(SD_MUTEX(un))); 17967 ASSERT(bp != NULL); 17968 ASSERT(xp != NULL); 17969 ASSERT(pktp != NULL); 17970 17971 si.ssi_severity = SCSI_ERR_FATAL; 17972 si.ssi_pfa_flag = FALSE; 17973 17974 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17975 17976 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17977 &si, EIO, (clock_t)0, NULL); 17978 } 17979 17980 17981 /* 17982 * Function: sd_sense_key_recoverable_error 17983 * 17984 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 
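 */

/*
 * Illustrative sketch (hypothetical helper): extracting the sense
 * key and additional sense codes with the scsi_sense_*(9F) helpers,
 * as the sd_sense_key_* routines in this section do. ASC 0x5D, for
 * example, reports a failure prediction threshold.
 */
static void
example_classify_sense(uint8_t *sensep)
{
	uint8_t key = scsi_sense_key(sensep);
	uint8_t asc = scsi_sense_asc(sensep);
	uint8_t ascq = scsi_sense_ascq(sensep);

	cmn_err(CE_NOTE, "sense key 0x%x asc 0x%x ascq 0x%x",
	    key, asc, ascq);
}

/*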
17985 * 17986 * Context: May be called from interrupt context 17987 */ 17988 17989 static void 17990 sd_sense_key_recoverable_error(struct sd_lun *un, uint8_t *sense_datap, 17991 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17992 { 17993 struct sd_sense_info si; 17994 uint8_t asc = scsi_sense_asc(sense_datap); 17995 uint8_t ascq = scsi_sense_ascq(sense_datap); 17996 17997 ASSERT(un != NULL); 17998 ASSERT(mutex_owned(SD_MUTEX(un))); 17999 ASSERT(bp != NULL); 18000 ASSERT(xp != NULL); 18001 ASSERT(pktp != NULL); 18002 18003 /* 18004 * 0x00, 0x1D: ATA PASSTHROUGH INFORMATION AVAILABLE 18005 */ 18006 if (asc == 0x00 && ascq == 0x1D) { 18007 sd_return_command(un, bp); 18008 return; 18009 } 18010 18011 /* 18012 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 18013 */ 18014 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 18015 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 18016 si.ssi_severity = SCSI_ERR_INFO; 18017 si.ssi_pfa_flag = TRUE; 18018 } else { 18019 SD_UPDATE_ERRSTATS(un, sd_softerrs); 18020 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 18021 si.ssi_severity = SCSI_ERR_RECOVERED; 18022 si.ssi_pfa_flag = FALSE; 18023 } 18024 18025 if (pktp->pkt_resid == 0) { 18026 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18027 sd_return_command(un, bp); 18028 return; 18029 } 18030 18031 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18032 &si, EIO, (clock_t)0, NULL); 18033 } 18034 18035 18036 18037 18038 /* 18039 * Function: sd_sense_key_not_ready 18040 * 18041 * Description: Recovery actions for a SCSI "Not Ready" sense key. 18042 * 18043 * Context: May be called from interrupt context 18044 */ 18045 18046 static void 18047 sd_sense_key_not_ready(struct sd_lun *un, uint8_t *sense_datap, struct buf *bp, 18048 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18049 { 18050 struct sd_sense_info si; 18051 uint8_t asc = scsi_sense_asc(sense_datap); 18052 uint8_t ascq = scsi_sense_ascq(sense_datap); 18053 18054 ASSERT(un != NULL); 18055 ASSERT(mutex_owned(SD_MUTEX(un))); 18056 ASSERT(bp != NULL); 18057 ASSERT(xp != NULL); 18058 ASSERT(pktp != NULL); 18059 18060 si.ssi_severity = SCSI_ERR_FATAL; 18061 si.ssi_pfa_flag = FALSE; 18062 18063 /* 18064 * Update error stats after first NOT READY error. Disks may have 18065 * been powered down and may need to be restarted. For CDROMs, 18066 * report NOT READY errors only if media is present. 18067 */ 18068 if ((ISCD(un) && (asc == 0x3A)) || 18069 (xp->xb_nr_retry_count > 0)) { 18070 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18071 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 18072 } 18073 18074 /* 18075 * Just fail if the "not ready" retry limit has been reached. 18076 */ 18077 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 18078 /* Special check for error message printing for removables. */ 18079 if (un->un_f_has_removable_media && (asc == 0x04) && 18080 (ascq >= 0x04)) { 18081 si.ssi_severity = SCSI_ERR_ALL; 18082 } 18083 goto fail_command; 18084 } 18085 18086 /* 18087 * Check the ASC and ASCQ in the sense data as needed, to determine 18088 * what to do. 18089 */ 18090 switch (asc) { 18091 case 0x04: /* LOGICAL UNIT NOT READY */ 18092 /* 18093 * disk drives that don't spin up result in a very long delay 18094 * in format without warning messages. We will log a message 18095 * if the error level is set to verbose. 
18096 */ 18097 if (sd_error_level < SCSI_ERR_RETRYABLE) { 18098 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18099 "logical unit not ready, resetting disk\n"); 18100 } 18101 18102 /* 18103 * There are different requirements for CDROMs and disks for 18104 * the number of retries. If a CD-ROM is reporting this, it is 18105 * probably reading the TOC and is in the process of getting 18106 * ready, so we should keep on trying for a long time to make 18107 * sure that all types of media are taken into account (for 18108 * some media the drive takes a long time to read the TOC). For 18109 * disks we do not want to retry this too many times as this 18110 * can cause a long hang in format when the drive refuses to 18111 * spin up (a very common failure). 18112 */ 18113 switch (ascq) { 18114 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 18115 /* 18116 * Disk drives frequently refuse to spin up, which 18117 * results in a very long hang in format without 18118 * warning messages. 18119 * 18120 * Note: This code preserves the legacy behavior of 18121 * comparing xb_nr_retry_count against zero for fibre 18122 * channel targets instead of comparing against the 18123 * un_reset_retry_count value. The reason for this 18124 * discrepancy has been so utterly lost beneath the 18125 * Sands of Time that even Indiana Jones could not 18126 * find it. 18127 */ 18128 if (un->un_f_is_fibre == TRUE) { 18129 if (((sd_level_mask & SD_LOGMASK_DIAG) || 18130 (xp->xb_nr_retry_count > 0)) && 18131 (un->un_startstop_timeid == NULL)) { 18132 scsi_log(SD_DEVINFO(un), sd_label, 18133 CE_WARN, "logical unit not ready, " 18134 "resetting disk\n"); 18135 sd_reset_target(un, pktp); 18136 } 18137 } else { 18138 if (((sd_level_mask & SD_LOGMASK_DIAG) || 18139 (xp->xb_nr_retry_count > 18140 un->un_reset_retry_count)) && 18141 (un->un_startstop_timeid == NULL)) { 18142 scsi_log(SD_DEVINFO(un), sd_label, 18143 CE_WARN, "logical unit not ready, " 18144 "resetting disk\n"); 18145 sd_reset_target(un, pktp); 18146 } 18147 } 18148 break; 18149 18150 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 18151 /* 18152 * If the target is in the process of becoming 18153 * ready, just proceed with the retry. This can 18154 * happen with CD-ROMs that take a long time to 18155 * read the TOC after a power cycle or reset. 18156 */ 18157 goto do_retry; 18158 18159 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 18160 break; 18161 18162 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 18163 /* 18164 * Retries cannot help here, so just fail right away. 18165 */ 18166 goto fail_command; 18167 18168 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 18169 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 18170 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 18171 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 18172 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 18173 default: /* Possible future codes in SCSI spec? */ 18174 /* 18175 * For removable-media devices, do not retry if 18176 * ASCQ > 2, as these result mostly from USCSI commands 18177 * on MMC devices issued to check the status of an 18178 * operation initiated in immediate mode. Also, for 18179 * ASCQ >= 4 do not print console messages, as these 18180 * mainly represent a user-initiated operation 18181 * rather than a system failure.
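 *
 * To recap the ASCQ handling for ASC 0x04 informally (a summary
 * of the code in this switch, not normative):
 *
 *	0x00	cause not reportable: possibly reset the target,
 *		then schedule a START STOP UNIT and retry
 *	0x01	becoming ready: simply retry (do_retry)
 *	0x02	initializing command required: schedule a START
 *		STOP UNIT and retry
 *	0x03	manual intervention required: fail immediately
 *	other	fail immediately for removable media; otherwise
 *		schedule a START STOP UNIT and retry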
18182 */ 18183 if (un->un_f_has_removable_media) { 18184 si.ssi_severity = SCSI_ERR_ALL; 18185 goto fail_command; 18186 } 18187 break; 18188 } 18189 18190 /* 18191 * As part of our recovery attempt for the NOT READY 18192 * condition, we issue a START STOP UNIT command. However, 18193 * we want to wait for a short delay before attempting this, 18194 * as there may still be more commands coming back from the 18195 * target with the check condition. To do this we use 18196 * timeout(9F) to call sd_start_stop_unit_callback() after 18197 * the delay interval expires. (sd_start_stop_unit_callback() 18198 * dispatches sd_start_stop_unit_task(), which will issue 18199 * the actual START STOP UNIT command.) The delay interval 18200 * is one-half of the delay that we will use to retry the 18201 * command that generated the NOT READY condition. 18202 * 18203 * Note that we could just dispatch sd_start_stop_unit_task() 18204 * from here and allow it to sleep for the delay interval, 18205 * but then we would be tying up the taskq thread 18206 * unnecessarily for the duration of the delay. 18207 * 18208 * Do not issue the START STOP UNIT if the current command 18209 * is already a START STOP UNIT. 18210 */ 18211 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 18212 break; 18213 } 18214 18215 /* 18216 * Do not schedule the timeout if one is already pending. 18217 */ 18218 if (un->un_startstop_timeid != NULL) { 18219 SD_INFO(SD_LOG_ERROR, un, 18220 "sd_sense_key_not_ready: restart already issued to" 18221 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 18222 ddi_get_instance(SD_DEVINFO(un))); 18223 break; 18224 } 18225 18226 /* 18227 * Schedule the START STOP UNIT command, then queue the command 18228 * for a retry. 18229 * 18230 * Note: A timeout is not scheduled for this retry because we 18231 * want the retry to be serial with the START_STOP_UNIT. The 18232 * retry will be started when the START_STOP_UNIT is completed 18233 * in sd_start_stop_unit_task. 18234 */ 18235 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 18236 un, un->un_busy_timeout / 2); 18237 xp->xb_nr_retry_count++; 18238 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 18239 return; 18240 18241 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 18242 if (sd_error_level < SCSI_ERR_RETRYABLE) { 18243 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18244 "unit does not respond to selection\n"); 18245 } 18246 break; 18247 18248 case 0x3A: /* MEDIUM NOT PRESENT */ 18249 if (sd_error_level >= SCSI_ERR_FATAL) { 18250 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18251 "Caddy not inserted in drive\n"); 18252 } 18253 18254 sr_ejected(un); 18255 un->un_mediastate = DKIO_EJECTED; 18256 /* The state has changed; inform the media watch routines */ 18257 cv_broadcast(&un->un_state_cv); 18258 /* Just fail if no media is present in the drive. */ 18259 goto fail_command; 18260 18261 default: 18262 if (sd_error_level < SCSI_ERR_RETRYABLE) { 18263 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 18264 "Unit not Ready. Additional sense code 0x%x\n", 18265 asc); 18266 } 18267 break; 18268 } 18269 18270 do_retry: 18271 18272 /* 18273 * Retry the command, as some targets may report NOT READY for 18274 * several seconds after being reset.
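 *
 * For reference, the callback scheduled above via timeout(9F) is
 * expected to do little more than hand the work off to the taskq;
 * a minimal sketch of that shape (the actual
 * sd_start_stop_unit_callback() implementation may differ) is:
 *
 *	static void
 *	sd_start_stop_unit_callback(void *arg)
 *	{
 *		struct sd_lun *un = arg;
 *		(void) taskq_dispatch(sd_tq, sd_start_stop_unit_task,
 *		    un, KM_NOSLEEP);
 *	}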
18275 */ 18276 xp->xb_nr_retry_count++; 18277 si.ssi_severity = SCSI_ERR_RETRYABLE; 18278 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 18279 &si, EIO, un->un_busy_timeout, NULL); 18280 18281 return; 18282 18283 fail_command: 18284 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18285 sd_return_failed_command(un, bp, EIO); 18286 } 18287 18288 18289 18290 /* 18291 * Function: sd_sense_key_medium_or_hardware_error 18292 * 18293 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 18294 * sense key. 18295 * 18296 * Context: May be called from interrupt context 18297 */ 18298 18299 static void 18300 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, uint8_t *sense_datap, 18301 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18302 { 18303 struct sd_sense_info si; 18304 uint8_t sense_key = scsi_sense_key(sense_datap); 18305 uint8_t asc = scsi_sense_asc(sense_datap); 18306 18307 ASSERT(un != NULL); 18308 ASSERT(mutex_owned(SD_MUTEX(un))); 18309 ASSERT(bp != NULL); 18310 ASSERT(xp != NULL); 18311 ASSERT(pktp != NULL); 18312 18313 si.ssi_severity = SCSI_ERR_FATAL; 18314 si.ssi_pfa_flag = FALSE; 18315 18316 if (sense_key == KEY_MEDIUM_ERROR) { 18317 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 18318 } 18319 18320 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18321 18322 if ((un->un_reset_retry_count != 0) && 18323 (xp->xb_retry_count == un->un_reset_retry_count)) { 18324 mutex_exit(SD_MUTEX(un)); 18325 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 18326 if (un->un_f_allow_bus_device_reset == TRUE) { 18327 18328 boolean_t try_resetting_target = B_TRUE; 18329 18330 /* 18331 * We need to be able to handle specific ASC values when 18332 * we are handling a KEY_HARDWARE_ERROR. In particular, 18333 * taking the default action of resetting the target may 18334 * not be the appropriate way to attempt recovery. 18335 * Resetting a target because of a single LUN failure 18336 * victimizes all LUNs on that target. 18337 * 18338 * This is true for LSI arrays: if an LSI 18339 * array controller returns an ASC of 0x84 (LUN Dead), we 18340 * should trust it. 18341 */ 18342 18343 if (sense_key == KEY_HARDWARE_ERROR) { 18344 switch (asc) { 18345 case 0x84: 18346 if (SD_IS_LSI(un)) { 18347 try_resetting_target = B_FALSE; 18348 } 18349 break; 18350 default: 18351 break; 18352 } 18353 } 18354 18355 if (try_resetting_target == B_TRUE) { 18356 int reset_retval = 0; 18357 if (un->un_f_lun_reset_enabled == TRUE) { 18358 SD_TRACE(SD_LOG_IO_CORE, un, 18359 "sd_sense_key_medium_or_hardware_" 18360 "error: issuing RESET_LUN\n"); 18361 reset_retval = 18362 scsi_reset(SD_ADDRESS(un), 18363 RESET_LUN); 18364 } 18365 if (reset_retval == 0) { 18366 SD_TRACE(SD_LOG_IO_CORE, un, 18367 "sd_sense_key_medium_or_hardware_" 18368 "error: issuing RESET_TARGET\n"); 18369 (void) scsi_reset(SD_ADDRESS(un), 18370 RESET_TARGET); 18371 } 18372 } 18373 } 18374 mutex_enter(SD_MUTEX(un)); 18375 } 18376 18377 /* 18378 * This really ought to be a fatal error, but we will retry anyway, 18379 * as some drives report this as a spurious error. 18380 */ 18381 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18382 &si, EIO, (clock_t)0, NULL); 18383 } 18384 18385 18386 18387 /* 18388 * Function: sd_sense_key_illegal_request 18389 * 18390 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
18391 * 18392 * Context: May be called from interrupt context 18393 */ 18394 18395 static void 18396 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 18397 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18398 { 18399 struct sd_sense_info si; 18400 18401 ASSERT(un != NULL); 18402 ASSERT(mutex_owned(SD_MUTEX(un))); 18403 ASSERT(bp != NULL); 18404 ASSERT(xp != NULL); 18405 ASSERT(pktp != NULL); 18406 18407 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 18408 18409 si.ssi_severity = SCSI_ERR_INFO; 18410 si.ssi_pfa_flag = FALSE; 18411 18412 /* Pointless to retry if the target thinks it's an illegal request */ 18413 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18414 sd_return_failed_command(un, bp, EIO); 18415 } 18416 18417 18418 18419 18420 /* 18421 * Function: sd_sense_key_unit_attention 18422 * 18423 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 18424 * 18425 * Context: May be called from interrupt context 18426 */ 18427 18428 static void 18429 sd_sense_key_unit_attention(struct sd_lun *un, uint8_t *sense_datap, 18430 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18431 { 18432 /* 18433 * For UNIT ATTENTION we allow retries for one minute, since devices 18434 * like Sonoma can keep returning UNIT ATTENTION for close to a minute 18435 * under certain conditions. 18436 */ 18437 int retry_check_flag = SD_RETRIES_UA; 18438 boolean_t kstat_updated = B_FALSE; 18439 struct sd_sense_info si; 18440 uint8_t asc = scsi_sense_asc(sense_datap); 18441 uint8_t ascq = scsi_sense_ascq(sense_datap); 18442 18443 ASSERT(un != NULL); 18444 ASSERT(mutex_owned(SD_MUTEX(un))); 18445 ASSERT(bp != NULL); 18446 ASSERT(xp != NULL); 18447 ASSERT(pktp != NULL); 18448 18449 si.ssi_severity = SCSI_ERR_INFO; 18450 si.ssi_pfa_flag = FALSE; 18451 18452 18453 switch (asc) { 18454 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 18455 if (sd_report_pfa != 0) { 18456 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 18457 si.ssi_pfa_flag = TRUE; 18458 retry_check_flag = SD_RETRIES_STANDARD; 18459 goto do_retry; 18460 } 18461 18462 break; 18463 18464 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 18465 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 18466 un->un_resvd_status |= 18467 (SD_LOST_RESERVE | SD_WANT_RESERVE); 18468 } 18469 #ifdef _LP64 18470 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 18471 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 18472 un, KM_NOSLEEP) == TASKQID_INVALID) { 18473 /* 18474 * If we can't dispatch the task, we'll just 18475 * live without descriptor sense. We can 18476 * try again on the next "unit attention". 18477 */ 18478 SD_ERROR(SD_LOG_ERROR, un, 18479 "sd_sense_key_unit_attention: " 18480 "Could not dispatch " 18481 "sd_reenable_dsense_task\n"); 18482 } 18483 } 18484 #endif /* _LP64 */ 18485 /* FALLTHRU */ 18486 18487 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 18488 if (!un->un_f_has_removable_media) { 18489 break; 18490 } 18491 18492 /* 18493 * When we get a unit attention from a removable-media device, 18494 * it may be in a state that will take a long time to recover 18495 * (e.g., from a reset). Since we are executing in interrupt 18496 * context here, we cannot wait around for the device to come 18497 * back. So hand this command off to sd_media_change_task() 18498 * for deferred processing under taskq thread context. (Note 18499 * that the command still may be failed if a problem is 18500 * encountered at a later time.)
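 *
 * The dispatch below uses KM_NOSLEEP because this code can run
 * in interrupt context, where sleeping for memory is not
 * allowed; the general shape of the pattern is:
 *
 *	if (taskq_dispatch(tq, func, arg, KM_NOSLEEP) ==
 *	    TASKQID_INVALID) {
 *		... dispatch failed: fail or retry the command ...
 *	}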
18501 */ 18502 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 18503 KM_NOSLEEP) == TASKQID_INVALID) { 18504 /* 18505 * Cannot dispatch the request, so fail the command. 18506 */ 18507 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18508 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18509 si.ssi_severity = SCSI_ERR_FATAL; 18510 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18511 sd_return_failed_command(un, bp, EIO); 18512 } 18513 18514 /* 18515 * If we failed to dispatch sd_media_change_task(), the kstat was 18516 * already updated above. If the dispatch succeeded, the kstat 18517 * will be updated later if sd_media_change_task() encounters an 18518 * error. Either way, set the kstat_updated flag here. 18519 */ 18520 kstat_updated = B_TRUE; 18521 18522 /* 18523 * Either the command has been successfully dispatched to a 18524 * task Q for retrying, or the dispatch failed. In either case, 18525 * do NOT retry again by calling sd_retry_command: that would set 18526 * up two retries of the same command, and when one completed and 18527 * freed the resources the other would access freed memory, 18528 * a bad thing. 18529 */ 18530 return; 18531 18532 default: 18533 break; 18534 } 18535 18536 /* 18537 * ASC ASCQ 18538 * 2A 09 Capacity data has changed 18539 * 2A 01 Mode parameters changed 18540 * 3F 0E Reported luns data has changed 18541 * Arrays that support logical unit expansion should report 18542 * capacity changes (2Ah/09); "mode parameters changed" and 18543 * "reported luns data has changed" serve as approximations. 18544 */ 18545 if (((asc == 0x2a) && (ascq == 0x09)) || 18546 ((asc == 0x2a) && (ascq == 0x01)) || 18547 ((asc == 0x3f) && (ascq == 0x0e))) { 18548 if (taskq_dispatch(sd_tq, sd_target_change_task, un, 18549 KM_NOSLEEP) == TASKQID_INVALID) { 18550 SD_ERROR(SD_LOG_ERROR, un, 18551 "sd_sense_key_unit_attention: " 18552 "Could not dispatch sd_target_change_task\n"); 18553 } 18554 } 18555 18556 /* 18557 * Update the kstat if we haven't done so already. 18558 */ 18559 if (!kstat_updated) { 18560 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18561 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18562 } 18563 18564 do_retry: 18565 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 18566 EIO, SD_UA_RETRY_DELAY, NULL); 18567 } 18568 18569 18570 18571 /* 18572 * Function: sd_sense_key_fail_command 18573 * 18574 * Description: Used to fail a command when we don't like the sense key that 18575 * was returned. 18576 * 18577 * Context: May be called from interrupt context 18578 */ 18579 18580 static void 18581 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 18582 struct scsi_pkt *pktp) 18583 { 18584 struct sd_sense_info si; 18585 18586 ASSERT(un != NULL); 18587 ASSERT(mutex_owned(SD_MUTEX(un))); 18588 ASSERT(bp != NULL); 18589 ASSERT(xp != NULL); 18590 ASSERT(pktp != NULL); 18591 18592 si.ssi_severity = SCSI_ERR_FATAL; 18593 si.ssi_pfa_flag = FALSE; 18594 18595 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18596 sd_return_failed_command(un, bp, EIO); 18597 } 18598 18599 18600 18601 /* 18602 * Function: sd_sense_key_blank_check 18603 * 18604 * Description: Recovery actions for a SCSI "Blank Check" sense key. 18605 * Has no monetary connotation.
18606 * 18607 * Context: May be called from interrupt context 18608 */ 18609 18610 static void 18611 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 18612 struct scsi_pkt *pktp) 18613 { 18614 struct sd_sense_info si; 18615 18616 ASSERT(un != NULL); 18617 ASSERT(mutex_owned(SD_MUTEX(un))); 18618 ASSERT(bp != NULL); 18619 ASSERT(xp != NULL); 18620 ASSERT(pktp != NULL); 18621 18622 /* 18623 * Blank check is not fatal for removable devices, therefore 18624 * it does not require a console message. 18625 */ 18626 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 18627 SCSI_ERR_FATAL; 18628 si.ssi_pfa_flag = FALSE; 18629 18630 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18631 sd_return_failed_command(un, bp, EIO); 18632 } 18633 18634 18635 18636 18637 /* 18638 * Function: sd_sense_key_aborted_command 18639 * 18640 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 18641 * 18642 * Context: May be called from interrupt context 18643 */ 18644 18645 static void 18646 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 18647 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18648 { 18649 struct sd_sense_info si; 18650 18651 ASSERT(un != NULL); 18652 ASSERT(mutex_owned(SD_MUTEX(un))); 18653 ASSERT(bp != NULL); 18654 ASSERT(xp != NULL); 18655 ASSERT(pktp != NULL); 18656 18657 si.ssi_severity = SCSI_ERR_FATAL; 18658 si.ssi_pfa_flag = FALSE; 18659 18660 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18661 18662 /* 18663 * This really ought to be a fatal error, but we will retry anyway 18664 * as some drives report this as a spurious error. 18665 */ 18666 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18667 &si, EIO, drv_usectohz(100000), NULL); 18668 } 18669 18670 18671 18672 /* 18673 * Function: sd_sense_key_default 18674 * 18675 * Description: Default recovery action for several SCSI sense keys (basically 18676 * attempts a retry). 18677 * 18678 * Context: May be called from interrupt context 18679 */ 18680 18681 static void 18682 sd_sense_key_default(struct sd_lun *un, uint8_t *sense_datap, struct buf *bp, 18683 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18684 { 18685 struct sd_sense_info si; 18686 uint8_t sense_key = scsi_sense_key(sense_datap); 18687 18688 ASSERT(un != NULL); 18689 ASSERT(mutex_owned(SD_MUTEX(un))); 18690 ASSERT(bp != NULL); 18691 ASSERT(xp != NULL); 18692 ASSERT(pktp != NULL); 18693 18694 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18695 18696 /* 18697 * Undecoded sense key. Attempt retries and hope that will fix 18698 * the problem. Otherwise, we're dead. 18699 */ 18700 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 18701 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18702 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 18703 } 18704 18705 si.ssi_severity = SCSI_ERR_FATAL; 18706 si.ssi_pfa_flag = FALSE; 18707 18708 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18709 &si, EIO, (clock_t)0, NULL); 18710 } 18711 18712 18713 18714 /* 18715 * Function: sd_print_retry_msg 18716 * 18717 * Description: Print a message indicating the retry action being taken. 18718 * 18719 * Arguments: un - ptr to associated softstate 18720 * bp - ptr to buf(9S) for the command 18721 * arg - not used. 
18722 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18723 * or SD_NO_RETRY_ISSUED 18724 * 18725 * Context: May be called from interrupt context 18726 */ 18727 /* ARGSUSED */ 18728 static void 18729 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 18730 { 18731 struct sd_xbuf *xp; 18732 struct scsi_pkt *pktp; 18733 char *reasonp; 18734 char *msgp; 18735 18736 ASSERT(un != NULL); 18737 ASSERT(mutex_owned(SD_MUTEX(un))); 18738 ASSERT(bp != NULL); 18739 pktp = SD_GET_PKTP(bp); 18740 ASSERT(pktp != NULL); 18741 xp = SD_GET_XBUF(bp); 18742 ASSERT(xp != NULL); 18743 18744 ASSERT(!mutex_owned(&un->un_pm_mutex)); 18745 mutex_enter(&un->un_pm_mutex); 18746 if ((un->un_state == SD_STATE_SUSPENDED) || 18747 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 18748 (pktp->pkt_flags & FLAG_SILENT)) { 18749 mutex_exit(&un->un_pm_mutex); 18750 goto update_pkt_reason; 18751 } 18752 mutex_exit(&un->un_pm_mutex); 18753 18754 /* 18755 * Suppress messages if they are all the same pkt_reason; with 18756 * TQ, many (up to 256) are returned with the same pkt_reason. 18757 * If we are in panic, then suppress the retry messages. 18758 */ 18759 switch (flag) { 18760 case SD_NO_RETRY_ISSUED: 18761 msgp = "giving up"; 18762 break; 18763 case SD_IMMEDIATE_RETRY_ISSUED: 18764 case SD_DELAYED_RETRY_ISSUED: 18765 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 18766 ((pktp->pkt_reason == un->un_last_pkt_reason) && 18767 (sd_error_level != SCSI_ERR_ALL))) { 18768 return; 18769 } 18770 msgp = "retrying command"; 18771 break; 18772 default: 18773 goto update_pkt_reason; 18774 } 18775 18776 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 18777 scsi_rname(pktp->pkt_reason)); 18778 18779 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 18780 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18781 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 18782 } 18783 18784 update_pkt_reason: 18785 /* 18786 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 18787 * This is to prevent multiple console messages for the same failure 18788 * condition. Note that un->un_last_pkt_reason is NOT restored if & 18789 * when the command is retried successfully because there still may be 18790 * more commands coming back with the same value of pktp->pkt_reason. 18791 */ 18792 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 18793 un->un_last_pkt_reason = pktp->pkt_reason; 18794 } 18795 } 18796 18797 18798 /* 18799 * Function: sd_print_cmd_incomplete_msg 18800 * 18801 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 18802 * 18803 * Arguments: un - ptr to associated softstate 18804 * bp - ptr to buf(9S) for the command 18805 * arg - passed to sd_print_retry_msg() 18806 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18807 * or SD_NO_RETRY_ISSUED 18808 * 18809 * Context: May be called from interrupt context 18810 */ 18811 18812 static void 18813 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 18814 int code) 18815 { 18816 dev_info_t *dip; 18817 18818 ASSERT(un != NULL); 18819 ASSERT(mutex_owned(SD_MUTEX(un))); 18820 ASSERT(bp != NULL); 18821 18822 switch (code) { 18823 case SD_NO_RETRY_ISSUED: 18824 /* Command was failed. Someone turned off this target? 
*/ 18825 if (un->un_state != SD_STATE_OFFLINE) { 18826 /* 18827 * Suppress message if we are detaching and 18828 * device has been disconnected 18829 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 18830 * private interface and not part of the DDI 18831 */ 18832 dip = un->un_sd->sd_dev; 18833 if (!(DEVI_IS_DETACHING(dip) && 18834 DEVI_IS_DEVICE_REMOVED(dip))) { 18835 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18836 "disk not responding to selection\n"); 18837 } 18838 New_state(un, SD_STATE_OFFLINE); 18839 } 18840 break; 18841 18842 case SD_DELAYED_RETRY_ISSUED: 18843 case SD_IMMEDIATE_RETRY_ISSUED: 18844 default: 18845 /* Command was successfully queued for retry */ 18846 sd_print_retry_msg(un, bp, arg, code); 18847 break; 18848 } 18849 } 18850 18851 18852 /* 18853 * Function: sd_pkt_reason_cmd_incomplete 18854 * 18855 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 18856 * 18857 * Context: May be called from interrupt context 18858 */ 18859 18860 static void 18861 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 18862 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18863 { 18864 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 18865 18866 ASSERT(un != NULL); 18867 ASSERT(mutex_owned(SD_MUTEX(un))); 18868 ASSERT(bp != NULL); 18869 ASSERT(xp != NULL); 18870 ASSERT(pktp != NULL); 18871 18872 /* Do not do a reset if selection did not complete */ 18873 /* Note: Should this not just check the bit? */ 18874 if (pktp->pkt_state != STATE_GOT_BUS) { 18875 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18876 sd_reset_target(un, pktp); 18877 } 18878 18879 /* 18880 * If the target was not successfully selected, then set 18881 * SD_RETRIES_FAILFAST to indicate that we lost communication 18882 * with the target, and further retries and/or commands are 18883 * likely to take a long time. 18884 */ 18885 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 18886 flag |= SD_RETRIES_FAILFAST; 18887 } 18888 18889 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18890 18891 sd_retry_command(un, bp, flag, 18892 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18893 } 18894 18895 18896 18897 /* 18898 * Function: sd_pkt_reason_cmd_tran_err 18899 * 18900 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 18901 * 18902 * Context: May be called from interrupt context 18903 */ 18904 18905 static void 18906 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 18907 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18908 { 18909 ASSERT(un != NULL); 18910 ASSERT(mutex_owned(SD_MUTEX(un))); 18911 ASSERT(bp != NULL); 18912 ASSERT(xp != NULL); 18913 ASSERT(pktp != NULL); 18914 18915 /* 18916 * Do not reset if we got a parity error, or if 18917 * selection did not complete. 18918 */ 18919 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18920 /* Note: Should this not just check the bit for pkt_state? */ 18921 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 18922 (pktp->pkt_state != STATE_GOT_BUS)) { 18923 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18924 sd_reset_target(un, pktp); 18925 } 18926 18927 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18928 18929 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18930 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18931 } 18932 18933 18934 18935 /* 18936 * Function: sd_pkt_reason_cmd_reset 18937 * 18938 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
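 *
 * Note: the retry below uses SD_RETRIES_VICTIM because this target
 * is presumed to be a victim of a reset aimed at another device
 * sharing the bus (see the comment in the function body).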
18939 * 18940 * Context: May be called from interrupt context 18941 */ 18942 18943 static void 18944 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 18945 struct scsi_pkt *pktp) 18946 { 18947 ASSERT(un != NULL); 18948 ASSERT(mutex_owned(SD_MUTEX(un))); 18949 ASSERT(bp != NULL); 18950 ASSERT(xp != NULL); 18951 ASSERT(pktp != NULL); 18952 18953 /* The target may still be running the command, so try to reset. */ 18954 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18955 sd_reset_target(un, pktp); 18956 18957 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18958 18959 /* 18960 * If pkt_reason is CMD_RESET chances are that this pkt got 18961 * reset because another target on this bus caused it. The target 18962 * that caused it should get CMD_TIMEOUT with pkt_statistics 18963 * of STAT_TIMEOUT/STAT_DEV_RESET. 18964 */ 18965 18966 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18967 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18968 } 18969 18970 18971 18972 18973 /* 18974 * Function: sd_pkt_reason_cmd_aborted 18975 * 18976 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 18977 * 18978 * Context: May be called from interrupt context 18979 */ 18980 18981 static void 18982 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 18983 struct scsi_pkt *pktp) 18984 { 18985 ASSERT(un != NULL); 18986 ASSERT(mutex_owned(SD_MUTEX(un))); 18987 ASSERT(bp != NULL); 18988 ASSERT(xp != NULL); 18989 ASSERT(pktp != NULL); 18990 18991 /* The target may still be running the command, so try to reset. */ 18992 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18993 sd_reset_target(un, pktp); 18994 18995 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18996 18997 /* 18998 * If pkt_reason is CMD_ABORTED chances are that this pkt got 18999 * aborted because another target on this bus caused it. The target 19000 * that caused it should get CMD_TIMEOUT with pkt_statistics 19001 * of STAT_TIMEOUT/STAT_DEV_RESET. 19002 */ 19003 19004 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 19005 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19006 } 19007 19008 19009 19010 /* 19011 * Function: sd_pkt_reason_cmd_timeout 19012 * 19013 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 19014 * 19015 * Context: May be called from interrupt context 19016 */ 19017 19018 static void 19019 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 19020 struct scsi_pkt *pktp) 19021 { 19022 ASSERT(un != NULL); 19023 ASSERT(mutex_owned(SD_MUTEX(un))); 19024 ASSERT(bp != NULL); 19025 ASSERT(xp != NULL); 19026 ASSERT(pktp != NULL); 19027 19028 19029 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19030 sd_reset_target(un, pktp); 19031 19032 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19033 19034 /* 19035 * A command timeout indicates that we could not establish 19036 * communication with the target, so set SD_RETRIES_FAILFAST 19037 * as further retries/commands are likely to take a long time. 19038 */ 19039 sd_retry_command(un, bp, 19040 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 19041 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19042 } 19043 19044 19045 19046 /* 19047 * Function: sd_pkt_reason_cmd_unx_bus_free 19048 * 19049 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
19050 * 19051 * Context: May be called from interrupt context 19052 */ 19053 19054 static void 19055 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 19056 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19057 { 19058 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 19059 19060 ASSERT(un != NULL); 19061 ASSERT(mutex_owned(SD_MUTEX(un))); 19062 ASSERT(bp != NULL); 19063 ASSERT(xp != NULL); 19064 ASSERT(pktp != NULL); 19065 19066 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19067 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19068 19069 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 19070 sd_print_retry_msg : NULL; 19071 19072 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 19073 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19074 } 19075 19076 19077 /* 19078 * Function: sd_pkt_reason_cmd_tag_reject 19079 * 19080 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 19081 * 19082 * Context: May be called from interrupt context 19083 */ 19084 19085 static void 19086 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 19087 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19088 { 19089 ASSERT(un != NULL); 19090 ASSERT(mutex_owned(SD_MUTEX(un))); 19091 ASSERT(bp != NULL); 19092 ASSERT(xp != NULL); 19093 ASSERT(pktp != NULL); 19094 19095 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19096 pktp->pkt_flags = 0; 19097 un->un_tagflags = 0; 19098 if (un->un_f_opt_queueing == TRUE) { 19099 un->un_throttle = min(un->un_throttle, 3); 19100 } else { 19101 un->un_throttle = 1; 19102 } 19103 mutex_exit(SD_MUTEX(un)); 19104 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 19105 mutex_enter(SD_MUTEX(un)); 19106 19107 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19108 19109 /* Legacy behavior not to check retry counts here. */ 19110 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 19111 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19112 } 19113 19114 19115 /* 19116 * Function: sd_pkt_reason_default 19117 * 19118 * Description: Default recovery actions for SCSA pkt_reason values that 19119 * do not have more explicit recovery actions. 19120 * 19121 * Context: May be called from interrupt context 19122 */ 19123 19124 static void 19125 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 19126 struct scsi_pkt *pktp) 19127 { 19128 ASSERT(un != NULL); 19129 ASSERT(mutex_owned(SD_MUTEX(un))); 19130 ASSERT(bp != NULL); 19131 ASSERT(xp != NULL); 19132 ASSERT(pktp != NULL); 19133 19134 SD_UPDATE_ERRSTATS(un, sd_transerrs); 19135 sd_reset_target(un, pktp); 19136 19137 SD_UPDATE_RESERVATION_STATUS(un, pktp); 19138 19139 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 19140 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 19141 } 19142 19143 19144 19145 /* 19146 * Function: sd_pkt_status_check_condition 19147 * 19148 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
19149 * 19150 * Context: May be called from interrupt context 19151 */ 19152 19153 static void 19154 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 19155 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19156 { 19157 ASSERT(un != NULL); 19158 ASSERT(mutex_owned(SD_MUTEX(un))); 19159 ASSERT(bp != NULL); 19160 ASSERT(xp != NULL); 19161 ASSERT(pktp != NULL); 19162 19163 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 19164 "entry: buf:0x%p xp:0x%p\n", bp, xp); 19165 19166 /* 19167 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 19168 * command will be retried after the request sense). Otherwise, retry 19169 * the command. Note: we are issuing the request sense even though the 19170 * retry limit may have been reached for the failed command. 19171 */ 19172 if (un->un_f_arq_enabled == FALSE) { 19173 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 19174 "no ARQ, sending request sense command\n"); 19175 sd_send_request_sense_command(un, bp, pktp); 19176 } else { 19177 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 19178 "ARQ, retrying request sense command\n"); 19179 #if defined(__x86) 19180 /* 19181 * Note: this delay needs to be adjusted here if the 19182 * SD_RETRY_DELAY value changes in sddef.h 19183 */ 19184 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 19185 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, 19186 NULL); 19187 #else 19188 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 19189 EIO, SD_RETRY_DELAY, NULL); 19190 #endif 19191 } 19192 19193 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 19194 } 19195 19196 19197 /* 19198 * Function: sd_pkt_status_busy 19199 * 19200 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 19201 * 19202 * Context: May be called from interrupt context 19203 */ 19204 19205 static void 19206 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 19207 struct scsi_pkt *pktp) 19208 { 19209 ASSERT(un != NULL); 19210 ASSERT(mutex_owned(SD_MUTEX(un))); 19211 ASSERT(bp != NULL); 19212 ASSERT(xp != NULL); 19213 ASSERT(pktp != NULL); 19214 19215 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19216 "sd_pkt_status_busy: entry\n"); 19217 19218 /* If retries are exhausted, just fail the command. */ 19219 if (xp->xb_retry_count >= un->un_busy_retry_count) { 19220 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 19221 "device busy too long\n"); 19222 sd_return_failed_command(un, bp, EIO); 19223 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19224 "sd_pkt_status_busy: exit\n"); 19225 return; 19226 } 19227 xp->xb_retry_count++; 19228 19229 /* 19230 * Try to reset the target. However, we do not want to perform 19231 * more than one reset if the device continues to fail. The reset 19232 * will be performed when the retry count reaches the reset 19233 * threshold. This threshold should be set such that at least 19234 * one retry is issued before the reset is performed. 19235 */ 19236 if (xp->xb_retry_count == 19237 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 19238 int rval = 0; 19239 mutex_exit(SD_MUTEX(un)); 19240 if (un->un_f_allow_bus_device_reset == TRUE) { 19241 /* 19242 * First try to reset the LUN; if we cannot then 19243 * try to reset the target.
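 *
 * The escalation order here (also used by sd_reset_target())
 * is RESET_LUN, then RESET_TARGET, then RESET_ALL, with each
 * step attempted only if the preceding one failed or was not
 * permitted.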
19244 */ 19245 if (un->un_f_lun_reset_enabled == TRUE) { 19246 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19247 "sd_pkt_status_busy: RESET_LUN\n"); 19248 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 19249 } 19250 if (rval == 0) { 19251 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19252 "sd_pkt_status_busy: RESET_TARGET\n"); 19253 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 19254 } 19255 } 19256 if (rval == 0) { 19257 /* 19258 * If the RESET_LUN and/or RESET_TARGET failed, 19259 * try RESET_ALL 19260 */ 19261 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19262 "sd_pkt_status_busy: RESET_ALL\n"); 19263 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 19264 } 19265 mutex_enter(SD_MUTEX(un)); 19266 if (rval == 0) { 19267 /* 19268 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 19269 * At this point we give up & fail the command. 19270 */ 19271 sd_return_failed_command(un, bp, EIO); 19272 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19273 "sd_pkt_status_busy: exit (failed cmd)\n"); 19274 return; 19275 } 19276 } 19277 19278 /* 19279 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK, as 19280 * we have already checked the retry counts above. 19281 */ 19282 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 19283 EIO, un->un_busy_timeout, NULL); 19284 19285 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19286 "sd_pkt_status_busy: exit\n"); 19287 } 19288 19289 19290 /* 19291 * Function: sd_pkt_status_reservation_conflict 19292 * 19293 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 19294 * command status. 19295 * 19296 * Context: May be called from interrupt context 19297 */ 19298 19299 static void 19300 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 19301 struct sd_xbuf *xp, struct scsi_pkt *pktp) 19302 { 19303 ASSERT(un != NULL); 19304 ASSERT(mutex_owned(SD_MUTEX(un))); 19305 ASSERT(bp != NULL); 19306 ASSERT(xp != NULL); 19307 ASSERT(pktp != NULL); 19308 19309 /* 19310 * If the command was PERSISTENT RESERVE [IN|OUT], then the reservation 19311 * conflict could be due to various reasons: incorrect keys, the initiator 19312 * not being registered, no reservation held, etc. So, we return EACCES to the caller. 19313 */ 19314 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 19315 int cmd = SD_GET_PKT_OPCODE(pktp); 19316 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 19317 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 19318 sd_return_failed_command(un, bp, EACCES); 19319 return; 19320 } 19321 } 19322 19323 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 19324 19325 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 19326 if (sd_failfast_enable != 0) { 19327 /* By definition, we must panic here.... */ 19328 sd_panic_for_res_conflict(un); 19329 /*NOTREACHED*/ 19330 } 19331 SD_ERROR(SD_LOG_IO, un, 19332 "sd_handle_resv_conflict: Disk Reserved\n"); 19333 sd_return_failed_command(un, bp, EACCES); 19334 return; 19335 } 19336 19337 /* 19338 * 1147670: retry only if sd_retry_on_reservation_conflict 19339 * property is set (default is 1). Retries will not succeed 19340 * on a disk reserved by another initiator. HA systems 19341 * may reset this via sd.conf to avoid these retries. 19342 * 19343 * Note: The legacy return code for this failure is EIO; however, EACCES 19344 * seems more appropriate for a reservation conflict.
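 *
 * For illustration only: one way an HA configuration might
 * disable these retries is to set the tunable at boot via
 * /etc/system (an example mechanism, not the only one; sites
 * may instead use the corresponding driver property):
 *
 *	set sd:sd_retry_on_reservation_conflict = 0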
19345 */ 19346 if (sd_retry_on_reservation_conflict == 0) { 19347 SD_ERROR(SD_LOG_IO, un, 19348 "sd_handle_resv_conflict: Device Reserved\n"); 19349 sd_return_failed_command(un, bp, EIO); 19350 return; 19351 } 19352 19353 /* 19354 * Retry the command if we can. 19355 * 19356 * Note: The legacy return code for this failure is EIO; however, EACCES 19357 * seems more appropriate for a reservation conflict. 19358 */ 19359 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 19360 (clock_t)2, NULL); 19361 } 19362 19363 19364 19365 /* 19366 * Function: sd_pkt_status_qfull 19367 * 19368 * Description: Handle a QUEUE FULL condition from the target. This can 19369 * occur if the HBA does not handle the queue full condition. 19370 * (Basically this means third-party HBAs, as Sun HBAs will 19371 * handle the queue full condition.) Note that if there are 19372 * some commands already in the transport, then the queue full 19373 * has occurred because the queue for this nexus is actually 19374 * full. If there are no commands in the transport, then the 19375 * queue full results from some other initiator or LUN 19376 * consuming all the resources at the target. 19377 * 19378 * Context: May be called from interrupt context 19379 */ 19380 19381 static void 19382 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 19383 struct scsi_pkt *pktp) 19384 { 19385 ASSERT(un != NULL); 19386 ASSERT(mutex_owned(SD_MUTEX(un))); 19387 ASSERT(bp != NULL); 19388 ASSERT(xp != NULL); 19389 ASSERT(pktp != NULL); 19390 19391 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19392 "sd_pkt_status_qfull: entry\n"); 19393 19394 /* 19395 * Just lower the QFULL throttle and retry the command. Note that 19396 * we do not limit the number of retries here. 19397 */ 19398 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 19399 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 19400 SD_RESTART_TIMEOUT, NULL); 19401 19402 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19403 "sd_pkt_status_qfull: exit\n"); 19404 } 19405 19406 19407 /* 19408 * Function: sd_reset_target 19409 * 19410 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 19411 * RESET_TARGET, or RESET_ALL. 19412 * 19413 * Context: May be called under interrupt context. 19414 */ 19415 19416 static void 19417 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 19418 { 19419 int rval = 0; 19420 19421 ASSERT(un != NULL); 19422 ASSERT(mutex_owned(SD_MUTEX(un))); 19423 ASSERT(pktp != NULL); 19424 19425 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 19426 19427 /* 19428 * No need to reset if the transport layer has already done so.
19429 */ 19430 if ((pktp->pkt_statistics & 19431 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 19432 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19433 "sd_reset_target: no reset\n"); 19434 return; 19435 } 19436 19437 mutex_exit(SD_MUTEX(un)); 19438 19439 if (un->un_f_allow_bus_device_reset == TRUE) { 19440 if (un->un_f_lun_reset_enabled == TRUE) { 19441 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19442 "sd_reset_target: RESET_LUN\n"); 19443 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 19444 } 19445 if (rval == 0) { 19446 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19447 "sd_reset_target: RESET_TARGET\n"); 19448 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 19449 } 19450 } 19451 19452 if (rval == 0) { 19453 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19454 "sd_reset_target: RESET_ALL\n"); 19455 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 19456 } 19457 19458 mutex_enter(SD_MUTEX(un)); 19459 19460 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 19461 } 19462 19463 /* 19464 * Function: sd_target_change_task 19465 * 19466 * Description: Handle dynamic target change 19467 * 19468 * Context: Executes in a taskq() thread context 19469 */ 19470 static void 19471 sd_target_change_task(void *arg) 19472 { 19473 struct sd_lun *un = arg; 19474 uint64_t capacity; 19475 diskaddr_t label_cap; 19476 uint_t lbasize; 19477 sd_ssc_t *ssc; 19478 19479 ASSERT(un != NULL); 19480 ASSERT(!mutex_owned(SD_MUTEX(un))); 19481 19482 if ((un->un_f_blockcount_is_valid == FALSE) || 19483 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 19484 return; 19485 } 19486 19487 ssc = sd_ssc_init(un); 19488 19489 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity, 19490 &lbasize, SD_PATH_DIRECT) != 0) { 19491 SD_ERROR(SD_LOG_ERROR, un, 19492 "sd_target_change_task: fail to read capacity\n"); 19493 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19494 goto task_exit; 19495 } 19496 19497 mutex_enter(SD_MUTEX(un)); 19498 if (capacity <= un->un_blockcount) { 19499 mutex_exit(SD_MUTEX(un)); 19500 goto task_exit; 19501 } 19502 19503 sd_update_block_info(un, lbasize, capacity); 19504 mutex_exit(SD_MUTEX(un)); 19505 19506 /* 19507 * If lun is EFI labeled and lun capacity is greater than the 19508 * capacity contained in the label, log a sys event. 
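 *
 * The event logged below is the EC_dev_status / ESC_DEV_DLE
 * (dynamic LUN expansion) sysevent generated by
 * sd_log_lun_expansion_event(); consumers such as the ZFS
 * sysevent module can then react to the increased capacity.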
19509 */ 19510 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 19511 (void*)SD_PATH_DIRECT) == 0) { 19512 mutex_enter(SD_MUTEX(un)); 19513 if (un->un_f_blockcount_is_valid && 19514 un->un_blockcount > label_cap) { 19515 mutex_exit(SD_MUTEX(un)); 19516 sd_log_lun_expansion_event(un, KM_SLEEP); 19517 } else { 19518 mutex_exit(SD_MUTEX(un)); 19519 } 19520 } 19521 19522 task_exit: 19523 sd_ssc_fini(ssc); 19524 } 19525 19526 19527 /* 19528 * Function: sd_log_dev_status_event 19529 * 19530 * Description: Log EC_dev_status sysevent 19531 * 19532 * Context: Never called from interrupt context 19533 */ 19534 static void 19535 sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag) 19536 { 19537 int err; 19538 char *path; 19539 nvlist_t *attr_list; 19540 size_t n; 19541 19542 /* Allocate and build sysevent attribute list */ 19543 err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, km_flag); 19544 if (err != 0) { 19545 SD_ERROR(SD_LOG_ERROR, un, 19546 "sd_log_dev_status_event: fail to allocate space\n"); 19547 return; 19548 } 19549 19550 path = kmem_alloc(MAXPATHLEN, km_flag); 19551 if (path == NULL) { 19552 nvlist_free(attr_list); 19553 SD_ERROR(SD_LOG_ERROR, un, 19554 "sd_log_dev_status_event: fail to allocate space\n"); 19555 return; 19556 } 19557 19558 n = snprintf(path, MAXPATHLEN, "/devices"); 19559 (void) ddi_pathname(SD_DEVINFO(un), path + n); 19560 n = strlen(path); 19561 n += snprintf(path + n, MAXPATHLEN - n, ":x"); 19562 19563 /* 19564 * On receipt of this event, the ZFS sysevent module will scan 19565 * active zpools for child vdevs matching this physical path. 19566 * In order to catch both whole disk pools and those with an 19567 * EFI boot partition, generate separate sysevents for minor 19568 * node 'a' and 'b'. 19569 */ 19570 for (char c = 'a'; c < 'c'; c++) { 19571 path[n - 1] = c; 19572 19573 err = nvlist_add_string(attr_list, DEV_PHYS_PATH, path); 19574 if (err != 0) { 19575 SD_ERROR(SD_LOG_ERROR, un, 19576 "sd_log_dev_status_event: fail to add attribute\n"); 19577 break; 19578 } 19579 19580 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, 19581 EC_DEV_STATUS, esc, attr_list, NULL, km_flag); 19582 if (err != DDI_SUCCESS) { 19583 SD_ERROR(SD_LOG_ERROR, un, 19584 "sd_log_dev_status_event: fail to log sysevent\n"); 19585 break; 19586 } 19587 } 19588 19589 nvlist_free(attr_list); 19590 kmem_free(path, MAXPATHLEN); 19591 } 19592 19593 19594 /* 19595 * Function: sd_log_lun_expansion_event 19596 * 19597 * Description: Log lun expansion sys event 19598 * 19599 * Context: Never called from interrupt context 19600 */ 19601 static void 19602 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag) 19603 { 19604 sd_log_dev_status_event(un, ESC_DEV_DLE, km_flag); 19605 } 19606 19607 19608 /* 19609 * Function: sd_log_eject_request_event 19610 * 19611 * Description: Log eject request sysevent 19612 * 19613 * Context: Never called from interrupt context 19614 */ 19615 static void 19616 sd_log_eject_request_event(struct sd_lun *un, int km_flag) 19617 { 19618 sd_log_dev_status_event(un, ESC_DEV_EJECT_REQUEST, km_flag); 19619 } 19620 19621 19622 /* 19623 * Function: sd_media_change_task 19624 * 19625 * Description: Recovery action for CDROM to become available. 
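 *
 * Note: the recovery loop below starts with a budget of
 * SD_UNIT_ATTENTION_RETRY/10 attempts at half-second intervals
 * and raises it to the full SD_UNIT_ATTENTION_RETRY if the
 * device reports EAGAIN (i.e. it is still becoming ready).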
19626 * 19627 * Context: Executes in a taskq() thread context 19628 */ 19629 19630 static void 19631 sd_media_change_task(void *arg) 19632 { 19633 struct scsi_pkt *pktp = arg; 19634 struct sd_lun *un; 19635 struct buf *bp; 19636 struct sd_xbuf *xp; 19637 int err = 0; 19638 int retry_count = 0; 19639 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 19640 struct sd_sense_info si; 19641 19642 ASSERT(pktp != NULL); 19643 bp = (struct buf *)pktp->pkt_private; 19644 ASSERT(bp != NULL); 19645 xp = SD_GET_XBUF(bp); 19646 ASSERT(xp != NULL); 19647 un = SD_GET_UN(bp); 19648 ASSERT(un != NULL); 19649 ASSERT(!mutex_owned(SD_MUTEX(un))); 19650 ASSERT(un->un_f_monitor_media_state); 19651 19652 si.ssi_severity = SCSI_ERR_INFO; 19653 si.ssi_pfa_flag = FALSE; 19654 19655 /* 19656 * When a reset is issued on a CDROM, it takes a long time to 19657 * recover. The first few attempts to read the capacity and other 19658 * things related to handling the unit attention fail (with an ASC 19659 * of 0x04 and an ASCQ of 0x01). In that case we want to allow 19660 * enough retries, while still limiting the retries in other cases 19661 * of genuine failure, such as no media in the drive. 19662 */ 19663 while (retry_count++ < retry_limit) { 19664 if ((err = sd_handle_mchange(un)) == 0) { 19665 break; 19666 } 19667 if (err == EAGAIN) { 19668 retry_limit = SD_UNIT_ATTENTION_RETRY; 19669 } 19670 /* Sleep for 0.5 sec. & try again */ 19671 delay(drv_usectohz(500000)); 19672 } 19673 19674 /* 19675 * Dispatch (retry or fail) the original command here, 19676 * along with appropriate console messages.... 19677 * 19678 * Must grab the mutex before calling sd_retry_command, 19679 * sd_print_sense_msg and sd_return_failed_command. 19680 */ 19681 mutex_enter(SD_MUTEX(un)); 19682 if (err != SD_CMD_SUCCESS) { 19683 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19684 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 19685 si.ssi_severity = SCSI_ERR_FATAL; 19686 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 19687 sd_return_failed_command(un, bp, EIO); 19688 } else { 19689 sd_retry_command(un, bp, SD_RETRIES_UA, sd_print_sense_msg, 19690 &si, EIO, (clock_t)0, NULL); 19691 } 19692 mutex_exit(SD_MUTEX(un)); 19693 } 19694 19695 19696 19697 /* 19698 * Function: sd_handle_mchange 19699 * 19700 * Description: Perform geometry validation & other recovery when the CDROM 19701 * has been removed from the drive.
19702 * 19703 * Return Code: 0 for success 19704 * errno-type return code of either sd_send_scsi_DOORLOCK() or 19705 * sd_send_scsi_READ_CAPACITY() 19706 * 19707 * Context: Executes in a taskq() thread context 19708 */ 19709 19710 static int 19711 sd_handle_mchange(struct sd_lun *un) 19712 { 19713 uint64_t capacity; 19714 uint32_t lbasize; 19715 int rval; 19716 sd_ssc_t *ssc; 19717 19718 ASSERT(!mutex_owned(SD_MUTEX(un))); 19719 ASSERT(un->un_f_monitor_media_state); 19720 19721 ssc = sd_ssc_init(un); 19722 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 19723 SD_PATH_DIRECT_PRIORITY); 19724 19725 if (rval != 0) 19726 goto failed; 19727 19728 mutex_enter(SD_MUTEX(un)); 19729 sd_update_block_info(un, lbasize, capacity); 19730 19731 if (un->un_errstats != NULL) { 19732 struct sd_errstats *stp = 19733 (struct sd_errstats *)un->un_errstats->ks_data; 19734 stp->sd_capacity.value.ui64 = (uint64_t) 19735 ((uint64_t)un->un_blockcount * 19736 (uint64_t)un->un_tgt_blocksize); 19737 } 19738 19739 /* 19740 * Check if the media in the device is writable or not 19741 */ 19742 if (ISCD(un)) { 19743 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY); 19744 } 19745 19746 /* 19747 * Note: Maybe let the strategy/partitioning chain worry about getting 19748 * valid geometry. 19749 */ 19750 mutex_exit(SD_MUTEX(un)); 19751 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 19752 19753 19754 if (cmlb_validate(un->un_cmlbhandle, 0, 19755 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 19756 sd_ssc_fini(ssc); 19757 return (EIO); 19758 } else { 19759 if (un->un_f_pkstats_enabled) { 19760 sd_set_pstats(un); 19761 SD_TRACE(SD_LOG_IO_PARTITION, un, 19762 "sd_handle_mchange: un:0x%p pstats created and " 19763 "set\n", un); 19764 } 19765 } 19766 19767 /* 19768 * Try to lock the door 19769 */ 19770 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 19771 SD_PATH_DIRECT_PRIORITY); 19772 failed: 19773 if (rval != 0) 19774 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19775 sd_ssc_fini(ssc); 19776 return (rval); 19777 } 19778 19779 19780 /* 19781 * Function: sd_send_scsi_DOORLOCK 19782 * 19783 * Description: Issue the scsi DOOR LOCK command 19784 * 19785 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19786 * structure for this target. 19787 * flag - SD_REMOVAL_ALLOW 19788 * SD_REMOVAL_PREVENT 19789 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19790 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19791 * to use the USCSI "direct" chain and bypass the normal 19792 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19793 * command is issued as part of an error recovery action. 19794 * 19795 * Return Code: 0 - Success 19796 * errno return code from sd_ssc_send() 19797 * 19798 * Context: Can sleep. 19799 */ 19800 19801 static int 19802 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag) 19803 { 19804 struct scsi_extended_sense sense_buf; 19805 union scsi_cdb cdb; 19806 struct uscsi_cmd ucmd_buf; 19807 int status; 19808 struct sd_lun *un; 19809 19810 ASSERT(ssc != NULL); 19811 un = ssc->ssc_un; 19812 ASSERT(un != NULL); 19813 ASSERT(!mutex_owned(SD_MUTEX(un))); 19814 19815 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 19816 19817 /* already determined doorlock is not supported, fake success */ 19818 if (un->un_f_doorlock_supported == FALSE) { 19819 return (0); 19820 } 19821 19822 /* 19823 * If we are ejecting and see an SD_REMOVAL_PREVENT 19824 * ignore the command so we can complete the eject 19825 * operation. 
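 *
 * The command built below is the 6-byte PREVENT ALLOW MEDIUM
 * REMOVAL CDB (opcode SCMD_DOORLOCK); byte 4 of the CDB carries
 * the prevent/allow field, which is why cdb_opaque[4] is set
 * directly from the flag argument.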
19826 */ 19827 if (flag == SD_REMOVAL_PREVENT) { 19828 mutex_enter(SD_MUTEX(un)); 19829 if (un->un_f_ejecting == TRUE) { 19830 mutex_exit(SD_MUTEX(un)); 19831 return (EAGAIN); 19832 } 19833 mutex_exit(SD_MUTEX(un)); 19834 } 19835 19836 bzero(&cdb, sizeof (cdb)); 19837 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19838 19839 cdb.scc_cmd = SCMD_DOORLOCK; 19840 cdb.cdb_opaque[4] = (uchar_t)flag; 19841 19842 ucmd_buf.uscsi_cdb = (char *)&cdb; 19843 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19844 ucmd_buf.uscsi_bufaddr = NULL; 19845 ucmd_buf.uscsi_buflen = 0; 19846 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19847 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19848 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19849 ucmd_buf.uscsi_timeout = 15; 19850 19851 SD_TRACE(SD_LOG_IO, un, 19852 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n"); 19853 19854 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19855 UIO_SYSSPACE, path_flag); 19856 19857 if (status == 0) 19858 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19859 19860 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 19861 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19862 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 19863 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19864 19865 /* fake success and skip subsequent doorlock commands */ 19866 un->un_f_doorlock_supported = FALSE; 19867 return (0); 19868 } 19869 19870 return (status); 19871 } 19872 19873 /* 19874 * Function: sd_send_scsi_READ_CAPACITY 19875 * 19876 * Description: This routine uses the scsi READ CAPACITY command to determine 19877 * the device capacity in number of blocks and the device native 19878 * block size. If this function returns a failure, then the 19879 * values in *capp and *lbap are undefined. If the capacity 19880 * returned is 0xffffffff, then the lun is too large for a 19881 * normal READ CAPACITY command and the results of a 19882 * READ CAPACITY 16 will be used instead. 19883 * 19884 * Arguments: ssc - ssc contains ptr to soft state struct for the target 19885 * capp - ptr to unsigned 64-bit variable to receive the 19886 * capacity value from the command. 19887 * lbap - ptr to unsigned 32-bit variable to receive the 19888 * block size value from the command 19889 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19890 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19891 * to use the USCSI "direct" chain and bypass the normal 19892 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19893 * command is issued as part of an error recovery action. 19894 * 19895 * Return Code: 0 - Success 19896 * EIO - IO error 19897 * EACCES - Reservation conflict detected 19898 * EAGAIN - Device is becoming ready 19899 * errno return code from sd_ssc_send() 19900 * 19901 * Context: Can sleep. Blocks until command completes.
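 *
 * For reference, the 8-byte READ CAPACITY (10) parameter data
 * consists of two big-endian 32-bit fields, roughly (see the
 * struct scsi_capacity usage via SD_CAPACITY_SIZE below):
 *
 *	bytes 0-3	returned logical block address (the last
 *			LBA, not the block count)
 *	bytes 4-7	block length in bytes
 *
 * Hence the "capacity += 1" adjustment after a successful
 * decode: the block count is the last LBA plus one.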
19902 */ 19903 19904 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 19905 19906 static int 19907 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap, 19908 int path_flag) 19909 { 19910 struct scsi_extended_sense sense_buf; 19911 struct uscsi_cmd ucmd_buf; 19912 union scsi_cdb cdb; 19913 uint32_t *capacity_buf; 19914 uint64_t capacity; 19915 uint32_t lbasize; 19916 uint32_t pbsize; 19917 int status; 19918 struct sd_lun *un; 19919 19920 ASSERT(ssc != NULL); 19921 19922 un = ssc->ssc_un; 19923 ASSERT(un != NULL); 19924 ASSERT(!mutex_owned(SD_MUTEX(un))); 19925 ASSERT(capp != NULL); 19926 ASSERT(lbap != NULL); 19927 19928 SD_TRACE(SD_LOG_IO, un, 19929 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 19930 19931 /* 19932 * First send a READ_CAPACITY command to the target. 19933 * (This command is mandatory under SCSI-2.) 19934 * 19935 * Set up the CDB for the READ_CAPACITY command. The Partial 19936 * Medium Indicator bit is cleared. The address field must be 19937 * zero if the PMI bit is zero. 19938 */ 19939 bzero(&cdb, sizeof (cdb)); 19940 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19941 19942 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 19943 19944 cdb.scc_cmd = SCMD_READ_CAPACITY; 19945 19946 ucmd_buf.uscsi_cdb = (char *)&cdb; 19947 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19948 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 19949 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 19950 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19951 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19952 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19953 ucmd_buf.uscsi_timeout = 60; 19954 19955 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19956 UIO_SYSSPACE, path_flag); 19957 19958 switch (status) { 19959 case 0: 19960 /* Return failure if we did not get valid capacity data. */ 19961 if (ucmd_buf.uscsi_resid != 0) { 19962 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 19963 "sd_send_scsi_READ_CAPACITY received invalid " 19964 "capacity data"); 19965 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19966 return (EIO); 19967 } 19968 /* 19969 * Read capacity and block size from the READ CAPACITY 10 data. 19970 * This data may be adjusted later due to device specific 19971 * issues. 19972 * 19973 * According to the SCSI spec, the READ CAPACITY 10 19974 * command returns the following: 19975 * 19976 * bytes 0-3: Maximum logical block address available. 19977 * (MSB in byte:0 & LSB in byte:3) 19978 * 19979 * bytes 4-7: Block length in bytes 19980 * (MSB in byte:4 & LSB in byte:7) 19981 * 19982 */ 19983 capacity = BE_32(capacity_buf[0]); 19984 lbasize = BE_32(capacity_buf[1]); 19985 19986 /* 19987 * Done with capacity_buf 19988 */ 19989 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 19990 19991 /* 19992 * if the reported capacity is set to all 0xf's, then 19993 * this disk is too large and requires SBC-2 commands. 19994 * Reissue the request using READ CAPACITY 16. 19995 */ 19996 if (capacity == 0xffffffff) { 19997 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19998 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, 19999 &lbasize, &pbsize, path_flag); 20000 if (status != 0) { 20001 return (status); 20002 } else { 20003 goto rc16_done; 20004 } 20005 } 20006 break; /* Success! 
*/ 20007 case EIO: 20008 switch (ucmd_buf.uscsi_status) { 20009 case STATUS_RESERVATION_CONFLICT: 20010 status = EACCES; 20011 break; 20012 case STATUS_CHECK: 20013 /* 20014 * Check condition; look for ASC/ASCQ of 0x04/0x01 20015 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 20016 */ 20017 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20018 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 20019 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 20020 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20021 return (EAGAIN); 20022 } 20023 break; 20024 default: 20025 break; 20026 } 20027 /* FALLTHRU */ 20028 default: 20029 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 20030 return (status); 20031 } 20032 20033 /* 20034 * Some ATAPI CD-ROM drives report inaccurate LBA size values 20035 * (2352 and 0 are common) so for these devices always force the value 20036 * to 2048 as required by the ATAPI specs. 20037 */ 20038 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 20039 lbasize = 2048; 20040 } 20041 20042 /* 20043 * Get the maximum LBA value from the READ CAPACITY data. 20044 * Here we assume that the Partial Medium Indicator (PMI) bit 20045 * was cleared when issuing the command. This means that the LBA 20046 * returned from the device is the LBA of the last logical block 20047 * on the logical unit. The actual logical block count will be 20048 * this value plus one. 20049 */ 20050 capacity += 1; 20051 20052 /* 20053 * Currently, for removable media, the capacity is saved in terms 20054 * of un->un_sys_blocksize, so scale the capacity value to reflect this. 20055 */ 20056 if (un->un_f_has_removable_media) 20057 capacity *= (lbasize / un->un_sys_blocksize); 20058 20059 rc16_done: 20060 20061 /* 20062 * Copy the values from the READ CAPACITY command into the space 20063 * provided by the caller. 20064 */ 20065 *capp = capacity; 20066 *lbap = lbasize; 20067 20068 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 20069 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 20070 20071 /* 20072 * Both the lbasize and capacity from the device must be nonzero, 20073 * otherwise we assume that the values are not valid and return 20074 * failure to the caller. (4203735) 20075 */ 20076 if ((capacity == 0) || (lbasize == 0)) { 20077 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20078 "sd_send_scsi_READ_CAPACITY received invalid value " 20079 "capacity %llu lbasize %d", capacity, lbasize); 20080 return (EIO); 20081 } 20082 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20083 return (0); 20084 } 20085 20086 /* 20087 * Function: sd_send_scsi_READ_CAPACITY_16 20088 * 20089 * Description: This routine uses the scsi READ CAPACITY 16 command to 20090 * determine the device capacity in number of blocks and the 20091 * device native block size. If this function returns a failure, 20092 * then the values in *capp and *lbap are undefined. 20093 * This routine should be called by sd_send_scsi_READ_CAPACITY 20094 * which will apply any device specific adjustments to capacity 20095 * and lbasize. One exception is it is also called by 20096 * sd_get_media_info_ext. In that function, there is no need to 20097 * adjust the capacity and lbasize. 20098 * 20099 * Arguments: ssc - ssc contains ptr to soft state struct for the target 20100 * capp - ptr to unsigned 64-bit variable to receive the 20101 * capacity value from the command. 
20102 * lbap - ptr to unsigned 32-bit variable to receive the 20103 * block size value from the command 20104 * psp - ptr to unsigned 32-bit variable to receive the 20105 * physical block size value from the command 20106 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20107 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20108 * to use the USCSI "direct" chain and bypass the normal 20109 * command waitq. SD_PATH_DIRECT_PRIORITY is used when 20110 * this command is issued as part of an error recovery 20111 * action. 20112 * 20113 * Return Code: 0 - Success 20114 * EIO - IO error 20115 * EACCES - Reservation conflict detected 20116 * EAGAIN - Device is becoming ready 20117 * errno return code from sd_ssc_send() 20118 * 20119 * Context: Can sleep. Blocks until command completes. 20120 */ 20121 20122 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 20123 20124 static int 20125 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap, 20126 uint32_t *psp, int path_flag) 20127 { 20128 struct scsi_extended_sense sense_buf; 20129 struct uscsi_cmd ucmd_buf; 20130 union scsi_cdb cdb; 20131 uint64_t *capacity16_buf; 20132 uint64_t capacity; 20133 uint32_t lbasize; 20134 uint32_t pbsize; 20135 uint32_t lbpb_exp; 20136 int status; 20137 struct sd_lun *un; 20138 20139 ASSERT(ssc != NULL); 20140 20141 un = ssc->ssc_un; 20142 ASSERT(un != NULL); 20143 ASSERT(!mutex_owned(SD_MUTEX(un))); 20144 ASSERT(capp != NULL); 20145 ASSERT(lbap != NULL); 20146 20147 SD_TRACE(SD_LOG_IO, un, 20148 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un); 20149 20150 /* 20151 * First send a READ_CAPACITY_16 command to the target. 20152 * 20153 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 20154 * Medium Indicator bit is cleared. The address field must be 20155 * zero if the PMI bit is zero. 20156 */ 20157 bzero(&cdb, sizeof (cdb)); 20158 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20159 20160 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 20161 20162 ucmd_buf.uscsi_cdb = (char *)&cdb; 20163 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 20164 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 20165 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 20166 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20167 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 20168 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20169 ucmd_buf.uscsi_timeout = 60; 20170 20171 /* 20172 * Read Capacity (16) is a Service Action In command. One 20173 * command byte (0x9E) is overloaded for multiple operations, 20174 * with the second CDB byte specifying the desired operation. 20175 */ 20176 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 20177 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 20178 20179 /* 20180 * Fill in allocation length field 20181 */ 20182 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 20183 20184 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20185 UIO_SYSSPACE, path_flag); 20186 20187 switch (status) { 20188 case 0: 20189 /* Return failure if we did not get valid capacity data. */ 20190 if (ucmd_buf.uscsi_resid > 20) { 20191 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20192 "sd_send_scsi_READ_CAPACITY_16 received invalid " 20193 "capacity data"); 20194 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20195 return (EIO); 20196 } 20197 20198 /* 20199 * Read capacity and block size from the READ CAPACITY 16 data. 20200 * This data may be adjusted later due to device specific 20201 * issues.
20202 * 20203 * According to the SCSI spec, the READ CAPACITY 16 20204 * command returns the following: 20205 * 20206 * bytes 0-7: Maximum logical block address available. 20207 * (MSB in byte:0 & LSB in byte:7) 20208 * 20209 * bytes 8-11: Block length in bytes 20210 * (MSB in byte:8 & LSB in byte:11) 20211 * 20212 * byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT 20213 * 20214 * byte 14: 20215 * bit 7: Thin-Provisioning Enabled 20216 * bit 6: Thin-Provisioning Read Zeros 20217 */ 20218 capacity = BE_64(capacity16_buf[0]); 20219 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 20220 lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f; 20221 20222 un->un_thin_flags = 0; 20223 if (((uint8_t *)capacity16_buf)[14] & (1 << 7)) 20224 un->un_thin_flags |= SD_THIN_PROV_ENABLED; 20225 if (((uint8_t *)capacity16_buf)[14] & (1 << 6)) 20226 un->un_thin_flags |= SD_THIN_PROV_READ_ZEROS; 20227 20228 pbsize = lbasize << lbpb_exp; 20229 20230 /* 20231 * Done with capacity16_buf 20232 */ 20233 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20234 20235 /* 20236 * if the reported capacity is set to all 0xf's, then 20237 * this disk is too large. This could only happen with 20238 * a device that supports LBAs larger than 64 bits which 20239 * are not defined by any current T10 standards. 20240 */ 20241 if (capacity == 0xffffffffffffffff) { 20242 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20243 "disk is too large"); 20244 return (EIO); 20245 } 20246 break; /* Success! */ 20247 case EIO: 20248 switch (ucmd_buf.uscsi_status) { 20249 case STATUS_RESERVATION_CONFLICT: 20250 status = EACCES; 20251 break; 20252 case STATUS_CHECK: 20253 /* 20254 * Check condition; look for ASC/ASCQ of 0x04/0x01 20255 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 20256 */ 20257 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20258 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) && 20259 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) { 20260 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20261 return (EAGAIN); 20262 } 20263 break; 20264 default: 20265 break; 20266 } 20267 /* FALLTHRU */ 20268 default: 20269 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 20270 return (status); 20271 } 20272 20273 /* 20274 * Some ATAPI CD-ROM drives report inaccurate LBA size values 20275 * (2352 and 0 are common) so for these devices always force the value 20276 * to 2048 as required by the ATAPI specs. 20277 */ 20278 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 20279 lbasize = 2048; 20280 } 20281 20282 /* 20283 * Get the maximum LBA value from the READ CAPACITY 16 data. 20284 * Here we assume that the Partial Medium Indicator (PMI) bit 20285 * was cleared when issuing the command. This means that the LBA 20286 * returned from the device is the LBA of the last logical block 20287 * on the logical unit. The actual logical block count will be 20288 * this value plus one. 20289 */ 20290 capacity += 1; 20291 20292 /* 20293 * Currently, for removable media, the capacity is saved in terms 20294 * of un->un_sys_blocksize, so scale the capacity value to reflect this. 
20295 */ 20296 if (un->un_f_has_removable_media) 20297 capacity *= (lbasize / un->un_sys_blocksize); 20298 20299 *capp = capacity; 20300 *lbap = lbasize; 20301 *psp = pbsize; 20302 20303 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 20304 "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n", 20305 capacity, lbasize, pbsize); 20306 20307 if ((capacity == 0) || (lbasize == 0) || (pbsize == 0)) { 20308 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20309 "sd_send_scsi_READ_CAPACITY_16 received invalid value " 20310 "capacity %llu lbasize %d pbsize %d", capacity, lbasize, pbsize); 20311 return (EIO); 20312 } 20313 20314 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20315 return (0); 20316 } 20317 20318 20319 /* 20320 * Function: sd_send_scsi_START_STOP_UNIT 20321 * 20322 * Description: Issue a scsi START STOP UNIT command to the target. 20323 * 20324 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20325 * structure for this target. 20326 * pc_flag - SD_POWER_CONDITION 20327 * SD_START_STOP 20328 * flag - SD_TARGET_START 20329 * SD_TARGET_STOP 20330 * SD_TARGET_EJECT 20331 * SD_TARGET_CLOSE 20332 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20333 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20334 * to use the USCSI "direct" chain and bypass the normal 20335 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 20336 * command is issued as part of an error recovery action. 20337 * 20338 * Return Code: 0 - Success 20339 * EIO - IO error 20340 * EACCES - Reservation conflict detected 20341 * ENXIO - Not Ready, medium not present 20342 * errno return code from sd_ssc_send() 20343 * 20344 * Context: Can sleep. 20345 */ 20346 20347 static int 20348 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, int flag, 20349 int path_flag) 20350 { 20351 struct scsi_extended_sense sense_buf; 20352 union scsi_cdb cdb; 20353 struct uscsi_cmd ucmd_buf; 20354 int status; 20355 struct sd_lun *un; 20356 20357 ASSERT(ssc != NULL); 20358 un = ssc->ssc_un; 20359 ASSERT(un != NULL); 20360 ASSERT(!mutex_owned(SD_MUTEX(un))); 20361 20362 SD_TRACE(SD_LOG_IO, un, 20363 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 20364 20365 if (un->un_f_check_start_stop && 20366 (pc_flag == SD_START_STOP) && 20367 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 20368 (un->un_f_start_stop_supported != TRUE)) { 20369 return (0); 20370 } 20371 20372 /* 20373 * If we are performing an eject operation and 20374 * we receive any command other than SD_TARGET_EJECT, 20375 * we should immediately return. 20376 */ 20377 if (flag != SD_TARGET_EJECT) { 20378 mutex_enter(SD_MUTEX(un)); 20379 if (un->un_f_ejecting == TRUE) { 20380 mutex_exit(SD_MUTEX(un)); 20381 return (EAGAIN); 20382 } 20383 mutex_exit(SD_MUTEX(un)); 20384 } 20385 20386 bzero(&cdb, sizeof (cdb)); 20387 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20388 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20389 20390 cdb.scc_cmd = SCMD_START_STOP; 20391 cdb.cdb_opaque[4] = (pc_flag == SD_POWER_CONDITION) ?
(uchar_t)(flag << 4) : (uchar_t)flag; 20393 20394 ucmd_buf.uscsi_cdb = (char *)&cdb; 20395 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20396 ucmd_buf.uscsi_bufaddr = NULL; 20397 ucmd_buf.uscsi_buflen = 0; 20398 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20399 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20400 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20401 ucmd_buf.uscsi_timeout = 200; 20402 20403 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20404 UIO_SYSSPACE, path_flag); 20405 20406 switch (status) { 20407 case 0: 20408 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20409 break; /* Success! */ 20410 case EIO: 20411 switch (ucmd_buf.uscsi_status) { 20412 case STATUS_RESERVATION_CONFLICT: 20413 status = EACCES; 20414 break; 20415 case STATUS_CHECK: 20416 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 20417 switch (scsi_sense_key( 20418 (uint8_t *)&sense_buf)) { 20419 case KEY_ILLEGAL_REQUEST: 20420 status = ENOTSUP; 20421 break; 20422 case KEY_NOT_READY: 20423 if (scsi_sense_asc( 20424 (uint8_t *)&sense_buf) 20425 == 0x3A) { 20426 status = ENXIO; 20427 } 20428 break; 20429 default: 20430 break; 20431 } 20432 } 20433 break; 20434 default: 20435 break; 20436 } 20437 break; 20438 default: 20439 break; 20440 } 20441 20442 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 20443 20444 return (status); 20445 } 20446 20447 20448 /* 20449 * Function: sd_start_stop_unit_callback 20450 * 20451 * Description: timeout(9F) callback to begin recovery process for a 20452 * device that has spun down. 20453 * 20454 * Arguments: arg - pointer to associated softstate struct. 20455 * 20456 * Context: Executes in a timeout(9F) thread context 20457 */ 20458 20459 static void 20460 sd_start_stop_unit_callback(void *arg) 20461 { 20462 struct sd_lun *un = arg; 20463 ASSERT(un != NULL); 20464 ASSERT(!mutex_owned(SD_MUTEX(un))); 20465 20466 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 20467 20468 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 20469 } 20470 20471 20472 /* 20473 * Function: sd_start_stop_unit_task 20474 * 20475 * Description: Recovery procedure when a drive is spun down. 20476 * 20477 * Arguments: arg - pointer to associated softstate struct. 20478 * 20479 * Context: Executes in a taskq() thread context 20480 */ 20481 20482 static void 20483 sd_start_stop_unit_task(void *arg) 20484 { 20485 struct sd_lun *un = arg; 20486 sd_ssc_t *ssc; 20487 int power_level; 20488 int rval; 20489 20490 ASSERT(un != NULL); 20491 ASSERT(!mutex_owned(SD_MUTEX(un))); 20492 20493 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 20494 20495 /* 20496 * Some unformatted drives report a not ready error; there is no 20497 * need to restart if a format has been initiated. 20498 */ 20499 mutex_enter(SD_MUTEX(un)); 20500 if (un->un_f_format_in_progress == TRUE) { 20501 mutex_exit(SD_MUTEX(un)); 20502 return; 20503 } 20504 mutex_exit(SD_MUTEX(un)); 20505 20506 ssc = sd_ssc_init(un); 20507 /* 20508 * When a START STOP command is issued from here, it is part of a 20509 * failure recovery operation and must be issued before any other 20510 * commands, including any pending retries. Thus it must be sent 20511 * using SD_PATH_DIRECT_PRIORITY. Whether or not the spin up 20512 * succeeds, we will start I/O after the attempt. 20513 * If power condition is supported and the current power level 20514 * is capable of performing I/O, we should set the power condition 20515 * to that level. Otherwise, set the power condition to ACTIVE.
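 * (The sd_pl2pc[] table used below translates the driver's logical
 * power level into the corresponding START STOP UNIT power condition
 * value, and sd_pwr_pc.ran_perf[] is consulted first to check whether
 * the current level is capable of performing I/O.)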
20516 */ 20517 if (un->un_f_power_condition_supported) { 20518 mutex_enter(SD_MUTEX(un)); 20519 ASSERT(SD_PM_IS_LEVEL_VALID(un, un->un_power_level)); 20520 power_level = sd_pwr_pc.ran_perf[un->un_power_level] 20521 > 0 ? un->un_power_level : SD_SPINDLE_ACTIVE; 20522 mutex_exit(SD_MUTEX(un)); 20523 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION, 20524 sd_pl2pc[power_level], SD_PATH_DIRECT_PRIORITY); 20525 } else { 20526 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 20527 SD_TARGET_START, SD_PATH_DIRECT_PRIORITY); 20528 } 20529 20530 if (rval != 0) 20531 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 20532 sd_ssc_fini(ssc); 20533 /* 20534 * The above call blocks until the START_STOP_UNIT command completes. 20535 * Now that it has completed, we must re-try the original IO that 20536 * received the NOT READY condition in the first place. There are 20537 * three possible conditions here: 20538 * 20539 * (1) The original IO is on un_retry_bp. 20540 * (2) The original IO is on the regular wait queue, and un_retry_bp 20541 * is NULL. 20542 * (3) The original IO is on the regular wait queue, and un_retry_bp 20543 * points to some other, unrelated bp. 20544 * 20545 * For each case, we must call sd_start_cmds() with un_retry_bp 20546 * as the argument. If un_retry_bp is NULL, this will initiate 20547 * processing of the regular wait queue. If un_retry_bp is not NULL, 20548 * then this will process the bp on un_retry_bp. That may or may not 20549 * be the original IO, but that does not matter: the important thing 20550 * is to keep the IO processing going at this point. 20551 * 20552 * Note: This is a very specific error recovery sequence associated 20553 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 20554 * serialize the I/O with completion of the spin-up. 20555 */ 20556 mutex_enter(SD_MUTEX(un)); 20557 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 20558 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 20559 un, un->un_retry_bp); 20560 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 20561 sd_start_cmds(un, un->un_retry_bp); 20562 mutex_exit(SD_MUTEX(un)); 20563 20564 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 20565 } 20566 20567 20568 /* 20569 * Function: sd_send_scsi_INQUIRY 20570 * 20571 * Description: Issue the scsi INQUIRY command. 20572 * 20573 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20574 * structure for this target. 20575 * bufaddr - buffer to receive the INQUIRY data 20576 * buflen - size of the buffer at bufaddr 20577 * evpd - EVPD bit for CDB byte 1; set to request a VPD page 20578 * page_code - VPD page code for CDB byte 2 20579 * residp - pointer to receive the residual count, if desired 20580 * 20581 * Return Code: 0 - Success 20582 * errno return code from sd_ssc_send() 20583 * 20584 * Context: Can sleep. Does not return until command is completed.
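 *
 * Illustrative sketch (an editorial example, not driver code): fetching
 * the Unit Serial Number VPD page (page code 0x80) would look like
 *
 *	uchar_t inqbuf[0xff];
 *	size_t resid;
 *	status = sd_send_scsi_INQUIRY(ssc, inqbuf, sizeof (inqbuf),
 *	    0x01, 0x80, &resid);
 *
 * where 0x01 sets the EVPD bit in CDB byte 1 and 0x80 selects the page.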
20585 */ 20586 20587 static int 20588 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen, 20589 uchar_t evpd, uchar_t page_code, size_t *residp) 20590 { 20591 union scsi_cdb cdb; 20592 struct uscsi_cmd ucmd_buf; 20593 int status; 20594 struct sd_lun *un; 20595 20596 ASSERT(ssc != NULL); 20597 un = ssc->ssc_un; 20598 ASSERT(un != NULL); 20599 ASSERT(!mutex_owned(SD_MUTEX(un))); 20600 ASSERT(bufaddr != NULL); 20601 20602 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 20603 20604 bzero(&cdb, sizeof (cdb)); 20605 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20606 bzero(bufaddr, buflen); 20607 20608 cdb.scc_cmd = SCMD_INQUIRY; 20609 cdb.cdb_opaque[1] = evpd; 20610 cdb.cdb_opaque[2] = page_code; 20611 FORMG0COUNT(&cdb, buflen); 20612 20613 ucmd_buf.uscsi_cdb = (char *)&cdb; 20614 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20615 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20616 ucmd_buf.uscsi_buflen = buflen; 20617 ucmd_buf.uscsi_rqbuf = NULL; 20618 ucmd_buf.uscsi_rqlen = 0; 20619 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 20620 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 20621 20622 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20623 UIO_SYSSPACE, SD_PATH_DIRECT); 20624 20625 /* 20626 * Only handle status == 0 here; the upper-level caller 20627 * will apply a different assessment based on the context. 20628 */ 20629 if (status == 0) 20630 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20631 20632 if ((status == 0) && (residp != NULL)) { 20633 *residp = ucmd_buf.uscsi_resid; 20634 } 20635 20636 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 20637 20638 return (status); 20639 } 20640 20641 20642 /* 20643 * Function: sd_send_scsi_TEST_UNIT_READY 20644 * 20645 * Description: Issue the scsi TEST UNIT READY command. 20646 * This routine can be told to set the flag USCSI_DIAGNOSE to 20647 * prevent retrying failed commands. Use this when the intent 20648 * is either to check for device readiness, to clear a Unit 20649 * Attention, or to clear any outstanding sense data. 20650 * However under specific conditions the expected behavior 20651 * is for retries to bring a device ready, so use the flag 20652 * with caution. 20653 * 20654 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20655 * structure for this target. 20656 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 20657 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 20658 * 0: don't check for media present, do retries on cmd. 20659 * 20660 * Return Code: 0 - Success 20661 * EIO - IO error 20662 * EACCES - Reservation conflict detected 20663 * ENXIO - Not Ready, medium not present 20664 * errno return code from sd_ssc_send() 20665 * 20666 * Context: Can sleep. Does not return until command is completed. 20667 */ 20668 20669 static int 20670 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag) 20671 { 20672 struct scsi_extended_sense sense_buf; 20673 union scsi_cdb cdb; 20674 struct uscsi_cmd ucmd_buf; 20675 int status; 20676 struct sd_lun *un; 20677 20678 ASSERT(ssc != NULL); 20679 un = ssc->ssc_un; 20680 ASSERT(un != NULL); 20681 ASSERT(!mutex_owned(SD_MUTEX(un))); 20682 20683 SD_TRACE(SD_LOG_IO, un, 20684 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 20685 20686 /* 20687 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 20688 * timeouts when they receive a TUR and the queue is not empty.
Check 20689 * the configuration flag set during attach (indicating the drive has 20690 * this firmware bug) and un_ncmds_in_transport before issuing the 20691 * TUR. If there are pending commands, return success; this is somewhat 20692 * arbitrary, but it is acceptable for non-removable media (i.e. the 20693 * elite1 disks) and non-clustering configurations. 20694 20695 */ 20696 if (un->un_f_cfg_tur_check == TRUE) { 20697 mutex_enter(SD_MUTEX(un)); 20698 if (un->un_ncmds_in_transport != 0) { 20699 mutex_exit(SD_MUTEX(un)); 20700 return (0); 20701 } 20702 mutex_exit(SD_MUTEX(un)); 20703 } 20704 20705 bzero(&cdb, sizeof (cdb)); 20706 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20707 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20708 20709 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 20710 20711 ucmd_buf.uscsi_cdb = (char *)&cdb; 20712 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 20713 ucmd_buf.uscsi_bufaddr = NULL; 20714 ucmd_buf.uscsi_buflen = 0; 20715 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20716 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20717 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20718 20719 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 20720 if ((flag & SD_DONT_RETRY_TUR) != 0) { 20721 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 20722 } 20723 ucmd_buf.uscsi_timeout = 60; 20724 20725 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20726 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 20727 SD_PATH_STANDARD)); 20728 20729 switch (status) { 20730 case 0: 20731 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20732 break; /* Success! */ 20733 case EIO: 20734 switch (ucmd_buf.uscsi_status) { 20735 case STATUS_RESERVATION_CONFLICT: 20736 status = EACCES; 20737 break; 20738 case STATUS_CHECK: 20739 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 20740 break; 20741 } 20742 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20743 (scsi_sense_key((uint8_t *)&sense_buf) == 20744 KEY_NOT_READY) && 20745 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 20746 status = ENXIO; 20747 } 20748 break; 20749 default: 20750 break; 20751 } 20752 break; 20753 default: 20754 break; 20755 } 20756 20757 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 20758 20759 return (status); 20760 } 20761 20762 /* 20763 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 20764 * 20765 * Description: Issue the scsi PERSISTENT RESERVE IN command. 20766 * 20767 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20768 * structure for this target. 20769 * 20770 * Return Code: 0 - Success 20771 * EACCES 20772 * ENOTSUP 20773 * errno return code from sd_ssc_send() 20774 * 20775 * Context: Can sleep. Does not return until command is completed.
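 *
 * (Editorial note: the remaining parameters are usr_cmd, the service
 * action, which must be SD_READ_KEYS or SD_READ_RESV; data_len, the
 * size of the caller's buffer; and data_bufp, the buffer to receive
 * the parameter data. If data_bufp is NULL, data_len must be zero and
 * a default buffer of MHIOC_RESV_KEY_SIZE bytes is allocated and freed
 * internally.)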
20776 */ 20777 20778 static int 20779 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd, 20780 uint16_t data_len, uchar_t *data_bufp) 20781 { 20782 struct scsi_extended_sense sense_buf; 20783 union scsi_cdb cdb; 20784 struct uscsi_cmd ucmd_buf; 20785 int status; 20786 int no_caller_buf = FALSE; 20787 struct sd_lun *un; 20788 20789 ASSERT(ssc != NULL); 20790 un = ssc->ssc_un; 20791 ASSERT(un != NULL); 20792 ASSERT(!mutex_owned(SD_MUTEX(un))); 20793 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 20794 20795 SD_TRACE(SD_LOG_IO, un, 20796 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 20797 20798 bzero(&cdb, sizeof (cdb)); 20799 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20800 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20801 if (data_bufp == NULL) { 20802 /* Allocate a default buf if the caller did not give one */ 20803 ASSERT(data_len == 0); 20804 data_len = MHIOC_RESV_KEY_SIZE; 20805 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 20806 no_caller_buf = TRUE; 20807 } 20808 20809 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 20810 cdb.cdb_opaque[1] = usr_cmd; 20811 FORMG1COUNT(&cdb, data_len); 20812 20813 ucmd_buf.uscsi_cdb = (char *)&cdb; 20814 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20815 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 20816 ucmd_buf.uscsi_buflen = data_len; 20817 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20818 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20819 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20820 ucmd_buf.uscsi_timeout = 60; 20821 20822 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20823 UIO_SYSSPACE, SD_PATH_STANDARD); 20824 20825 switch (status) { 20826 case 0: 20827 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20828 20829 break; /* Success! */ 20830 case EIO: 20831 switch (ucmd_buf.uscsi_status) { 20832 case STATUS_RESERVATION_CONFLICT: 20833 status = EACCES; 20834 break; 20835 case STATUS_CHECK: 20836 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20837 (scsi_sense_key((uint8_t *)&sense_buf) == 20838 KEY_ILLEGAL_REQUEST)) { 20839 status = ENOTSUP; 20840 } 20841 break; 20842 default: 20843 break; 20844 } 20845 break; 20846 default: 20847 break; 20848 } 20849 20850 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 20851 20852 if (no_caller_buf == TRUE) { 20853 kmem_free(data_bufp, data_len); 20854 } 20855 20856 return (status); 20857 } 20858 20859 20860 /* 20861 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 20862 * 20863 * Description: This routine is the driver entry point for handling 20864 * multi-host persistent reservation requests (e.g. 20865 * MHIOCGRP_REGISTER, MHIOCGRP_RESERVE) by sending the 20866 * SCSI-3 PROUT commands to the device. 20867 * 20868 * Arguments: ssc - ssc contains un - pointer to soft state struct 20869 * for the target. 20870 * usr_cmd - SCSI-3 reservation facility command (one of 20871 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 20872 * SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_CLEAR) 20873 * usr_bufp - user provided pointer to a register, reserve 20874 * descriptor or preempt and abort structure (mhioc_register_t, 20875 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 20876 * 20877 * Return Code: 0 - Success 20878 * EACCES 20879 * ENOTSUP 20880 * errno return code from sd_ssc_send() 20881 * 20882 * Context: Can sleep. Does not return until command is completed.
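 *
 * Illustrative sketch (an editorial example, not driver code): a
 * registration request arriving through the multihost ioctl path could
 * be passed down as
 *
 *	mhioc_register_t mhreg;  -- filled in by the caller
 *	status = sd_send_scsi_PERSISTENT_RESERVE_OUT(ssc,
 *	    SD_SCSI3_REGISTER, (uchar_t *)&mhreg);
 *
 * with mhreg carrying the old and new reservation keys.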
20883 */ 20884 20885 static int 20886 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd, 20887 uchar_t *usr_bufp) 20888 { 20889 struct scsi_extended_sense sense_buf; 20890 union scsi_cdb cdb; 20891 struct uscsi_cmd ucmd_buf; 20892 int status; 20893 uchar_t data_len = sizeof (sd_prout_t); 20894 sd_prout_t *prp; 20895 struct sd_lun *un; 20896 20897 ASSERT(ssc != NULL); 20898 un = ssc->ssc_un; 20899 ASSERT(un != NULL); 20900 ASSERT(!mutex_owned(SD_MUTEX(un))); 20901 ASSERT(data_len == 24); /* required by scsi spec */ 20902 20903 SD_TRACE(SD_LOG_IO, un, 20904 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 20905 20906 if (usr_bufp == NULL) { 20907 return (EINVAL); 20908 } 20909 20910 bzero(&cdb, sizeof (cdb)); 20911 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20912 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20913 prp = kmem_zalloc(data_len, KM_SLEEP); 20914 20915 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 20916 cdb.cdb_opaque[1] = usr_cmd; 20917 FORMG1COUNT(&cdb, data_len); 20918 20919 ucmd_buf.uscsi_cdb = (char *)&cdb; 20920 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20921 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 20922 ucmd_buf.uscsi_buflen = data_len; 20923 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20924 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20925 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 20926 ucmd_buf.uscsi_timeout = 60; 20927 20928 switch (usr_cmd) { 20929 case SD_SCSI3_REGISTER: { 20930 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 20931 20932 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20933 bcopy(ptr->newkey.key, prp->service_key, 20934 MHIOC_RESV_KEY_SIZE); 20935 prp->aptpl = ptr->aptpl; 20936 break; 20937 } 20938 case SD_SCSI3_CLEAR: { 20939 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 20940 20941 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20942 break; 20943 } 20944 case SD_SCSI3_RESERVE: 20945 case SD_SCSI3_RELEASE: { 20946 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 20947 20948 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20949 prp->scope_address = BE_32(ptr->scope_specific_addr); 20950 cdb.cdb_opaque[2] = ptr->type; 20951 break; 20952 } 20953 case SD_SCSI3_PREEMPTANDABORT: { 20954 mhioc_preemptandabort_t *ptr = 20955 (mhioc_preemptandabort_t *)usr_bufp; 20956 20957 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20958 bcopy(ptr->victim_key.key, prp->service_key, 20959 MHIOC_RESV_KEY_SIZE); 20960 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 20961 cdb.cdb_opaque[2] = ptr->resvdesc.type; 20962 ucmd_buf.uscsi_flags |= USCSI_HEAD; 20963 break; 20964 } 20965 case SD_SCSI3_REGISTERANDIGNOREKEY: 20966 { 20967 mhioc_registerandignorekey_t *ptr; 20968 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 20969 bcopy(ptr->newkey.key, 20970 prp->service_key, MHIOC_RESV_KEY_SIZE); 20971 prp->aptpl = ptr->aptpl; 20972 break; 20973 } 20974 default: 20975 ASSERT(FALSE); 20976 break; 20977 } 20978 20979 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20980 UIO_SYSSPACE, SD_PATH_STANDARD); 20981 20982 switch (status) { 20983 case 0: 20984 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20985 break; /* Success! 
*/ 20986 case EIO: 20987 switch (ucmd_buf.uscsi_status) { 20988 case STATUS_RESERVATION_CONFLICT: 20989 status = EACCES; 20990 break; 20991 case STATUS_CHECK: 20992 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20993 (scsi_sense_key((uint8_t *)&sense_buf) == 20994 KEY_ILLEGAL_REQUEST)) { 20995 status = ENOTSUP; 20996 } 20997 break; 20998 default: 20999 break; 21000 } 21001 break; 21002 default: 21003 break; 21004 } 21005 21006 kmem_free(prp, data_len); 21007 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 21008 return (status); 21009 } 21010 21011 21012 /* 21013 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 21014 * 21015 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 21016 * 21017 * Arguments: un - pointer to the target's soft state struct 21018 * dkc - pointer to the callback structure 21019 * 21020 * Return Code: 0 - success 21021 * errno-type error code 21022 * 21023 * Context: kernel thread context only. 21024 * 21025 * _______________________________________________________________ 21026 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 21027 * |FLUSH_VOLATILE| | operation | 21028 * |______________|______________|_________________________________| 21029 * | 0 | NULL | Synchronous flush on both | 21030 * | | | volatile and non-volatile cache | 21031 * |______________|______________|_________________________________| 21032 * | 1 | NULL | Synchronous flush on volatile | 21033 * | | | cache; disk drivers may suppress| 21034 * | | | flush if disk table indicates | 21035 * | | | non-volatile cache | 21036 * |______________|______________|_________________________________| 21037 * | 0 | !NULL | Asynchronous flush on both | 21038 * | | | volatile and non-volatile cache;| 21039 * |______________|______________|_________________________________| 21040 * | 1 | !NULL | Asynchronous flush on volatile | 21041 * | | | cache; disk drivers may suppress| 21042 * | | | flush if disk table indicates | 21043 * | | | non-volatile cache | 21044 * |______________|______________|_________________________________| 21045 * 21046 */ 21047 21048 static int 21049 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 21050 { 21051 struct sd_uscsi_info *uip; 21052 struct uscsi_cmd *uscmd; 21053 union scsi_cdb *cdb; 21054 struct buf *bp; 21055 int rval = 0; 21056 int is_async; 21057 21058 SD_TRACE(SD_LOG_IO, un, 21059 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 21060 21061 ASSERT(un != NULL); 21062 ASSERT(!mutex_owned(SD_MUTEX(un))); 21063 21064 if (dkc == NULL || dkc->dkc_callback == NULL) { 21065 is_async = FALSE; 21066 } else { 21067 is_async = TRUE; 21068 } 21069 21070 mutex_enter(SD_MUTEX(un)); 21071 /* check whether cache flush should be suppressed */ 21072 if (un->un_f_suppress_cache_flush == TRUE) { 21073 mutex_exit(SD_MUTEX(un)); 21074 /* 21075 * suppress the cache flush if the device is told to do 21076 * so by sd.conf or disk table 21077 */ 21078 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 21079 skip the cache flush since suppress_cache_flush is %d!\n", 21080 un->un_f_suppress_cache_flush); 21081 21082 if (is_async == TRUE) { 21083 /* invoke callback for asynchronous flush */ 21084 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 21085 } 21086 return (rval); 21087 } 21088 mutex_exit(SD_MUTEX(un)); 21089 21090 /* 21091 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 21092 * set properly 21093 */ 21094 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 21095 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 21096 21097 
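/*
 * (Editorial note: the mutex acquired below protects
 * un_f_sync_nv_supported, which the completion handler
 * sd_send_scsi_SYNCHRONIZE_CACHE_biodone() may clear if the device
 * rejects the SYNC_NV bit.)
 */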
mutex_enter(SD_MUTEX(un)); 21098 if (dkc != NULL && un->un_f_sync_nv_supported && 21099 (dkc->dkc_flag & FLUSH_VOLATILE)) { 21100 /* 21101 * if the device supports SYNC_NV bit, turn on 21102 * the SYNC_NV bit to only flush volatile cache 21103 */ 21104 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 21105 } 21106 mutex_exit(SD_MUTEX(un)); 21107 21108 /* 21109 * First get some memory for the uscsi_cmd struct and cdb 21110 * and initialize for SYNCHRONIZE_CACHE cmd. 21111 */ 21112 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 21113 uscmd->uscsi_cdblen = CDB_GROUP1; 21114 uscmd->uscsi_cdb = (caddr_t)cdb; 21115 uscmd->uscsi_bufaddr = NULL; 21116 uscmd->uscsi_buflen = 0; 21117 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 21118 uscmd->uscsi_rqlen = SENSE_LENGTH; 21119 uscmd->uscsi_rqresid = SENSE_LENGTH; 21120 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 21121 uscmd->uscsi_timeout = sd_io_time; 21122 21123 /* 21124 * Allocate an sd_uscsi_info struct and fill it with the info 21125 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 21126 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 21127 * since we allocate the buf here in this function, we do not 21128 * need to preserve the prior contents of b_private. 21129 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 21130 */ 21131 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 21132 uip->ui_flags = SD_PATH_DIRECT; 21133 uip->ui_cmdp = uscmd; 21134 21135 bp = getrbuf(KM_SLEEP); 21136 bp->b_private = uip; 21137 21138 /* 21139 * Setup buffer to carry uscsi request. 21140 */ 21141 bp->b_flags = B_BUSY; 21142 bp->b_bcount = 0; 21143 bp->b_blkno = 0; 21144 21145 if (is_async == TRUE) { 21146 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 21147 uip->ui_dkc = *dkc; 21148 } 21149 21150 bp->b_edev = SD_GET_DEV(un); 21151 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 21152 21153 /* 21154 * Unset un_f_sync_cache_required flag 21155 */ 21156 mutex_enter(SD_MUTEX(un)); 21157 un->un_f_sync_cache_required = FALSE; 21158 mutex_exit(SD_MUTEX(un)); 21159 21160 (void) sd_uscsi_strategy(bp); 21161 21162 /* 21163 * If synchronous request, wait for completion 21164 * If async just return and let b_iodone callback 21165 * cleanup. 21166 * NOTE: On return, u_ncmds_in_driver will be decremented, 21167 * but it was also incremented in sd_uscsi_strategy(), so 21168 * we should be ok. 21169 */ 21170 if (is_async == FALSE) { 21171 (void) biowait(bp); 21172 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 21173 } 21174 21175 return (rval); 21176 } 21177 21178 21179 static int 21180 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 21181 { 21182 struct sd_uscsi_info *uip; 21183 struct uscsi_cmd *uscmd; 21184 uint8_t *sense_buf; 21185 struct sd_lun *un; 21186 int status; 21187 union scsi_cdb *cdb; 21188 21189 uip = (struct sd_uscsi_info *)(bp->b_private); 21190 ASSERT(uip != NULL); 21191 21192 uscmd = uip->ui_cmdp; 21193 ASSERT(uscmd != NULL); 21194 21195 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 21196 ASSERT(sense_buf != NULL); 21197 21198 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 21199 ASSERT(un != NULL); 21200 21201 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 21202 21203 status = geterror(bp); 21204 switch (status) { 21205 case 0: 21206 break; /* Success! 
*/ 21207 case EIO: 21208 switch (uscmd->uscsi_status) { 21209 case STATUS_RESERVATION_CONFLICT: 21210 /* Ignore reservation conflict */ 21211 status = 0; 21212 goto done; 21213 21214 case STATUS_CHECK: 21215 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 21216 (scsi_sense_key(sense_buf) == 21217 KEY_ILLEGAL_REQUEST)) { 21218 /* Ignore Illegal Request error */ 21219 if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) { 21220 mutex_enter(SD_MUTEX(un)); 21221 un->un_f_sync_nv_supported = FALSE; 21222 mutex_exit(SD_MUTEX(un)); 21223 status = 0; 21224 SD_TRACE(SD_LOG_IO, un, 21225 "un_f_sync_nv_supported \ 21226 is set to false.\n"); 21227 goto done; 21228 } 21229 21230 mutex_enter(SD_MUTEX(un)); 21231 un->un_f_sync_cache_supported = FALSE; 21232 mutex_exit(SD_MUTEX(un)); 21233 SD_TRACE(SD_LOG_IO, un, 21234 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 21235 un_f_sync_cache_supported set to false \ 21236 with asc = %x, ascq = %x\n", 21237 scsi_sense_asc(sense_buf), 21238 scsi_sense_ascq(sense_buf)); 21239 status = ENOTSUP; 21240 goto done; 21241 } 21242 break; 21243 default: 21244 break; 21245 } 21246 /* FALLTHRU */ 21247 default: 21248 /* 21249 * Turn on the un_f_sync_cache_required flag 21250 * since the SYNC CACHE command failed 21251 */ 21252 mutex_enter(SD_MUTEX(un)); 21253 un->un_f_sync_cache_required = TRUE; 21254 mutex_exit(SD_MUTEX(un)); 21255 21256 /* 21257 * Don't log an error message if this device 21258 * has removable media. 21259 */ 21260 if (!un->un_f_has_removable_media) { 21261 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 21262 "SYNCHRONIZE CACHE command failed (%d)\n", status); 21263 } 21264 break; 21265 } 21266 21267 done: 21268 if (uip->ui_dkc.dkc_callback != NULL) { 21269 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 21270 } 21271 21272 ASSERT((bp->b_flags & B_REMAPPED) == 0); 21273 freerbuf(bp); 21274 kmem_free(uip, sizeof (struct sd_uscsi_info)); 21275 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 21276 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 21277 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 21278 21279 return (status); 21280 } 21281 21282 /* 21283 * Issues a single SCSI UNMAP command with a prepared UNMAP parameter list. 21284 * Returns zero on success, or the non-zero command error code on failure. 
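 *
 * Worked example (assuming the usual SBC sizes of an 8-byte parameter
 * list header and 16-byte block descriptors): with one descriptor,
 * param_size is 8 + 16 = 24, so the UNMAP DATA LENGTH field is set to
 * 24 - 2 = 22 and the BLOCK DESCRIPTOR DATA LENGTH field to
 * 24 - 8 = 16, matching the two BE_16() assignments in the function.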
21285 */ 21286 static int 21287 sd_send_scsi_UNMAP_issue_one(sd_ssc_t *ssc, unmap_param_hdr_t *uph, 21288 uint64_t num_descr, uint64_t bytes) 21289 { 21290 struct sd_lun *un = ssc->ssc_un; 21291 struct scsi_extended_sense sense_buf; 21292 union scsi_cdb cdb; 21293 struct uscsi_cmd ucmd_buf; 21294 int status; 21295 const uint64_t param_size = sizeof (unmap_param_hdr_t) + 21296 num_descr * sizeof (unmap_blk_descr_t); 21297 21298 ASSERT3U(param_size - 2, <=, UINT16_MAX); 21299 uph->uph_data_len = BE_16(param_size - 2); 21300 uph->uph_descr_data_len = BE_16(param_size - 8); 21301 21302 bzero(&cdb, sizeof (cdb)); 21303 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21304 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21305 21306 cdb.scc_cmd = SCMD_UNMAP; 21307 FORMG1COUNT(&cdb, param_size); 21308 21309 ucmd_buf.uscsi_cdb = (char *)&cdb; 21310 ucmd_buf.uscsi_cdblen = (uchar_t)CDB_GROUP1; 21311 ucmd_buf.uscsi_bufaddr = (caddr_t)uph; 21312 ucmd_buf.uscsi_buflen = param_size; 21313 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21314 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21315 ucmd_buf.uscsi_flags = USCSI_WRITE | USCSI_RQENABLE | USCSI_SILENT; 21316 ucmd_buf.uscsi_timeout = un->un_cmd_timeout; 21317 21318 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE, 21319 SD_PATH_STANDARD); 21320 21321 switch (status) { 21322 case 0: 21323 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21324 21325 if (un->un_unmapstats) { 21326 atomic_inc_64(&un->un_unmapstats->us_cmds.value.ui64); 21327 atomic_add_64(&un->un_unmapstats->us_extents.value.ui64, 21328 num_descr); 21329 atomic_add_64(&un->un_unmapstats->us_bytes.value.ui64, 21330 bytes); 21331 } 21332 break; /* Success! */ 21333 case EIO: 21334 if (un->un_unmapstats) 21335 atomic_inc_64(&un->un_unmapstats->us_errs.value.ui64); 21336 switch (ucmd_buf.uscsi_status) { 21337 case STATUS_RESERVATION_CONFLICT: 21338 status = EACCES; 21339 break; 21340 default: 21341 break; 21342 } 21343 break; 21344 default: 21345 if (un->un_unmapstats) 21346 atomic_inc_64(&un->un_unmapstats->us_errs.value.ui64); 21347 break; 21348 } 21349 21350 return (status); 21351 } 21352 21353 /* 21354 * Returns a pointer to the i'th block descriptor inside an UNMAP param list. 21355 */ 21356 static inline unmap_blk_descr_t * 21357 UNMAP_blk_descr_i(void *buf, size_t i) 21358 { 21359 return ((unmap_blk_descr_t *)((uintptr_t)buf + 21360 sizeof (unmap_param_hdr_t) + (i * sizeof (unmap_blk_descr_t)))); 21361 } 21362 21363 /* 21364 * Takes the list of extents from sd_send_scsi_UNMAP, chops it up, prepares 21365 * UNMAP block descriptors and issues individual SCSI UNMAP commands. While 21366 * doing so we consult the block limits to determine at most how many 21367 * extents and LBAs we can UNMAP in one command. 21368 * If a command fails for whatever reason, extent list processing is aborted 21369 * and the failed command's status is returned. Otherwise returns 0 on 21370 * success.
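 * As a worked example (illustrative numbers, not device data): a device
 * reporting lim_max_unmap_lba_cnt of 0x200000 with a 512-byte target
 * block size gives a per-command budget of 0x200000 * 512 bytes, i.e.
 * 1 GiB, so unmapping a 3 GiB extent list takes at least three UNMAP
 * commands.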
21371 */ 21372 static int 21373 sd_send_scsi_UNMAP_issue(dev_t dev, sd_ssc_t *ssc, const dkioc_free_list_t *dfl) 21374 { 21375 struct sd_lun *un = ssc->ssc_un; 21376 unmap_param_hdr_t *uph; 21377 sd_blk_limits_t *lim = &un->un_blk_lim; 21378 int rval = 0; 21379 int partition; 21380 /* partition offset & length in system blocks */ 21381 diskaddr_t part_off_sysblks = 0, part_len_sysblks = 0; 21382 uint64_t part_off, part_len; 21383 uint64_t descr_cnt_lim, byte_cnt_lim; 21384 uint64_t descr_issued = 0, bytes_issued = 0; 21385 21386 uph = kmem_zalloc(SD_UNMAP_PARAM_LIST_MAXSZ, KM_SLEEP); 21387 21388 partition = SDPART(dev); 21389 rval = cmlb_partinfo(un->un_cmlbhandle, partition, &part_len_sysblks, 21390 &part_off_sysblks, NULL, NULL, (void *)SD_PATH_DIRECT); 21391 if (rval != 0) 21392 goto out; 21393 part_off = SD_SYSBLOCKS2BYTES(part_off_sysblks); 21394 part_len = SD_SYSBLOCKS2BYTES(part_len_sysblks); 21395 21396 ASSERT(un->un_blk_lim.lim_max_unmap_lba_cnt != 0); 21397 ASSERT(un->un_blk_lim.lim_max_unmap_descr_cnt != 0); 21398 /* Spec says 0xffffffff are special values, so compute maximums. */ 21399 byte_cnt_lim = lim->lim_max_unmap_lba_cnt < UINT32_MAX ? 21400 (uint64_t)lim->lim_max_unmap_lba_cnt * un->un_tgt_blocksize : 21401 UINT64_MAX; 21402 descr_cnt_lim = MIN(lim->lim_max_unmap_descr_cnt, SD_UNMAP_MAX_DESCR); 21403 21404 if (dfl->dfl_offset >= part_len) { 21405 rval = SET_ERROR(EINVAL); 21406 goto out; 21407 } 21408 21409 for (size_t i = 0; i < dfl->dfl_num_exts; i++) { 21410 const dkioc_free_list_ext_t *ext = &dfl->dfl_exts[i]; 21411 uint64_t ext_start = ext->dfle_start; 21412 uint64_t ext_length = ext->dfle_length; 21413 21414 while (ext_length > 0) { 21415 unmap_blk_descr_t *ubd; 21416 /* Respect device limit on LBA count per command */ 21417 uint64_t len = MIN(MIN(ext_length, byte_cnt_lim - 21418 bytes_issued), SD_TGTBLOCKS2BYTES(un, UINT32_MAX)); 21419 21420 /* check partition limits */ 21421 if (ext_start >= part_len || 21422 ext_start + len < ext_start || 21423 dfl->dfl_offset + ext_start + len < 21424 dfl->dfl_offset || 21425 dfl->dfl_offset + ext_start + len > part_len) { 21426 rval = SET_ERROR(EINVAL); 21427 goto out; 21428 } 21429 21430 ASSERT3U(descr_issued, <, descr_cnt_lim); 21431 ASSERT3U(bytes_issued, <, byte_cnt_lim); 21432 ubd = UNMAP_blk_descr_i(uph, descr_issued); 21433 21434 /* adjust in-partition addresses to be device-global */ 21435 ubd->ubd_lba = BE_64(SD_BYTES2TGTBLOCKS(un, 21436 dfl->dfl_offset + ext_start + part_off)); 21437 ubd->ubd_lba_cnt = BE_32(SD_BYTES2TGTBLOCKS(un, len)); 21438 21439 descr_issued++; 21440 bytes_issued += len; 21441 21442 /* Issue command when device limits reached */ 21443 if (descr_issued == descr_cnt_lim || 21444 bytes_issued == byte_cnt_lim) { 21445 rval = sd_send_scsi_UNMAP_issue_one(ssc, uph, 21446 descr_issued, bytes_issued); 21447 if (rval != 0) 21448 goto out; 21449 descr_issued = 0; 21450 bytes_issued = 0; 21451 } 21452 21453 ext_start += len; 21454 ext_length -= len; 21455 } 21456 } 21457 21458 if (descr_issued > 0) { 21459 /* issue last command */ 21460 rval = sd_send_scsi_UNMAP_issue_one(ssc, uph, descr_issued, 21461 bytes_issued); 21462 } 21463 21464 out: 21465 kmem_free(uph, SD_UNMAP_PARAM_LIST_MAXSZ); 21466 return (rval); 21467 } 21468 21469 /* 21470 * Issues one or several UNMAP commands based on a list of extents to be 21471 * unmapped. 
The internal multi-command processing is hidden, as the exact 21472 * number of commands and extents per command is limited by both SCSI 21473 * command syntax and device limits (as expressed in the SCSI Block Limits 21474 * VPD page and un_blk_lim in struct sd_lun). 21475 * Returns zero on success, or the error code of the first failed SCSI UNMAP 21476 * command. 21477 */ 21478 static int 21479 sd_send_scsi_UNMAP(dev_t dev, sd_ssc_t *ssc, dkioc_free_list_t *dfl, int flag) 21480 { 21481 struct sd_lun *un = ssc->ssc_un; 21482 int rval = 0; 21483 21484 ASSERT(!mutex_owned(SD_MUTEX(un))); 21485 ASSERT(dfl != NULL); 21486 21487 /* Per spec, any of these conditions signals lack of UNMAP support. */ 21488 if (!(un->un_thin_flags & SD_THIN_PROV_ENABLED) || 21489 un->un_blk_lim.lim_max_unmap_descr_cnt == 0 || 21490 un->un_blk_lim.lim_max_unmap_lba_cnt == 0) { 21491 return (SET_ERROR(ENOTSUP)); 21492 } 21493 21494 /* For userspace calls we must copy in. */ 21495 if (!(flag & FKIOCTL)) { 21496 int err = dfl_copyin(dfl, &dfl, flag, KM_SLEEP); 21497 if (err != 0) 21498 return (err); 21499 } else if (dfl->dfl_num_exts > DFL_COPYIN_MAX_EXTS) { 21500 ASSERT3U(dfl->dfl_num_exts, <=, DFL_COPYIN_MAX_EXTS); 21501 return (SET_ERROR(EINVAL)); 21502 } 21503 21504 rval = sd_send_scsi_UNMAP_issue(dev, ssc, dfl); 21505 21506 if (!(flag & FKIOCTL)) { 21507 dfl_free(dfl); 21508 dfl = NULL; 21509 } 21510 21511 return (rval); 21512 } 21513 21514 /* 21515 * Function: sd_send_scsi_GET_CONFIGURATION 21516 * 21517 * Description: Issues the get configuration command to the device. 21518 * Called from sd_check_for_writable_cd & sd_get_media_info; 21519 * the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN. 21520 * Arguments: ssc 21521 * ucmdbuf 21522 * rqbuf 21523 * rqbuflen 21524 * bufaddr 21525 * buflen 21526 * path_flag 21527 * 21528 * Return Code: 0 - Success 21529 * errno return code from sd_ssc_send() 21530 * 21531 * Context: Can sleep. Does not return until command is completed. 21532 * 21533 */ 21534 21535 static int 21536 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf, 21537 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 21538 int path_flag) 21539 { 21540 char cdb[CDB_GROUP1]; 21541 int status; 21542 struct sd_lun *un; 21543 21544 ASSERT(ssc != NULL); 21545 un = ssc->ssc_un; 21546 ASSERT(un != NULL); 21547 ASSERT(!mutex_owned(SD_MUTEX(un))); 21548 ASSERT(bufaddr != NULL); 21549 ASSERT(ucmdbuf != NULL); 21550 ASSERT(rqbuf != NULL); 21551 21552 SD_TRACE(SD_LOG_IO, un, 21553 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 21554 21555 bzero(cdb, sizeof (cdb)); 21556 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 21557 bzero(rqbuf, rqbuflen); 21558 bzero(bufaddr, buflen); 21559 21560 /* 21561 * Set up cdb field for the get configuration command. 21562 */ 21563 cdb[0] = SCMD_GET_CONFIGURATION; 21564 cdb[1] = 0x02; /* Requested Type */ 21565 cdb[8] = SD_PROFILE_HEADER_LEN; 21566 ucmdbuf->uscsi_cdb = cdb; 21567 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21568 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21569 ucmdbuf->uscsi_buflen = buflen; 21570 ucmdbuf->uscsi_timeout = sd_io_time; 21571 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21572 ucmdbuf->uscsi_rqlen = rqbuflen; 21573 ucmdbuf->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT | USCSI_READ; 21574 21575 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21576 UIO_SYSSPACE, path_flag); 21577 21578 switch (status) { 21579 case 0: 21580 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21581 break; /* Success!
*/ 21582 case EIO: 21583 switch (ucmdbuf->uscsi_status) { 21584 case STATUS_RESERVATION_CONFLICT: 21585 status = EACCES; 21586 break; 21587 default: 21588 break; 21589 } 21590 break; 21591 default: 21592 break; 21593 } 21594 21595 if (status == 0) { 21596 SD_DUMP_MEMORY(un, SD_LOG_IO, 21597 "sd_send_scsi_GET_CONFIGURATION: data", 21598 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 21599 } 21600 21601 SD_TRACE(SD_LOG_IO, un, 21602 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 21603 21604 return (status); 21605 } 21606 21607 /* 21608 * Function: sd_send_scsi_feature_GET_CONFIGURATION 21609 * 21610 * Description: Issues the get configuration command to the device to 21611 * retrieve a specific feature. Called from 21612 * sd_check_for_writable_cd & sd_set_mmc_caps. 21613 * Arguments: ssc 21614 * ucmdbuf 21615 * rqbuf 21616 * rqbuflen 21617 * bufaddr 21618 * buflen 21619 * feature 21620 * 21621 * Return Code: 0 - Success 21622 * errno return code from sd_ssc_send() 21623 * 21624 * Context: Can sleep. Does not return until command is completed. 21625 * 21626 */ 21627 static int 21628 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf, 21629 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 21630 char feature, int path_flag) 21631 { 21632 char cdb[CDB_GROUP1]; 21633 int status; 21634 struct sd_lun *un; 21635 21636 ASSERT(ssc != NULL); 21637 un = ssc->ssc_un; 21638 ASSERT(un != NULL); 21639 ASSERT(!mutex_owned(SD_MUTEX(un))); 21640 ASSERT(bufaddr != NULL); 21641 ASSERT(ucmdbuf != NULL); 21642 ASSERT(rqbuf != NULL); 21643 21644 SD_TRACE(SD_LOG_IO, un, 21645 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 21646 21647 bzero(cdb, sizeof (cdb)); 21648 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 21649 bzero(rqbuf, rqbuflen); 21650 bzero(bufaddr, buflen); 21651 21652 /* 21653 * Set up cdb field for the get configuration command. 21654 */ 21655 cdb[0] = SCMD_GET_CONFIGURATION; 21656 cdb[1] = 0x02; /* Requested Type */ 21657 cdb[3] = feature; 21658 cdb[8] = buflen; 21659 ucmdbuf->uscsi_cdb = cdb; 21660 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 21661 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 21662 ucmdbuf->uscsi_buflen = buflen; 21663 ucmdbuf->uscsi_timeout = sd_io_time; 21664 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 21665 ucmdbuf->uscsi_rqlen = rqbuflen; 21666 ucmdbuf->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT | USCSI_READ; 21667 21668 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 21669 UIO_SYSSPACE, path_flag); 21670 21671 switch (status) { 21672 case 0: 21673 21674 break; /* Success! */ 21675 case EIO: 21676 switch (ucmdbuf->uscsi_status) { 21677 case STATUS_RESERVATION_CONFLICT: 21678 status = EACCES; 21679 break; 21680 default: 21681 break; 21682 } 21683 break; 21684 default: 21685 break; 21686 } 21687 21688 if (status == 0) { 21689 SD_DUMP_MEMORY(un, SD_LOG_IO, 21690 "sd_send_scsi_feature_GET_CONFIGURATION: data", 21691 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 21692 } 21693 21694 SD_TRACE(SD_LOG_IO, un, 21695 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 21696 21697 return (status); 21698 } 21699 21700 21701 /* 21702 * Function: sd_send_scsi_MODE_SENSE 21703 * 21704 * Description: Utility function for issuing a scsi MODE SENSE command. 21705 * Note: This routine uses a consistent implementation for Group0, 21706 * Group1, and Group2 commands across all platforms. 
ATAPI devices 21707 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 21708 * 21709 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21710 * structure for this target. 21711 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte) or 21712 * CDB_GROUP[1|2] (10 byte)). 21713 * bufaddr - buffer for page data retrieved from the target. 21714 * buflen - size of page to be retrieved. 21715 * page_code - page code of data to be retrieved from the target. 21716 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21717 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21718 * to use the USCSI "direct" chain and bypass the normal 21719 * command waitq. 21720 * 21721 * Return Code: 0 - Success 21722 * errno return code from sd_ssc_send() 21723 * 21724 * Context: Can sleep. Does not return until command is completed. 21725 */ 21726 21727 static int 21728 sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 21729 size_t buflen, uchar_t page_code, int path_flag) 21730 { 21731 struct scsi_extended_sense sense_buf; 21732 union scsi_cdb cdb; 21733 struct uscsi_cmd ucmd_buf; 21734 int status; 21735 int headlen; 21736 struct sd_lun *un; 21737 21738 ASSERT(ssc != NULL); 21739 un = ssc->ssc_un; 21740 ASSERT(un != NULL); 21741 ASSERT(!mutex_owned(SD_MUTEX(un))); 21742 ASSERT(bufaddr != NULL); 21743 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 21744 (cdbsize == CDB_GROUP2)); 21745 21746 SD_TRACE(SD_LOG_IO, un, 21747 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 21748 21749 bzero(&cdb, sizeof (cdb)); 21750 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21751 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21752 bzero(bufaddr, buflen); 21753 21754 if (cdbsize == CDB_GROUP0) { 21755 cdb.scc_cmd = SCMD_MODE_SENSE; 21756 cdb.cdb_opaque[2] = page_code; 21757 FORMG0COUNT(&cdb, buflen); 21758 headlen = MODE_HEADER_LENGTH; 21759 } else { 21760 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 21761 cdb.cdb_opaque[2] = page_code; 21762 FORMG1COUNT(&cdb, buflen); 21763 headlen = MODE_HEADER_LENGTH_GRP2; 21764 } 21765 21766 ASSERT(headlen <= buflen); 21767 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21768 21769 ucmd_buf.uscsi_cdb = (char *)&cdb; 21770 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21771 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21772 ucmd_buf.uscsi_buflen = buflen; 21773 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21774 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21775 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 21776 ucmd_buf.uscsi_timeout = 60; 21777 21778 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21779 UIO_SYSSPACE, path_flag); 21780 21781 switch (status) { 21782 case 0: 21783 /* 21784 * sr_check_wp() uses the 0x3f page code and checks the mode 21785 * page header to determine if the target device is 21786 * write-protected. But some USB devices return 0 bytes for 21787 * the 0x3f page code. For this case, make sure that at least 21788 * the mode page header is returned. 21789 */ 21790 if (buflen - ucmd_buf.uscsi_resid < headlen) { 21791 status = EIO; 21792 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 21793 "mode page header is not returned"); 21794 } 21795 break; /* Success!
*/ 21796 case EIO: 21797 switch (ucmd_buf.uscsi_status) { 21798 case STATUS_RESERVATION_CONFLICT: 21799 status = EACCES; 21800 break; 21801 default: 21802 break; 21803 } 21804 break; 21805 default: 21806 break; 21807 } 21808 21809 if (status == 0) { 21810 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data", 21811 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21812 } 21813 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 21814 21815 return (status); 21816 } 21817 21818 21819 /* 21820 * Function: sd_send_scsi_MODE_SELECT 21821 * 21822 * Description: Utility function for issuing a scsi MODE SELECT command. 21823 * Note: This routine uses a consistent implementation for Group0, 21824 * Group1, and Group2 commands across all platforms. ATAPI devices 21825 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 21826 * 21827 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21828 * structure for this target. 21829 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 21830 * CDB_GROUP[1|2] (10 byte). 21831 * bufaddr - buffer for page data retrieved from the target. 21832 * buflen - size of page to be retrieved. 21833 * save_page - boolean to determin if SP bit should be set. 21834 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 21835 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 21836 * to use the USCSI "direct" chain and bypass the normal 21837 * command waitq. 21838 * 21839 * Return Code: 0 - Success 21840 * errno return code from sd_ssc_send() 21841 * 21842 * Context: Can sleep. Does not return until command is completed. 21843 */ 21844 21845 static int 21846 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 21847 size_t buflen, uchar_t save_page, int path_flag) 21848 { 21849 struct scsi_extended_sense sense_buf; 21850 union scsi_cdb cdb; 21851 struct uscsi_cmd ucmd_buf; 21852 int status; 21853 struct sd_lun *un; 21854 21855 ASSERT(ssc != NULL); 21856 un = ssc->ssc_un; 21857 ASSERT(un != NULL); 21858 ASSERT(!mutex_owned(SD_MUTEX(un))); 21859 ASSERT(bufaddr != NULL); 21860 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 21861 (cdbsize == CDB_GROUP2)); 21862 21863 SD_TRACE(SD_LOG_IO, un, 21864 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 21865 21866 bzero(&cdb, sizeof (cdb)); 21867 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21868 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21869 21870 /* Set the PF bit for many third party drives */ 21871 cdb.cdb_opaque[1] = 0x10; 21872 21873 /* Set the savepage(SP) bit if given */ 21874 if (save_page == SD_SAVE_PAGE) { 21875 cdb.cdb_opaque[1] |= 0x01; 21876 } 21877 21878 if (cdbsize == CDB_GROUP0) { 21879 cdb.scc_cmd = SCMD_MODE_SELECT; 21880 FORMG0COUNT(&cdb, buflen); 21881 } else { 21882 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 21883 FORMG1COUNT(&cdb, buflen); 21884 } 21885 21886 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21887 21888 ucmd_buf.uscsi_cdb = (char *)&cdb; 21889 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21890 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21891 ucmd_buf.uscsi_buflen = buflen; 21892 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21893 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21894 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 21895 ucmd_buf.uscsi_timeout = 60; 21896 21897 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21898 UIO_SYSSPACE, path_flag); 21899 21900 switch (status) { 21901 case 0: 21902 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21903 break; /* Success! 
        case EIO:
                switch (ucmd_buf.uscsi_status) {
                case STATUS_RESERVATION_CONFLICT:
                        status = EACCES;
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        if (status == 0) {
                SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
                    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
        }
        SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");

        return (status);
}


/*
 * Function: sd_send_scsi_RDWR
 *
 * Description: Issue a scsi READ or WRITE command with the given parameters.
 *
 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
 *                  structure for this target.
 *		cmd:	SCMD_READ or SCMD_WRITE
 *		bufaddr: Address of caller's buffer to receive the RDWR data
 *		buflen:  Length of caller's buffer to receive the RDWR data.
 *		start_block: Block number for the start of the RDWR operation.
 *			 (Assumes target-native block size.)
 *		residp:  Pointer to variable to receive the residual of the
 *			 RDWR operation (may be NULL if no residual requested).
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_ssc_send()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag)
{
        struct scsi_extended_sense sense_buf;
        union scsi_cdb cdb;
        struct uscsi_cmd ucmd_buf;
        uint32_t block_count;
        int status;
        int cdbsize;
        uchar_t flag;
        struct sd_lun *un;

        ASSERT(ssc != NULL);
        un = ssc->ssc_un;
        ASSERT(un != NULL);
        ASSERT(!mutex_owned(SD_MUTEX(un)));
        ASSERT(bufaddr != NULL);
        ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));

        SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);

        if (un->un_f_tgt_blocksize_is_valid != TRUE) {
                return (EINVAL);
        }

        mutex_enter(SD_MUTEX(un));
        block_count = SD_BYTES2TGTBLOCKS(un, buflen);
        mutex_exit(SD_MUTEX(un));

        flag = (cmd == SCMD_READ) ?
USCSI_READ : USCSI_WRITE; 21983 21984 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 21985 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 21986 bufaddr, buflen, start_block, block_count); 21987 21988 bzero(&cdb, sizeof (cdb)); 21989 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21990 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21991 21992 /* Compute CDB size to use */ 21993 if (start_block > 0xffffffff) 21994 cdbsize = CDB_GROUP4; 21995 else if ((start_block & 0xFFE00000) || 21996 (un->un_f_cfg_is_atapi == TRUE)) 21997 cdbsize = CDB_GROUP1; 21998 else 21999 cdbsize = CDB_GROUP0; 22000 22001 switch (cdbsize) { 22002 case CDB_GROUP0: /* 6-byte CDBs */ 22003 cdb.scc_cmd = cmd; 22004 FORMG0ADDR(&cdb, start_block); 22005 FORMG0COUNT(&cdb, block_count); 22006 break; 22007 case CDB_GROUP1: /* 10-byte CDBs */ 22008 cdb.scc_cmd = cmd | SCMD_GROUP1; 22009 FORMG1ADDR(&cdb, start_block); 22010 FORMG1COUNT(&cdb, block_count); 22011 break; 22012 case CDB_GROUP4: /* 16-byte CDBs */ 22013 cdb.scc_cmd = cmd | SCMD_GROUP4; 22014 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 22015 FORMG4COUNT(&cdb, block_count); 22016 break; 22017 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 22018 default: 22019 /* All others reserved */ 22020 return (EINVAL); 22021 } 22022 22023 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 22024 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 22025 22026 ucmd_buf.uscsi_cdb = (char *)&cdb; 22027 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 22028 ucmd_buf.uscsi_bufaddr = bufaddr; 22029 ucmd_buf.uscsi_buflen = buflen; 22030 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 22031 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 22032 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 22033 ucmd_buf.uscsi_timeout = 60; 22034 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 22035 UIO_SYSSPACE, path_flag); 22036 22037 switch (status) { 22038 case 0: 22039 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 22040 break; /* Success! */ 22041 case EIO: 22042 switch (ucmd_buf.uscsi_status) { 22043 case STATUS_RESERVATION_CONFLICT: 22044 status = EACCES; 22045 break; 22046 default: 22047 break; 22048 } 22049 break; 22050 default: 22051 break; 22052 } 22053 22054 if (status == 0) { 22055 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 22056 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 22057 } 22058 22059 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 22060 22061 return (status); 22062 } 22063 22064 22065 /* 22066 * Function: sd_send_scsi_LOG_SENSE 22067 * 22068 * Description: Issue a scsi LOG_SENSE command with the given parameters. 22069 * 22070 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 22071 * structure for this target. 22072 * 22073 * Return Code: 0 - Success 22074 * errno return code from sd_ssc_send() 22075 * 22076 * Context: Can sleep. Does not return until command is completed. 
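 *
 *		As a worked illustration of the CDB packing done below
 *		(values are arbitrary examples): requesting the current
 *		cumulative values (page_control = 1) of the temperature page
 *		(page_code = 0x0D) starting at param_ptr = 0x0100 yields
 *
 *		    cdb[2] = (1 << 6) | 0x0D = 0x4D	PC and page code
 *		    cdb[5] = 0x01			parameter pointer, MSB
 *		    cdb[6] = 0x00			parameter pointer, LSB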
22077 */ 22078 22079 static int 22080 sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen, 22081 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, int path_flag) 22082 { 22083 struct scsi_extended_sense sense_buf; 22084 union scsi_cdb cdb; 22085 struct uscsi_cmd ucmd_buf; 22086 int status; 22087 struct sd_lun *un; 22088 22089 ASSERT(ssc != NULL); 22090 un = ssc->ssc_un; 22091 ASSERT(un != NULL); 22092 ASSERT(!mutex_owned(SD_MUTEX(un))); 22093 22094 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 22095 22096 bzero(&cdb, sizeof (cdb)); 22097 bzero(&ucmd_buf, sizeof (ucmd_buf)); 22098 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 22099 22100 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 22101 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 22102 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 22103 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 22104 FORMG1COUNT(&cdb, buflen); 22105 22106 ucmd_buf.uscsi_cdb = (char *)&cdb; 22107 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 22108 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 22109 ucmd_buf.uscsi_buflen = buflen; 22110 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 22111 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 22112 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 22113 ucmd_buf.uscsi_timeout = 60; 22114 22115 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 22116 UIO_SYSSPACE, path_flag); 22117 22118 switch (status) { 22119 case 0: 22120 break; 22121 case EIO: 22122 switch (ucmd_buf.uscsi_status) { 22123 case STATUS_RESERVATION_CONFLICT: 22124 status = EACCES; 22125 break; 22126 case STATUS_CHECK: 22127 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 22128 (scsi_sense_key((uint8_t *)&sense_buf) == 22129 KEY_ILLEGAL_REQUEST) && 22130 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 22131 /* 22132 * ASC 0x24: INVALID FIELD IN CDB 22133 */ 22134 switch (page_code) { 22135 case START_STOP_CYCLE_PAGE: 22136 /* 22137 * The start stop cycle counter is 22138 * implemented as page 0x31 in earlier 22139 * generation disks. In new generation 22140 * disks the start stop cycle counter is 22141 * implemented as page 0xE. To properly 22142 * handle this case if an attempt for 22143 * log page 0xE is made and fails we 22144 * will try again using page 0x31. 22145 * 22146 * Network storage BU committed to 22147 * maintain the page 0x31 for this 22148 * purpose and will not have any other 22149 * page implemented with page code 0x31 22150 * until all disks transition to the 22151 * standard page. 
22152 */ 22153 mutex_enter(SD_MUTEX(un)); 22154 un->un_start_stop_cycle_page = 22155 START_STOP_CYCLE_VU_PAGE; 22156 cdb.cdb_opaque[2] = 22157 (char)(page_control << 6) | 22158 un->un_start_stop_cycle_page; 22159 mutex_exit(SD_MUTEX(un)); 22160 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22161 status = sd_ssc_send( 22162 ssc, &ucmd_buf, FKIOCTL, 22163 UIO_SYSSPACE, path_flag); 22164 22165 break; 22166 case TEMPERATURE_PAGE: 22167 status = ENOTTY; 22168 break; 22169 default: 22170 break; 22171 } 22172 } 22173 break; 22174 default: 22175 break; 22176 } 22177 break; 22178 default: 22179 break; 22180 } 22181 22182 if (status == 0) { 22183 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 22184 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 22185 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 22186 } 22187 22188 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 22189 22190 return (status); 22191 } 22192 22193 22194 /* 22195 * Function: sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION 22196 * 22197 * Description: Issue the scsi GET EVENT STATUS NOTIFICATION command. 22198 * 22199 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 22200 * structure for this target. 22201 * bufaddr 22202 * buflen 22203 * class_req 22204 * 22205 * Return Code: 0 - Success 22206 * errno return code from sd_ssc_send() 22207 * 22208 * Context: Can sleep. Does not return until command is completed. 22209 */ 22210 22211 static int 22212 sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, uchar_t *bufaddr, 22213 size_t buflen, uchar_t class_req) 22214 { 22215 union scsi_cdb cdb; 22216 struct uscsi_cmd ucmd_buf; 22217 int status; 22218 struct sd_lun *un; 22219 22220 ASSERT(ssc != NULL); 22221 un = ssc->ssc_un; 22222 ASSERT(un != NULL); 22223 ASSERT(!mutex_owned(SD_MUTEX(un))); 22224 ASSERT(bufaddr != NULL); 22225 22226 SD_TRACE(SD_LOG_IO, un, 22227 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: entry: un:0x%p\n", un); 22228 22229 bzero(&cdb, sizeof (cdb)); 22230 bzero(&ucmd_buf, sizeof (ucmd_buf)); 22231 bzero(bufaddr, buflen); 22232 22233 cdb.scc_cmd = SCMD_GET_EVENT_STATUS_NOTIFICATION; 22234 cdb.cdb_opaque[1] = 1; /* polled */ 22235 cdb.cdb_opaque[4] = class_req; 22236 FORMG1COUNT(&cdb, buflen); 22237 22238 ucmd_buf.uscsi_cdb = (char *)&cdb; 22239 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 22240 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 22241 ucmd_buf.uscsi_buflen = buflen; 22242 ucmd_buf.uscsi_rqbuf = NULL; 22243 ucmd_buf.uscsi_rqlen = 0; 22244 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 22245 ucmd_buf.uscsi_timeout = 60; 22246 22247 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 22248 UIO_SYSSPACE, SD_PATH_DIRECT); 22249 22250 /* 22251 * Only handle status == 0, the upper-level caller 22252 * will put different assessment based on the context. 22253 */ 22254 if (status == 0) { 22255 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 22256 22257 if (ucmd_buf.uscsi_resid != 0) { 22258 status = EIO; 22259 } 22260 } 22261 22262 SD_TRACE(SD_LOG_IO, un, 22263 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: exit\n"); 22264 22265 return (status); 22266 } 22267 22268 22269 static boolean_t 22270 sd_gesn_media_data_valid(uchar_t *data) 22271 { 22272 uint16_t len; 22273 22274 len = (data[1] << 8) | data[0]; 22275 return ((len >= 6) && 22276 ((data[2] & SD_GESN_HEADER_NEA) == 0) && 22277 ((data[2] & SD_GESN_HEADER_CLASS) == SD_GESN_MEDIA_CLASS) && 22278 ((data[3] & (1 << SD_GESN_MEDIA_CLASS)) != 0)); 22279 } 22280 22281 22282 /* 22283 * Function: sdioctl 22284 * 22285 * Description: Driver's ioctl(9e) entry point function. 
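 *		For orientation, a typical userland consumer reaches this
 *		entry point roughly as follows (a hypothetical sketch; the
 *		device path is an example, not something the driver mandates):
 *
 *		    int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY);
 *		    struct dk_minfo mi;
 *
 *		    if (fd >= 0 && ioctl(fd, DKIOCGMEDIAINFO, &mi) == 0)
 *			    (void) printf("lbsize %u\n", mi.dki_lbsize);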
 *
 * Arguments: dev - device number
 *		cmd - ioctl operation to be performed
 *		arg - user argument, contains data to be set or reference
 *			parameter for get
 *		flag - bit flag, indicating open settings, 32/64 bit type
 *		cred_p - user credential pointer
 *		rval_p - calling process return value (OPT)
 *
 * Return Code: EINVAL
 *		ENOTTY
 *		ENXIO
 *		EIO
 *		EFAULT
 *		ENOTSUP
 *		EPERM
 *
 * Context: Called from the device switch at normal priority.
 */

static int
sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
{
        struct sd_lun *un = NULL;
        int err = 0;
        int i = 0;
        cred_t *cr;
        int tmprval = EINVAL;
        boolean_t is_valid;
        sd_ssc_t *ssc;

        /*
         * All device accesses go thru sdstrategy where we check on suspend
         * status
         */
        if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
                return (ENXIO);
        }

        ASSERT(!mutex_owned(SD_MUTEX(un)));

        /* Initialize sd_ssc_t for internal uscsi commands */
        ssc = sd_ssc_init(un);

        is_valid = SD_IS_VALID_LABEL(un);

        /*
         * Moved this wait from sd_uscsi_strategy to here for
         * reasons of deadlock prevention. Internal driver commands,
         * specifically those to change a device's power level, result
         * in a call to sd_uscsi_strategy.
         */
        mutex_enter(SD_MUTEX(un));
        while ((un->un_state == SD_STATE_SUSPENDED) ||
            (un->un_state == SD_STATE_PM_CHANGING)) {
                cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
        }
        /*
         * Twiddling the counter here protects commands from now
         * through to the top of sd_uscsi_strategy. Without the
         * counter increment, a power down, for example, could get in
         * after the above check for state is made and before
         * execution gets to the top of sd_uscsi_strategy.
         * That would cause problems.
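         *
         * In outline, the guard used on every path through this function
         * is the following (a summary of the code below, not additional
         * logic):
         *
         *	mutex_enter(SD_MUTEX(un));
         *	while (suspended or PM state change in progress)
         *		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
         *	un->un_ncmds_in_driver++;	(blocks a new power-down)
         *	mutex_exit(SD_MUTEX(un));
         *	... handle the ioctl ...
         *	mutex_enter(SD_MUTEX(un));
         *	un->un_ncmds_in_driver--;
         *	mutex_exit(SD_MUTEX(un));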
22350 */ 22351 un->un_ncmds_in_driver++; 22352 22353 if (!is_valid && 22354 (flag & (FNDELAY | FNONBLOCK))) { 22355 switch (cmd) { 22356 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 22357 case DKIOCGVTOC: 22358 case DKIOCGEXTVTOC: 22359 case DKIOCGAPART: 22360 case DKIOCPARTINFO: 22361 case DKIOCEXTPARTINFO: 22362 case DKIOCSGEOM: 22363 case DKIOCSAPART: 22364 case DKIOCGETEFI: 22365 case DKIOCPARTITION: 22366 case DKIOCSVTOC: 22367 case DKIOCSEXTVTOC: 22368 case DKIOCSETEFI: 22369 case DKIOCGMBOOT: 22370 case DKIOCSMBOOT: 22371 case DKIOCG_PHYGEOM: 22372 case DKIOCG_VIRTGEOM: 22373 #if defined(__x86) 22374 case DKIOCSETEXTPART: 22375 #endif 22376 /* let cmlb handle it */ 22377 goto skip_ready_valid; 22378 22379 case CDROMPAUSE: 22380 case CDROMRESUME: 22381 case CDROMPLAYMSF: 22382 case CDROMPLAYTRKIND: 22383 case CDROMREADTOCHDR: 22384 case CDROMREADTOCENTRY: 22385 case CDROMSTOP: 22386 case CDROMSTART: 22387 case CDROMVOLCTRL: 22388 case CDROMSUBCHNL: 22389 case CDROMREADMODE2: 22390 case CDROMREADMODE1: 22391 case CDROMREADOFFSET: 22392 case CDROMSBLKMODE: 22393 case CDROMGBLKMODE: 22394 case CDROMGDRVSPEED: 22395 case CDROMSDRVSPEED: 22396 case CDROMCDDA: 22397 case CDROMCDXA: 22398 case CDROMSUBCODE: 22399 if (!ISCD(un)) { 22400 un->un_ncmds_in_driver--; 22401 ASSERT(un->un_ncmds_in_driver >= 0); 22402 mutex_exit(SD_MUTEX(un)); 22403 err = ENOTTY; 22404 goto done_without_assess; 22405 } 22406 break; 22407 case FDEJECT: 22408 case DKIOCEJECT: 22409 case CDROMEJECT: 22410 if (!un->un_f_eject_media_supported) { 22411 un->un_ncmds_in_driver--; 22412 ASSERT(un->un_ncmds_in_driver >= 0); 22413 mutex_exit(SD_MUTEX(un)); 22414 err = ENOTTY; 22415 goto done_without_assess; 22416 } 22417 break; 22418 case DKIOCFLUSHWRITECACHE: 22419 mutex_exit(SD_MUTEX(un)); 22420 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 22421 if (err != 0) { 22422 mutex_enter(SD_MUTEX(un)); 22423 un->un_ncmds_in_driver--; 22424 ASSERT(un->un_ncmds_in_driver >= 0); 22425 mutex_exit(SD_MUTEX(un)); 22426 err = EIO; 22427 goto done_quick_assess; 22428 } 22429 mutex_enter(SD_MUTEX(un)); 22430 /* FALLTHROUGH */ 22431 case DKIOCREMOVABLE: 22432 case DKIOCHOTPLUGGABLE: 22433 case DKIOCINFO: 22434 case DKIOCGMEDIAINFO: 22435 case DKIOCGMEDIAINFOEXT: 22436 case DKIOCSOLIDSTATE: 22437 case DKIOC_CANFREE: 22438 case MHIOCENFAILFAST: 22439 case MHIOCSTATUS: 22440 case MHIOCTKOWN: 22441 case MHIOCRELEASE: 22442 case MHIOCGRP_INKEYS: 22443 case MHIOCGRP_INRESV: 22444 case MHIOCGRP_REGISTER: 22445 case MHIOCGRP_CLEAR: 22446 case MHIOCGRP_RESERVE: 22447 case MHIOCGRP_PREEMPTANDABORT: 22448 case MHIOCGRP_REGISTERANDIGNOREKEY: 22449 case CDROMCLOSETRAY: 22450 case USCSICMD: 22451 case USCSIMAXXFER: 22452 goto skip_ready_valid; 22453 default: 22454 break; 22455 } 22456 22457 mutex_exit(SD_MUTEX(un)); 22458 err = sd_ready_and_valid(ssc, SDPART(dev)); 22459 mutex_enter(SD_MUTEX(un)); 22460 22461 if (err != SD_READY_VALID) { 22462 switch (cmd) { 22463 case DKIOCSTATE: 22464 case CDROMGDRVSPEED: 22465 case CDROMSDRVSPEED: 22466 case FDEJECT: /* for eject command */ 22467 case DKIOCEJECT: 22468 case CDROMEJECT: 22469 case DKIOCREMOVABLE: 22470 case DKIOCHOTPLUGGABLE: 22471 break; 22472 default: 22473 if (un->un_f_has_removable_media) { 22474 err = ENXIO; 22475 } else { 22476 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 22477 if (err == SD_RESERVED_BY_OTHERS) { 22478 err = EACCES; 22479 } else { 22480 err = EIO; 22481 } 22482 } 22483 un->un_ncmds_in_driver--; 22484 ASSERT(un->un_ncmds_in_driver >= 0); 22485 mutex_exit(SD_MUTEX(un)); 22486 22487 goto 
done_without_assess; 22488 } 22489 } 22490 } 22491 22492 skip_ready_valid: 22493 mutex_exit(SD_MUTEX(un)); 22494 22495 switch (cmd) { 22496 case DKIOCINFO: 22497 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 22498 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 22499 break; 22500 22501 case DKIOCGMEDIAINFO: 22502 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 22503 err = sd_get_media_info(dev, (caddr_t)arg, flag); 22504 break; 22505 22506 case DKIOCGMEDIAINFOEXT: 22507 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n"); 22508 err = sd_get_media_info_ext(dev, (caddr_t)arg, flag); 22509 break; 22510 22511 case DKIOCGGEOM: 22512 case DKIOCGVTOC: 22513 case DKIOCGEXTVTOC: 22514 case DKIOCGAPART: 22515 case DKIOCPARTINFO: 22516 case DKIOCEXTPARTINFO: 22517 case DKIOCSGEOM: 22518 case DKIOCSAPART: 22519 case DKIOCGETEFI: 22520 case DKIOCPARTITION: 22521 case DKIOCSVTOC: 22522 case DKIOCSEXTVTOC: 22523 case DKIOCSETEFI: 22524 case DKIOCGMBOOT: 22525 case DKIOCSMBOOT: 22526 case DKIOCG_PHYGEOM: 22527 case DKIOCG_VIRTGEOM: 22528 #if defined(__x86) 22529 case DKIOCSETEXTPART: 22530 #endif 22531 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 22532 22533 /* TUR should spin up */ 22534 22535 if (un->un_f_has_removable_media) 22536 err = sd_send_scsi_TEST_UNIT_READY(ssc, 22537 SD_CHECK_FOR_MEDIA); 22538 22539 else 22540 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 22541 22542 if (err != 0) 22543 goto done_with_assess; 22544 22545 err = cmlb_ioctl(un->un_cmlbhandle, dev, 22546 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 22547 22548 if ((err == 0) && 22549 ((cmd == DKIOCSETEFI) || 22550 ((un->un_f_pkstats_enabled) && 22551 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC || 22552 cmd == DKIOCSEXTVTOC)))) { 22553 22554 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 22555 (void *)SD_PATH_DIRECT); 22556 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 22557 sd_set_pstats(un); 22558 SD_TRACE(SD_LOG_IO_PARTITION, un, 22559 "sd_ioctl: un:0x%p pstats created and " 22560 "set\n", un); 22561 } 22562 } 22563 22564 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) || 22565 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 22566 22567 mutex_enter(SD_MUTEX(un)); 22568 if (un->un_f_devid_supported && 22569 (un->un_f_opt_fab_devid == TRUE)) { 22570 if (un->un_devid == NULL) { 22571 sd_register_devid(ssc, SD_DEVINFO(un), 22572 SD_TARGET_IS_UNRESERVED); 22573 } else { 22574 /* 22575 * The device id for this disk 22576 * has been fabricated. The 22577 * device id must be preserved 22578 * by writing it back out to 22579 * disk. 
22580 */ 22581 if (sd_write_deviceid(ssc) != 0) { 22582 ddi_devid_free(un->un_devid); 22583 un->un_devid = NULL; 22584 } 22585 } 22586 } 22587 mutex_exit(SD_MUTEX(un)); 22588 } 22589 22590 break; 22591 22592 case DKIOCLOCK: 22593 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 22594 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 22595 SD_PATH_STANDARD); 22596 goto done_with_assess; 22597 22598 case DKIOCUNLOCK: 22599 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 22600 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 22601 SD_PATH_STANDARD); 22602 goto done_with_assess; 22603 22604 case DKIOCSTATE: { 22605 enum dkio_state state; 22606 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 22607 22608 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 22609 err = EFAULT; 22610 } else { 22611 err = sd_check_media(dev, state); 22612 if (err == 0) { 22613 if (ddi_copyout(&un->un_mediastate, (void *)arg, 22614 sizeof (int), flag) != 0) 22615 err = EFAULT; 22616 } 22617 } 22618 break; 22619 } 22620 22621 case DKIOCREMOVABLE: 22622 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 22623 i = un->un_f_has_removable_media ? 1 : 0; 22624 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22625 err = EFAULT; 22626 } else { 22627 err = 0; 22628 } 22629 break; 22630 22631 case DKIOCSOLIDSTATE: 22632 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSOLIDSTATE\n"); 22633 i = un->un_f_is_solid_state ? 1 : 0; 22634 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22635 err = EFAULT; 22636 } else { 22637 err = 0; 22638 } 22639 break; 22640 22641 case DKIOCHOTPLUGGABLE: 22642 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 22643 i = un->un_f_is_hotpluggable ? 1 : 0; 22644 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22645 err = EFAULT; 22646 } else { 22647 err = 0; 22648 } 22649 break; 22650 22651 case DKIOCREADONLY: 22652 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREADONLY\n"); 22653 i = 0; 22654 if ((ISCD(un) && !un->un_f_mmc_writable_media) || 22655 (sr_check_wp(dev) != 0)) { 22656 i = 1; 22657 } 22658 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 22659 err = EFAULT; 22660 } else { 22661 err = 0; 22662 } 22663 break; 22664 22665 case DKIOCGTEMPERATURE: 22666 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 22667 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 22668 break; 22669 22670 case MHIOCENFAILFAST: 22671 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 22672 if ((err = drv_priv(cred_p)) == 0) { 22673 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 22674 } 22675 break; 22676 22677 case MHIOCTKOWN: 22678 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 22679 if ((err = drv_priv(cred_p)) == 0) { 22680 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 22681 } 22682 break; 22683 22684 case MHIOCRELEASE: 22685 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 22686 if ((err = drv_priv(cred_p)) == 0) { 22687 err = sd_mhdioc_release(dev); 22688 } 22689 break; 22690 22691 case MHIOCSTATUS: 22692 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 22693 if ((err = drv_priv(cred_p)) == 0) { 22694 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) { 22695 case 0: 22696 err = 0; 22697 break; 22698 case EACCES: 22699 *rval_p = 1; 22700 err = 0; 22701 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22702 break; 22703 default: 22704 err = EIO; 22705 goto done_with_assess; 22706 } 22707 } 22708 break; 22709 22710 case MHIOCQRESERVE: 22711 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 22712 if ((err = drv_priv(cred_p)) == 0) { 22713 err = sd_reserve_release(dev, SD_RESERVE); 22714 } 22715 break; 22716 22717 
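        /*
         * The MHIOCGRP_* cases below map onto SCSI-3 Persistent Reservation
         * operations and are rejected for SCSI-2 reservation devices. As a
         * hypothetical userland sketch of the calling convention (the array
         * size is an arbitrary example), listing registered keys via
         * MHIOCGRP_INKEYS looks roughly like:
         *
         *	mhioc_resv_key_t keys[4];
         *	mhioc_key_list_t kl = { 0 };
         *	mhioc_inkeys_t ik;
         *
         *	kl.listsize = 4;
         *	kl.list = keys;
         *	ik.li = &kl;
         *	if (ioctl(fd, MHIOCGRP_INKEYS, &ik) == 0) {
         *		... kl.listlen keys returned, generation number
         *		    in ik.generation ...
         *	}
         */
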
case MHIOCREREGISTERDEVID: 22718 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 22719 if (drv_priv(cred_p) == EPERM) { 22720 err = EPERM; 22721 } else if (!un->un_f_devid_supported) { 22722 err = ENOTTY; 22723 } else { 22724 err = sd_mhdioc_register_devid(dev); 22725 } 22726 break; 22727 22728 case MHIOCGRP_INKEYS: 22729 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 22730 if (((err = drv_priv(cred_p)) != EPERM) && 22731 arg != (intptr_t)NULL) { 22732 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22733 err = ENOTSUP; 22734 } else { 22735 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 22736 flag); 22737 } 22738 } 22739 break; 22740 22741 case MHIOCGRP_INRESV: 22742 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 22743 if (((err = drv_priv(cred_p)) != EPERM) && 22744 arg != (intptr_t)NULL) { 22745 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22746 err = ENOTSUP; 22747 } else { 22748 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 22749 } 22750 } 22751 break; 22752 22753 case MHIOCGRP_REGISTER: 22754 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 22755 if ((err = drv_priv(cred_p)) != EPERM) { 22756 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22757 err = ENOTSUP; 22758 } else if (arg != (intptr_t)NULL) { 22759 mhioc_register_t reg; 22760 if (ddi_copyin((void *)arg, ®, 22761 sizeof (mhioc_register_t), flag) != 0) { 22762 err = EFAULT; 22763 } else { 22764 err = 22765 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22766 ssc, SD_SCSI3_REGISTER, 22767 (uchar_t *)®); 22768 if (err != 0) 22769 goto done_with_assess; 22770 } 22771 } 22772 } 22773 break; 22774 22775 case MHIOCGRP_CLEAR: 22776 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_CLEAR\n"); 22777 if ((err = drv_priv(cred_p)) != EPERM) { 22778 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22779 err = ENOTSUP; 22780 } else if (arg != (intptr_t)NULL) { 22781 mhioc_register_t reg; 22782 if (ddi_copyin((void *)arg, ®, 22783 sizeof (mhioc_register_t), flag) != 0) { 22784 err = EFAULT; 22785 } else { 22786 err = 22787 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22788 ssc, SD_SCSI3_CLEAR, 22789 (uchar_t *)®); 22790 if (err != 0) 22791 goto done_with_assess; 22792 } 22793 } 22794 } 22795 break; 22796 22797 case MHIOCGRP_RESERVE: 22798 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 22799 if ((err = drv_priv(cred_p)) != EPERM) { 22800 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22801 err = ENOTSUP; 22802 } else if (arg != (intptr_t)NULL) { 22803 mhioc_resv_desc_t resv_desc; 22804 if (ddi_copyin((void *)arg, &resv_desc, 22805 sizeof (mhioc_resv_desc_t), flag) != 0) { 22806 err = EFAULT; 22807 } else { 22808 err = 22809 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22810 ssc, SD_SCSI3_RESERVE, 22811 (uchar_t *)&resv_desc); 22812 if (err != 0) 22813 goto done_with_assess; 22814 } 22815 } 22816 } 22817 break; 22818 22819 case MHIOCGRP_PREEMPTANDABORT: 22820 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 22821 if ((err = drv_priv(cred_p)) != EPERM) { 22822 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22823 err = ENOTSUP; 22824 } else if (arg != (intptr_t)NULL) { 22825 mhioc_preemptandabort_t preempt_abort; 22826 if (ddi_copyin((void *)arg, &preempt_abort, 22827 sizeof (mhioc_preemptandabort_t), 22828 flag) != 0) { 22829 err = EFAULT; 22830 } else { 22831 err = 22832 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22833 ssc, SD_SCSI3_PREEMPTANDABORT, 22834 (uchar_t *)&preempt_abort); 22835 if (err != 0) 22836 goto done_with_assess; 22837 } 22838 } 22839 } 22840 break; 22841 22842 case MHIOCGRP_REGISTERANDIGNOREKEY: 22843 
SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 22844 if ((err = drv_priv(cred_p)) != EPERM) { 22845 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 22846 err = ENOTSUP; 22847 } else if (arg != (intptr_t)NULL) { 22848 mhioc_registerandignorekey_t r_and_i; 22849 if (ddi_copyin((void *)arg, (void *)&r_and_i, 22850 sizeof (mhioc_registerandignorekey_t), 22851 flag) != 0) { 22852 err = EFAULT; 22853 } else { 22854 err = 22855 sd_send_scsi_PERSISTENT_RESERVE_OUT( 22856 ssc, SD_SCSI3_REGISTERANDIGNOREKEY, 22857 (uchar_t *)&r_and_i); 22858 if (err != 0) 22859 goto done_with_assess; 22860 } 22861 } 22862 } 22863 break; 22864 22865 case USCSICMD: 22866 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 22867 cr = ddi_get_cred(); 22868 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 22869 err = EPERM; 22870 } else { 22871 enum uio_seg uioseg; 22872 22873 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : 22874 UIO_USERSPACE; 22875 if (un->un_f_format_in_progress == TRUE) { 22876 err = EAGAIN; 22877 break; 22878 } 22879 22880 err = sd_ssc_send(ssc, 22881 (struct uscsi_cmd *)arg, 22882 flag, uioseg, SD_PATH_STANDARD); 22883 if (err != 0) 22884 goto done_with_assess; 22885 else 22886 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 22887 } 22888 break; 22889 22890 case USCSIMAXXFER: 22891 SD_TRACE(SD_LOG_IOCTL, un, "USCSIMAXXFER\n"); 22892 cr = ddi_get_cred(); 22893 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 22894 err = EPERM; 22895 } else { 22896 const uscsi_xfer_t xfer = un->un_max_xfer_size; 22897 22898 if (ddi_copyout(&xfer, (void *)arg, sizeof (xfer), 22899 flag) != 0) { 22900 err = EFAULT; 22901 } else { 22902 err = 0; 22903 } 22904 } 22905 break; 22906 22907 case CDROMPAUSE: 22908 case CDROMRESUME: 22909 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 22910 if (!ISCD(un)) { 22911 err = ENOTTY; 22912 } else { 22913 err = sr_pause_resume(dev, cmd); 22914 } 22915 break; 22916 22917 case CDROMPLAYMSF: 22918 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 22919 if (!ISCD(un)) { 22920 err = ENOTTY; 22921 } else { 22922 err = sr_play_msf(dev, (caddr_t)arg, flag); 22923 } 22924 break; 22925 22926 case CDROMPLAYTRKIND: 22927 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 22928 #if defined(__x86) 22929 /* 22930 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 22931 */ 22932 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 22933 #else 22934 if (!ISCD(un)) { 22935 #endif 22936 err = ENOTTY; 22937 } else { 22938 err = sr_play_trkind(dev, (caddr_t)arg, flag); 22939 } 22940 break; 22941 22942 case CDROMREADTOCHDR: 22943 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 22944 if (!ISCD(un)) { 22945 err = ENOTTY; 22946 } else { 22947 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 22948 } 22949 break; 22950 22951 case CDROMREADTOCENTRY: 22952 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 22953 if (!ISCD(un)) { 22954 err = ENOTTY; 22955 } else { 22956 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 22957 } 22958 break; 22959 22960 case CDROMSTOP: 22961 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 22962 if (!ISCD(un)) { 22963 err = ENOTTY; 22964 } else { 22965 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 22966 SD_TARGET_STOP, SD_PATH_STANDARD); 22967 goto done_with_assess; 22968 } 22969 break; 22970 22971 case CDROMSTART: 22972 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 22973 if (!ISCD(un)) { 22974 err = ENOTTY; 22975 } else { 22976 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 22977 SD_TARGET_START, SD_PATH_STANDARD); 22978 goto done_with_assess; 22979 } 22980 
break;

        case CDROMCLOSETRAY:
                SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
                if (!ISCD(un)) {
                        err = ENOTTY;
                } else {
                        err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
                            SD_TARGET_CLOSE, SD_PATH_STANDARD);
                        goto done_with_assess;
                }
                break;

        case FDEJECT:	/* for eject command */
        case DKIOCEJECT:
        case CDROMEJECT:
                SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
                if (!un->un_f_eject_media_supported) {
                        err = ENOTTY;
                } else {
                        err = sr_eject(dev);
                }
                break;

        case CDROMVOLCTRL:
                SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
                if (!ISCD(un)) {
                        err = ENOTTY;
                } else {
                        err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
                }
                break;

        case CDROMSUBCHNL:
                SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
                if (!ISCD(un)) {
                        err = ENOTTY;
                } else {
                        err = sr_read_subchannel(dev, (caddr_t)arg, flag);
                }
                break;

        case CDROMREADMODE2:
                SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
                if (!ISCD(un)) {
                        err = ENOTTY;
                } else if (un->un_f_cfg_is_atapi == TRUE) {
                        /*
                         * If the drive supports READ CD, use that instead of
                         * switching the LBA size via a MODE SELECT
                         * Block Descriptor
                         */
                        err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
                } else {
                        err = sr_read_mode2(dev, (caddr_t)arg, flag);
                }
                break;

        case CDROMREADMODE1:
                SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
                if (!ISCD(un)) {
                        err = ENOTTY;
                } else {
                        err = sr_read_mode1(dev, (caddr_t)arg, flag);
                }
                break;

        case CDROMREADOFFSET:
                SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
                if (!ISCD(un)) {
                        err = ENOTTY;
                } else {
                        err = sr_read_sony_session_offset(dev, (caddr_t)arg,
                            flag);
                }
                break;

        case CDROMSBLKMODE:
                SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
                /*
                 * There is no means of changing the block size on ATAPI
                 * drives, so return ENOTTY if the drive type is ATAPI.
                 */
                if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
                        err = ENOTTY;
                } else if (un->un_f_mmc_cap == TRUE) {

                        /*
                         * MMC Devices do not support changing the
                         * logical block size
                         *
                         * Note: EINVAL is being returned instead of ENOTTY to
                         * maintain consistency with the original mmc
                         * driver update.
                         */
                        err = EINVAL;
                } else {
                        mutex_enter(SD_MUTEX(un));
                        if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
                            (un->un_ncmds_in_transport > 0)) {
                                mutex_exit(SD_MUTEX(un));
                                err = EINVAL;
                        } else {
                                mutex_exit(SD_MUTEX(un));
                                err = sr_change_blkmode(dev, cmd, arg, flag);
                        }
                }
                break;

        case CDROMGBLKMODE:
                SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
                if (!ISCD(un)) {
                        err = ENOTTY;
                } else if ((un->un_f_cfg_is_atapi != FALSE) &&
                    (un->un_f_blockcount_is_valid != FALSE)) {
                        /*
                         * Drive is an ATAPI drive so return target block
                         * size for ATAPI drives since we cannot change the
                         * blocksize on ATAPI drives. Used primarily to detect
                         * if an ATAPI cdrom is present.
                         */
                        if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
                            sizeof (int), flag) != 0) {
                                err = EFAULT;
                        } else {
                                err = 0;
                        }

                } else {
                        /*
                         * Drive supports changing block sizes via a Mode
                         * Select.
                         */
                        err = sr_change_blkmode(dev, cmd, arg, flag);
                }
                break;

        case CDROMGDRVSPEED:
        case CDROMSDRVSPEED:
                SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
                if (!ISCD(un)) {
                        err = ENOTTY;
                } else if (un->un_f_mmc_cap == TRUE) {
                        /*
                         * Note: In the future the driver implementation for
                         * getting and setting cd speed should entail:
                         * 1) If non-mmc try the Toshiba mode page
                         *    (sr_change_speed)
                         * 2) If mmc but no support for Real Time Streaming try
                         *    the SET CD SPEED (0xBB) command
                         *    (sr_atapi_change_speed)
                         * 3) If mmc and support for Real Time Streaming
                         *    try the GET PERFORMANCE and SET STREAMING
                         *    commands (not yet implemented, 4380808)
                         */
                        /*
                         * As per recent MMC spec, CD-ROM speed is variable
                         * and changes with LBA. Since there is no such
                         * thing as drive speed now, fail this ioctl.
                         *
                         * Note: EINVAL is returned for consistency with the
                         * original implementation, which included support for
                         * getting the drive speed of mmc devices but not
                         * setting the drive speed. Thus EINVAL would be
                         * returned if a set request was made for an mmc
                         * device. We no longer support get or set speed for
                         * mmc but need to remain consistent with regard
                         * to the error code returned.
                         */
                        err = EINVAL;
                } else if (un->un_f_cfg_is_atapi == TRUE) {
                        err = sr_atapi_change_speed(dev, cmd, arg, flag);
                } else {
                        err = sr_change_speed(dev, cmd, arg, flag);
                }
                break;

        case CDROMCDDA:
                SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
                if (!ISCD(un)) {
                        err = ENOTTY;
                } else {
                        err = sr_read_cdda(dev, (void *)arg, flag);
                }
                break;

        case CDROMCDXA:
                SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
                if (!ISCD(un)) {
                        err = ENOTTY;
                } else {
                        err = sr_read_cdxa(dev, (caddr_t)arg, flag);
                }
                break;

        case CDROMSUBCODE:
                SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
                if (!ISCD(un)) {
                        err = ENOTTY;
                } else {
                        err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
                }
                break;


#ifdef SDDEBUG
        /* RESET/ABORTS testing ioctls */
        case DKIOCRESET: {
                int reset_level;

                if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) {
                        err = EFAULT;
                } else {
                        SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: "
                            "reset_level = 0x%lx\n", reset_level);
                        if (scsi_reset(SD_ADDRESS(un), reset_level)) {
                                err = 0;
                        } else {
                                err = EIO;
                        }
                }
                break;
        }

        case DKIOCABORT:
                SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n");
                if (scsi_abort(SD_ADDRESS(un), NULL)) {
                        err = 0;
                } else {
                        err = EIO;
                }
                break;
#endif

#ifdef SD_FAULT_INJECTION
        /* SDIOC FaultInjection testing ioctls */
        case SDIOCSTART:
        case SDIOCSTOP:
        case SDIOCINSERTPKT:
        case SDIOCINSERTXB:
        case SDIOCINSERTUN:
        case SDIOCINSERTARQ:
        case SDIOCPUSH:
        case SDIOCRETRIEVE:
        case SDIOCRUN:
                SD_INFO(SD_LOG_SDTEST, un, "sdioctl:"
                    "SDIOC detected
cmd:0x%X:\n", cmd); 23228 /* call error generator */ 23229 sd_faultinjection_ioctl(cmd, arg, un); 23230 err = 0; 23231 break; 23232 23233 #endif /* SD_FAULT_INJECTION */ 23234 23235 case DKIOCFLUSHWRITECACHE: 23236 { 23237 struct dk_callback *dkc = (struct dk_callback *)arg; 23238 23239 mutex_enter(SD_MUTEX(un)); 23240 if (!un->un_f_sync_cache_supported || 23241 !un->un_f_write_cache_enabled) { 23242 err = un->un_f_sync_cache_supported ? 23243 0 : ENOTSUP; 23244 mutex_exit(SD_MUTEX(un)); 23245 if ((flag & FKIOCTL) && dkc != NULL && 23246 dkc->dkc_callback != NULL) { 23247 (*dkc->dkc_callback)(dkc->dkc_cookie, 23248 err); 23249 /* 23250 * Did callback and reported error. 23251 * Since we did a callback, ioctl 23252 * should return 0. 23253 */ 23254 err = 0; 23255 } 23256 break; 23257 } 23258 mutex_exit(SD_MUTEX(un)); 23259 23260 if ((flag & FKIOCTL) && dkc != NULL && 23261 dkc->dkc_callback != NULL) { 23262 /* async SYNC CACHE request */ 23263 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 23264 } else { 23265 /* synchronous SYNC CACHE request */ 23266 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 23267 } 23268 } 23269 break; 23270 23271 case DKIOCFREE: 23272 { 23273 dkioc_free_list_t *dfl = (dkioc_free_list_t *)arg; 23274 23275 /* bad ioctls shouldn't panic */ 23276 if (dfl == NULL) { 23277 /* check kernel callers strictly in debug */ 23278 ASSERT0(flag & FKIOCTL); 23279 err = SET_ERROR(EINVAL); 23280 break; 23281 } 23282 /* synchronous UNMAP request */ 23283 err = sd_send_scsi_UNMAP(dev, ssc, dfl, flag); 23284 } 23285 break; 23286 23287 case DKIOC_CANFREE: 23288 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC_CANFREE\n"); 23289 i = (un->un_thin_flags & SD_THIN_PROV_ENABLED) ? 1 : 0; 23290 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 23291 err = EFAULT; 23292 } else { 23293 err = 0; 23294 } 23295 break; 23296 23297 case DKIOCGETWCE: { 23298 23299 int wce; 23300 23301 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 23302 break; 23303 } 23304 23305 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 23306 err = EFAULT; 23307 } 23308 break; 23309 } 23310 23311 case DKIOCSETWCE: { 23312 23313 int wce, sync_supported; 23314 int cur_wce = 0; 23315 23316 if (!un->un_f_cache_mode_changeable) { 23317 err = EINVAL; 23318 break; 23319 } 23320 23321 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 23322 err = EFAULT; 23323 break; 23324 } 23325 23326 /* 23327 * Synchronize multiple threads trying to enable 23328 * or disable the cache via the un_f_wcc_cv 23329 * condition variable. 23330 */ 23331 mutex_enter(SD_MUTEX(un)); 23332 23333 /* 23334 * Don't allow the cache to be enabled if the 23335 * config file has it disabled. 23336 */ 23337 if (un->un_f_opt_disable_cache && wce) { 23338 mutex_exit(SD_MUTEX(un)); 23339 err = EINVAL; 23340 break; 23341 } 23342 23343 /* 23344 * Wait for write cache change in progress 23345 * bit to be clear before proceeding. 
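		 *
		 * This is the standard in-progress-flag-plus-condvar gate;
		 * in outline (a restatement of the code below and of the
		 * broadcast at the end of this case, nothing new):
		 *
		 *	mutex_enter(SD_MUTEX(un));
		 *	while (un->un_f_wcc_inprog)
		 *		cv_wait(&un->un_wcc_cv, SD_MUTEX(un));
		 *	un->un_f_wcc_inprog = 1;
		 *	mutex_exit(SD_MUTEX(un));
		 *	... query and change the cache state ...
		 *	mutex_enter(SD_MUTEX(un));
		 *	un->un_f_wcc_inprog = 0;
		 *	cv_broadcast(&un->un_wcc_cv);
		 *	mutex_exit(SD_MUTEX(un));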
23346 */ 23347 while (un->un_f_wcc_inprog) 23348 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 23349 23350 un->un_f_wcc_inprog = 1; 23351 23352 mutex_exit(SD_MUTEX(un)); 23353 23354 /* 23355 * Get the current write cache state 23356 */ 23357 if ((err = sd_get_write_cache_enabled(ssc, &cur_wce)) != 0) { 23358 mutex_enter(SD_MUTEX(un)); 23359 un->un_f_wcc_inprog = 0; 23360 cv_broadcast(&un->un_wcc_cv); 23361 mutex_exit(SD_MUTEX(un)); 23362 break; 23363 } 23364 23365 mutex_enter(SD_MUTEX(un)); 23366 un->un_f_write_cache_enabled = (cur_wce != 0); 23367 23368 if (un->un_f_write_cache_enabled && wce == 0) { 23369 /* 23370 * Disable the write cache. Don't clear 23371 * un_f_write_cache_enabled until after 23372 * the mode select and flush are complete. 23373 */ 23374 sync_supported = un->un_f_sync_cache_supported; 23375 23376 /* 23377 * If cache flush is suppressed, we assume that the 23378 * controller firmware will take care of managing the 23379 * write cache for us: no need to explicitly 23380 * disable it. 23381 */ 23382 if (!un->un_f_suppress_cache_flush) { 23383 mutex_exit(SD_MUTEX(un)); 23384 if ((err = sd_cache_control(ssc, 23385 SD_CACHE_NOCHANGE, 23386 SD_CACHE_DISABLE)) == 0 && 23387 sync_supported) { 23388 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 23389 NULL); 23390 } 23391 } else { 23392 mutex_exit(SD_MUTEX(un)); 23393 } 23394 23395 mutex_enter(SD_MUTEX(un)); 23396 if (err == 0) { 23397 un->un_f_write_cache_enabled = 0; 23398 } 23399 23400 } else if (!un->un_f_write_cache_enabled && wce != 0) { 23401 /* 23402 * Set un_f_write_cache_enabled first, so there is 23403 * no window where the cache is enabled, but the 23404 * bit says it isn't. 23405 */ 23406 un->un_f_write_cache_enabled = 1; 23407 23408 /* 23409 * If cache flush is suppressed, we assume that the 23410 * controller firmware will take care of managing the 23411 * write cache for us: no need to explicitly 23412 * enable it. 23413 */ 23414 if (!un->un_f_suppress_cache_flush) { 23415 mutex_exit(SD_MUTEX(un)); 23416 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 23417 SD_CACHE_ENABLE); 23418 } else { 23419 mutex_exit(SD_MUTEX(un)); 23420 } 23421 23422 mutex_enter(SD_MUTEX(un)); 23423 23424 if (err) { 23425 un->un_f_write_cache_enabled = 0; 23426 } 23427 } 23428 23429 un->un_f_wcc_inprog = 0; 23430 cv_broadcast(&un->un_wcc_cv); 23431 mutex_exit(SD_MUTEX(un)); 23432 break; 23433 } 23434 23435 default: 23436 err = ENOTTY; 23437 break; 23438 } 23439 mutex_enter(SD_MUTEX(un)); 23440 un->un_ncmds_in_driver--; 23441 ASSERT(un->un_ncmds_in_driver >= 0); 23442 mutex_exit(SD_MUTEX(un)); 23443 23444 23445 done_without_assess: 23446 sd_ssc_fini(ssc); 23447 23448 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 23449 return (err); 23450 23451 done_with_assess: 23452 mutex_enter(SD_MUTEX(un)); 23453 un->un_ncmds_in_driver--; 23454 ASSERT(un->un_ncmds_in_driver >= 0); 23455 mutex_exit(SD_MUTEX(un)); 23456 23457 done_quick_assess: 23458 if (err != 0) 23459 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23460 /* Uninitialize sd_ssc_t pointer */ 23461 sd_ssc_fini(ssc); 23462 23463 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 23464 return (err); 23465 } 23466 23467 23468 /* 23469 * Function: sd_dkio_ctrl_info 23470 * 23471 * Description: This routine is the driver entry point for handling controller 23472 * information ioctl requests (DKIOCINFO). 23473 * 23474 * Arguments: dev - the device number 23475 * arg - pointer to user provided dk_cinfo structure 23476 * specifying the controller type and attributes. 
23477 * flag - this argument is a pass through to ddi_copyxxx() 23478 * directly from the mode argument of ioctl(). 23479 * 23480 * Return Code: 0 23481 * EFAULT 23482 * ENXIO 23483 */ 23484 23485 static int 23486 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 23487 { 23488 struct sd_lun *un = NULL; 23489 struct dk_cinfo *info; 23490 dev_info_t *pdip; 23491 int lun, tgt; 23492 23493 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23494 return (ENXIO); 23495 } 23496 23497 info = (struct dk_cinfo *) 23498 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 23499 23500 switch (un->un_ctype) { 23501 case CTYPE_CDROM: 23502 info->dki_ctype = DKC_CDROM; 23503 break; 23504 default: 23505 info->dki_ctype = DKC_SCSI_CCS; 23506 break; 23507 } 23508 pdip = ddi_get_parent(SD_DEVINFO(un)); 23509 info->dki_cnum = ddi_get_instance(pdip); 23510 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 23511 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 23512 } else { 23513 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 23514 DK_DEVLEN - 1); 23515 } 23516 23517 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 23518 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 23519 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 23520 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 23521 23522 /* Unit Information */ 23523 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 23524 info->dki_slave = ((tgt << 3) | lun); 23525 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 23526 DK_DEVLEN - 1); 23527 info->dki_flags = DKI_FMTVOL; 23528 info->dki_partition = SDPART(dev); 23529 23530 /* Max Transfer size of this device in blocks */ 23531 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 23532 info->dki_addr = 0; 23533 info->dki_space = 0; 23534 info->dki_prio = 0; 23535 info->dki_vec = 0; 23536 23537 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 23538 kmem_free(info, sizeof (struct dk_cinfo)); 23539 return (EFAULT); 23540 } else { 23541 kmem_free(info, sizeof (struct dk_cinfo)); 23542 return (0); 23543 } 23544 } 23545 23546 /* 23547 * Function: sd_get_media_info_com 23548 * 23549 * Description: This routine returns the information required to populate 23550 * the fields for the dk_minfo/dk_minfo_ext structures. 
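 *		As a worked example of the units involved (the numbers are
 *		arbitrary, not from any particular device): dki_capacity
 *		counts blocks of dki_lbsize bytes, so dki_lbsize = 2048 and
 *		dki_capacity = 143360 describe a 143360 * 2048 = 293601280
 *		byte (280 MiB) medium.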
 *
 * Arguments: dev		- the device number
 *		dki_media_type	- media_type
 *		dki_lbsize	- logical block size
 *		dki_capacity	- capacity in blocks
 *		dki_pbsize	- physical block size (if requested)
 *
 * Return Code: 0
 *		EACCES
 *		EFAULT
 *		ENXIO
 *		EIO
 */
static int
sd_get_media_info_com(dev_t dev, uint_t *dki_media_type, uint_t *dki_lbsize,
    diskaddr_t *dki_capacity, uint_t *dki_pbsize)
{
        struct sd_lun *un = NULL;
        struct uscsi_cmd com;
        struct scsi_inquiry *sinq;
        u_longlong_t media_capacity;
        uint64_t capacity;
        uint_t lbasize;
        uint_t pbsize;
        uchar_t *out_data;
        uchar_t *rqbuf;
        int rval = 0;
        int rtn;
        sd_ssc_t *ssc;

        if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
            (un->un_state == SD_STATE_OFFLINE)) {
                return (ENXIO);
        }

        SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_com: entry\n");

        out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
        rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
        ssc = sd_ssc_init(un);

        /* Issue a TUR to determine if the drive is ready with media present */
        rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
        if (rval == ENXIO) {
                goto done;
        } else if (rval != 0) {
                sd_ssc_assessment(ssc, SD_FMT_IGNORE);
        }

        /* Now get configuration data */
        if (ISCD(un)) {
                *dki_media_type = DK_CDROM;

                /* Allow SCMD_GET_CONFIGURATION to MMC devices only */
                if (un->un_f_mmc_cap == TRUE) {
                        rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
                            SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
                            SD_PATH_STANDARD);

                        if (rtn) {
                                /*
                                 * We ignore all failures for CD and need to
                                 * put the assessment before processing code
                                 * to avoid missing assessment for FMA.
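                                 *
                                 * The rqbuf offsets tested below follow the
                                 * fixed-format sense data layout: the low
                                 * nibble of byte 2 is the sense key and byte
                                 * 12 is the additional sense code, so the
                                 * pair (KEY_ILLEGAL_REQUEST, ASC 0x20) reads
                                 * as "invalid command operation code".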
23615 */ 23616 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23617 /* 23618 * Failed for other than an illegal request 23619 * or command not supported 23620 */ 23621 if ((com.uscsi_status == STATUS_CHECK) && 23622 (com.uscsi_rqstatus == STATUS_GOOD)) { 23623 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 23624 (rqbuf[12] != 0x20)) { 23625 rval = EIO; 23626 goto no_assessment; 23627 } 23628 } 23629 } else { 23630 /* 23631 * The GET CONFIGURATION command succeeded 23632 * so set the media type according to the 23633 * returned data 23634 */ 23635 *dki_media_type = out_data[6]; 23636 *dki_media_type <<= 8; 23637 *dki_media_type |= out_data[7]; 23638 } 23639 } 23640 } else { 23641 /* 23642 * The profile list is not available, so we attempt to identify 23643 * the media type based on the inquiry data 23644 */ 23645 sinq = un->un_sd->sd_inq; 23646 if ((sinq->inq_dtype == DTYPE_DIRECT) || 23647 (sinq->inq_dtype == DTYPE_OPTICAL)) { 23648 /* This is a direct access device or optical disk */ 23649 *dki_media_type = DK_FIXED_DISK; 23650 23651 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 23652 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 23653 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 23654 *dki_media_type = DK_ZIP; 23655 } else if ( 23656 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 23657 *dki_media_type = DK_JAZ; 23658 } 23659 } 23660 } else { 23661 /* 23662 * Not a CD, direct access or optical disk so return 23663 * unknown media 23664 */ 23665 *dki_media_type = DK_UNKNOWN; 23666 } 23667 } 23668 23669 /* 23670 * Now read the capacity so we can provide the lbasize, 23671 * pbsize and capacity. 23672 */ 23673 if (dki_pbsize && un->un_f_descr_format_supported) { 23674 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize, 23675 &pbsize, SD_PATH_DIRECT); 23676 23677 /* 23678 * Override the physical blocksize if the instance already 23679 * has a larger value. 23680 */ 23681 pbsize = MAX(pbsize, un->un_phy_blocksize); 23682 } 23683 23684 if (dki_pbsize == NULL || rval != 0 || 23685 !un->un_f_descr_format_supported) { 23686 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 23687 SD_PATH_DIRECT); 23688 23689 switch (rval) { 23690 case 0: 23691 if (un->un_f_enable_rmw && 23692 un->un_phy_blocksize != 0) { 23693 pbsize = un->un_phy_blocksize; 23694 } else { 23695 pbsize = lbasize; 23696 } 23697 media_capacity = capacity; 23698 23699 /* 23700 * sd_send_scsi_READ_CAPACITY() reports capacity in 23701 * un->un_sys_blocksize chunks. So we need to convert 23702 * it into cap.lbsize chunks. 23703 */ 23704 if (un->un_f_has_removable_media) { 23705 media_capacity *= un->un_sys_blocksize; 23706 media_capacity /= lbasize; 23707 } 23708 break; 23709 case EACCES: 23710 rval = EACCES; 23711 goto done; 23712 default: 23713 rval = EIO; 23714 goto done; 23715 } 23716 } else { 23717 if (un->un_f_enable_rmw && 23718 !ISP2(pbsize % DEV_BSIZE)) { 23719 pbsize = SSD_SECSIZE; 23720 } else if (!ISP2(lbasize % DEV_BSIZE) || 23721 !ISP2(pbsize % DEV_BSIZE)) { 23722 pbsize = lbasize = DEV_BSIZE; 23723 } 23724 media_capacity = capacity; 23725 } 23726 23727 /* 23728 * If lun is expanded dynamically, update the un structure. 
         */
        mutex_enter(SD_MUTEX(un));
        if ((un->un_f_blockcount_is_valid == TRUE) &&
            (un->un_f_tgt_blocksize_is_valid == TRUE) &&
            (capacity > un->un_blockcount)) {
                un->un_f_expnevent = B_FALSE;
                sd_update_block_info(un, lbasize, capacity);
        }
        mutex_exit(SD_MUTEX(un));

        *dki_lbsize = lbasize;
        *dki_capacity = media_capacity;
        if (dki_pbsize)
                *dki_pbsize = pbsize;

done:
        if (rval != 0) {
                if (rval == EIO)
                        sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
                else
                        sd_ssc_assessment(ssc, SD_FMT_IGNORE);
        }
no_assessment:
        sd_ssc_fini(ssc);
        kmem_free(out_data, SD_PROFILE_HEADER_LEN);
        kmem_free(rqbuf, SENSE_LENGTH);
        return (rval);
}

/*
 * Function: sd_get_media_info
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFO).
 *
 * Arguments: dev	- the device number
 *		arg	- pointer to user provided dk_minfo structure
 *			  specifying the media type, logical block size and
 *			  drive capacity.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: returns the value from sd_get_media_info_com
 */
static int
sd_get_media_info(dev_t dev, caddr_t arg, int flag)
{
        struct dk_minfo mi;
        int rval;

        rval = sd_get_media_info_com(dev, &mi.dki_media_type,
            &mi.dki_lbsize, &mi.dki_capacity, NULL);

        if (rval)
                return (rval);
        if (ddi_copyout(&mi, arg, sizeof (struct dk_minfo), flag))
                rval = EFAULT;
        return (rval);
}

/*
 * Function: sd_get_media_info_ext
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests for the media type or command set profile used by the
 *		drive to operate on the media (DKIOCGMEDIAINFOEXT). The
 *		difference between this ioctl and DKIOCGMEDIAINFO is that the
 *		return value of this ioctl contains both the logical block size
 *		and the physical block size.
 *
 * Arguments: dev	- the device number
 *		arg	- pointer to user provided dk_minfo_ext structure
 *			  specifying the media type, logical block size,
 *			  physical block size and disk capacity.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: returns the value from sd_get_media_info_com
 */
static int
sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag)
{
        struct dk_minfo_ext mie;
        int rval = 0;
        size_t len;

        rval = sd_get_media_info_com(dev, &mie.dki_media_type,
            &mie.dki_lbsize, &mie.dki_capacity, &mie.dki_pbsize);

        if (rval)
                return (rval);

        switch (ddi_model_convert_from(flag & FMODELS)) {
        case DDI_MODEL_ILP32:
                len = sizeof (struct dk_minfo_ext32);
                break;
        default:
                len = sizeof (struct dk_minfo_ext);
                break;
        }

        if (ddi_copyout(&mie, arg, len, flag))
                rval = EFAULT;
        return (rval);
}

/*
 * Function: sd_watch_request_submit
 *
 * Description: Call scsi_watch_request_submit or scsi_mmc_watch_request_submit
 *		depending on which is supported by the device.
23843 */ 23844 static opaque_t 23845 sd_watch_request_submit(struct sd_lun *un) 23846 { 23847 dev_t dev; 23848 23849 /* All submissions are unified to use same device number */ 23850 dev = sd_make_device(SD_DEVINFO(un)); 23851 23852 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) { 23853 return (scsi_mmc_watch_request_submit(SD_SCSI_DEVP(un), 23854 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 23855 (caddr_t)dev)); 23856 } else { 23857 return (scsi_watch_request_submit(SD_SCSI_DEVP(un), 23858 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 23859 (caddr_t)dev)); 23860 } 23861 } 23862 23863 23864 /* 23865 * Function: sd_check_media 23866 * 23867 * Description: This utility routine implements the functionality for the 23868 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 23869 * driver state changes from that specified by the user 23870 * (inserted or ejected). For example, if the user specifies 23871 * DKIO_EJECTED and the current media state is inserted this 23872 * routine will immediately return DKIO_INSERTED. However, if the 23873 * current media state is not inserted the user thread will be 23874 * blocked until the drive state changes. If DKIO_NONE is specified 23875 * the user thread will block until a drive state change occurs. 23876 * 23877 * Arguments: dev - the device number 23878 * state - user pointer to a dkio_state, updated with the current 23879 * drive state at return. 23880 * 23881 * Return Code: ENXIO 23882 * EIO 23883 * EAGAIN 23884 * EINTR 23885 */ 23886 23887 static int 23888 sd_check_media(dev_t dev, enum dkio_state state) 23889 { 23890 struct sd_lun *un = NULL; 23891 enum dkio_state prev_state; 23892 opaque_t token = NULL; 23893 int rval = 0; 23894 sd_ssc_t *ssc; 23895 23896 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23897 return (ENXIO); 23898 } 23899 23900 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 23901 23902 ssc = sd_ssc_init(un); 23903 23904 mutex_enter(SD_MUTEX(un)); 23905 23906 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 23907 "state=%x, mediastate=%x\n", state, un->un_mediastate); 23908 23909 prev_state = un->un_mediastate; 23910 23911 /* is there anything to do? */ 23912 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 23913 /* 23914 * submit the request to the scsi_watch service; 23915 * scsi_media_watch_cb() does the real work 23916 */ 23917 mutex_exit(SD_MUTEX(un)); 23918 23919 /* 23920 * This change handles the case where a scsi watch request is 23921 * added to a device that is powered down. To accomplish this 23922 * we power up the device before adding the scsi watch request, 23923 * since the scsi watch sends a TUR directly to the device 23924 * which the device cannot handle if it is powered down. 23925 */ 23926 if (sd_pm_entry(un) != DDI_SUCCESS) { 23927 mutex_enter(SD_MUTEX(un)); 23928 goto done; 23929 } 23930 23931 token = sd_watch_request_submit(un); 23932 23933 sd_pm_exit(un); 23934 23935 mutex_enter(SD_MUTEX(un)); 23936 if (token == NULL) { 23937 rval = EAGAIN; 23938 goto done; 23939 } 23940 23941 /* 23942 * This is a special case IOCTL that doesn't return 23943 * until the media state changes. Routine sdpower 23944 * knows about and handles this so don't count it 23945 * as an active cmd in the driver, which would 23946 * keep the device busy to the pm framework. 23947 * If the count isn't decremented the device can't 23948 * be powered down. 
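 * (The count is incremented again below, either once the state change
 * has been observed or on the interrupted-wait error path, so command
 * accounting stays balanced.)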
23949 */ 23950 un->un_ncmds_in_driver--; 23951 ASSERT(un->un_ncmds_in_driver >= 0); 23952 23953 /* 23954 * if a prior request had been made, this will be the same 23955 * token, as scsi_watch was designed that way. 23956 */ 23957 un->un_swr_token = token; 23958 un->un_specified_mediastate = state; 23959 23960 /* 23961 * now wait for media change 23962 * we will not be signalled unless mediastate == state but it is 23963 * still better to test for this condition, since there is a 23964 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 23965 */ 23966 SD_TRACE(SD_LOG_COMMON, un, 23967 "sd_check_media: waiting for media state change\n"); 23968 while (un->un_mediastate == state) { 23969 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 23970 SD_TRACE(SD_LOG_COMMON, un, 23971 "sd_check_media: waiting for media state " 23972 "was interrupted\n"); 23973 un->un_ncmds_in_driver++; 23974 rval = EINTR; 23975 goto done; 23976 } 23977 SD_TRACE(SD_LOG_COMMON, un, 23978 "sd_check_media: received signal, state=%x\n", 23979 un->un_mediastate); 23980 } 23981 /* 23982 * Inc the counter to indicate the device once again 23983 * has an active outstanding cmd. 23984 */ 23985 un->un_ncmds_in_driver++; 23986 } 23987 23988 /* invalidate geometry */ 23989 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 23990 sr_ejected(un); 23991 } 23992 23993 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 23994 uint64_t capacity; 23995 uint_t lbasize; 23996 23997 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 23998 mutex_exit(SD_MUTEX(un)); 23999 /* 24000 * Since the following routines use SD_PATH_DIRECT, we must 24001 * call PM directly before the upcoming disk accesses. This 24002 * may cause the disk to be power/spin up. 24003 */ 24004 24005 if (sd_pm_entry(un) == DDI_SUCCESS) { 24006 rval = sd_send_scsi_READ_CAPACITY(ssc, 24007 &capacity, &lbasize, SD_PATH_DIRECT); 24008 if (rval != 0) { 24009 sd_pm_exit(un); 24010 if (rval == EIO) 24011 sd_ssc_assessment(ssc, 24012 SD_FMT_STATUS_CHECK); 24013 else 24014 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24015 mutex_enter(SD_MUTEX(un)); 24016 goto done; 24017 } 24018 } else { 24019 rval = EIO; 24020 mutex_enter(SD_MUTEX(un)); 24021 goto done; 24022 } 24023 mutex_enter(SD_MUTEX(un)); 24024 24025 sd_update_block_info(un, lbasize, capacity); 24026 24027 /* 24028 * Check if the media in the device is writable or not 24029 */ 24030 if (ISCD(un)) { 24031 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 24032 } 24033 24034 mutex_exit(SD_MUTEX(un)); 24035 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 24036 if ((cmlb_validate(un->un_cmlbhandle, 0, 24037 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 24038 sd_set_pstats(un); 24039 SD_TRACE(SD_LOG_IO_PARTITION, un, 24040 "sd_check_media: un:0x%p pstats created and " 24041 "set\n", un); 24042 } 24043 24044 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 24045 SD_PATH_DIRECT); 24046 24047 sd_pm_exit(un); 24048 24049 if (rval != 0) { 24050 if (rval == EIO) 24051 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24052 else 24053 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24054 } 24055 24056 mutex_enter(SD_MUTEX(un)); 24057 } 24058 done: 24059 sd_ssc_fini(ssc); 24060 un->un_f_watcht_stopped = FALSE; 24061 if (token != NULL && un->un_swr_token != NULL) { 24062 /* 24063 * Use of this local token and the mutex ensures that we avoid 24064 * some race conditions associated with terminating the 24065 * scsi watch. 
24066 */ 24067 token = un->un_swr_token; 24068 mutex_exit(SD_MUTEX(un)); 24069 (void) scsi_watch_request_terminate(token, 24070 SCSI_WATCH_TERMINATE_WAIT); 24071 if (scsi_watch_get_ref_count(token) == 0) { 24072 mutex_enter(SD_MUTEX(un)); 24073 un->un_swr_token = (opaque_t)NULL; 24074 } else { 24075 mutex_enter(SD_MUTEX(un)); 24076 } 24077 } 24078 24079 /* 24080 * Update the capacity kstat value, if no media previously 24081 * (capacity kstat is 0) and a media has been inserted 24082 * (un_f_blockcount_is_valid == TRUE) 24083 */ 24084 if (un->un_errstats) { 24085 struct sd_errstats *stp = NULL; 24086 24087 stp = (struct sd_errstats *)un->un_errstats->ks_data; 24088 if ((stp->sd_capacity.value.ui64 == 0) && 24089 (un->un_f_blockcount_is_valid == TRUE)) { 24090 stp->sd_capacity.value.ui64 = 24091 (uint64_t)((uint64_t)un->un_blockcount * 24092 un->un_sys_blocksize); 24093 } 24094 } 24095 mutex_exit(SD_MUTEX(un)); 24096 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 24097 return (rval); 24098 } 24099 24100 24101 /* 24102 * Function: sd_delayed_cv_broadcast 24103 * 24104 * Description: Delayed cv_broadcast to allow for target to recover from media 24105 * insertion. 24106 * 24107 * Arguments: arg - driver soft state (unit) structure 24108 */ 24109 24110 static void 24111 sd_delayed_cv_broadcast(void *arg) 24112 { 24113 struct sd_lun *un = arg; 24114 24115 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 24116 24117 mutex_enter(SD_MUTEX(un)); 24118 un->un_dcvb_timeid = NULL; 24119 cv_broadcast(&un->un_state_cv); 24120 mutex_exit(SD_MUTEX(un)); 24121 } 24122 24123 24124 /* 24125 * Function: sd_media_watch_cb 24126 * 24127 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 24128 * routine processes the TUR sense data and updates the driver 24129 * state if a transition has occurred. The user thread 24130 * (sd_check_media) is then signalled. 
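 * For DKIO_INSERTED transitions the broadcast is deferred by
 * MEDIA_ACCESS_DELAY (via sd_delayed_cv_broadcast() above) to give
 * the target time to settle after an insertion.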
24131 *
24132 * Arguments: arg - the device 'dev_t' is used for context to discriminate
24133 * among multiple watches that share this callback function
24134 * resultp - scsi watch facility result packet containing scsi
24135 * packet, status byte and sense data
24136 *
24137 * Return Code: 0 for success, -1 for failure
24138 */
24139
24140 static int
24141 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
24142 {
24143 struct sd_lun *un;
24144 struct scsi_status *statusp = resultp->statusp;
24145 uint8_t *sensep = (uint8_t *)resultp->sensep;
24146 enum dkio_state state = DKIO_NONE;
24147 dev_t dev = (dev_t)arg;
24148 uchar_t actual_sense_length;
24149 uint8_t skey, asc, ascq;
24150
24151 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24152 return (-1);
24153 }
24154 actual_sense_length = resultp->actual_sense_length;
24155
24156 mutex_enter(SD_MUTEX(un));
24157 SD_TRACE(SD_LOG_COMMON, un,
24158 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
24159 *((char *)statusp), (void *)sensep, actual_sense_length);
24160
24161 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
24162 un->un_mediastate = DKIO_DEV_GONE;
24163 cv_broadcast(&un->un_state_cv);
24164 mutex_exit(SD_MUTEX(un));
24165
24166 return (0);
24167 }
24168
24169 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
24170 if (sd_gesn_media_data_valid(resultp->mmc_data)) {
24171 if ((resultp->mmc_data[5] &
24172 SD_GESN_MEDIA_EVENT_STATUS_PRESENT) != 0) {
24173 state = DKIO_INSERTED;
24174 } else {
24175 state = DKIO_EJECTED;
24176 }
24177 if ((resultp->mmc_data[4] & SD_GESN_MEDIA_EVENT_CODE) ==
24178 SD_GESN_MEDIA_EVENT_EJECTREQUEST) {
24179 sd_log_eject_request_event(un, KM_NOSLEEP);
24180 }
24181 }
24182 } else if (sensep != NULL) {
24183 /*
24184 * If there was a check condition then sensep points to valid
24185 * sense data. If status was not a check condition but a
24186 * reservation or busy status then the new state is DKIO_NONE.
24187 */
24188 skey = scsi_sense_key(sensep);
24189 asc = scsi_sense_asc(sensep);
24190 ascq = scsi_sense_ascq(sensep);
24191
24192 SD_INFO(SD_LOG_COMMON, un,
24193 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
24194 skey, asc, ascq);
24195 /* This routine only uses up to 13 bytes of sense data. */
24196 if (actual_sense_length >= 13) {
24197 if (skey == KEY_UNIT_ATTENTION) {
24198 if (asc == 0x28) {
24199 state = DKIO_INSERTED;
24200 }
24201 } else if (skey == KEY_NOT_READY) {
24202 /*
24203 * Sense data of 02/06/00 means that the
24204 * drive could not read the media (No
24205 * reference position found). In this case
24206 * to prevent a hang on the DKIOCSTATE IOCTL
24207 * we set the media state to DKIO_INSERTED.
24208 */
24209 if (asc == 0x06 && ascq == 0x00)
24210 state = DKIO_INSERTED;
24211
24212 /*
24213 * Sense data of 02/04/02 means that the host
24214 * should send a START UNIT command. Explicitly
24215 * leave the media state as is (inserted),
24216 * since the media is present and the host has
24217 * stopped the device for PM reasons. The next
24218 * real read or write to the media will bring
24219 * the device back to the proper state for
24220 * media access.
24221 *
24222 */
24223 if (asc == 0x3a) {
24224 state = DKIO_EJECTED;
24225 } else {
24226 /*
24227 * If the drive is busy with an
24228 * operation or long write, keep the
24229 * media in an inserted state.
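 * (Per the SCSI spec, ASC 0x04 with ASCQ 0x02, 0x07 or 0x08 decodes
 * as "not ready: initializing command required / operation in
 * progress / long write in progress", respectively.)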
24230 */ 24231 24232 if ((asc == 0x04) && 24233 ((ascq == 0x02) || 24234 (ascq == 0x07) || 24235 (ascq == 0x08))) { 24236 state = DKIO_INSERTED; 24237 } 24238 } 24239 } else if (skey == KEY_NO_SENSE) { 24240 if ((asc == 0x00) && (ascq == 0x00)) { 24241 /* 24242 * Sense Data 00/00/00 does not provide 24243 * any information about the state of 24244 * the media. Ignore it. 24245 */ 24246 mutex_exit(SD_MUTEX(un)); 24247 return (0); 24248 } 24249 } 24250 } 24251 } else if ((*((char *)statusp) == STATUS_GOOD) && 24252 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 24253 state = DKIO_INSERTED; 24254 } 24255 24256 SD_TRACE(SD_LOG_COMMON, un, 24257 "sd_media_watch_cb: state=%x, specified=%x\n", 24258 state, un->un_specified_mediastate); 24259 24260 /* 24261 * now signal the waiting thread if this is *not* the specified state; 24262 * delay the signal if the state is DKIO_INSERTED to allow the target 24263 * to recover 24264 */ 24265 if (state != un->un_specified_mediastate) { 24266 un->un_mediastate = state; 24267 if (state == DKIO_INSERTED) { 24268 /* 24269 * delay the signal to give the drive a chance 24270 * to do what it apparently needs to do 24271 */ 24272 SD_TRACE(SD_LOG_COMMON, un, 24273 "sd_media_watch_cb: delayed cv_broadcast\n"); 24274 if (un->un_dcvb_timeid == NULL) { 24275 un->un_dcvb_timeid = 24276 timeout(sd_delayed_cv_broadcast, un, 24277 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 24278 } 24279 } else { 24280 SD_TRACE(SD_LOG_COMMON, un, 24281 "sd_media_watch_cb: immediate cv_broadcast\n"); 24282 cv_broadcast(&un->un_state_cv); 24283 } 24284 } 24285 mutex_exit(SD_MUTEX(un)); 24286 return (0); 24287 } 24288 24289 24290 /* 24291 * Function: sd_dkio_get_temp 24292 * 24293 * Description: This routine is the driver entry point for handling ioctl 24294 * requests to get the disk temperature. 24295 * 24296 * Arguments: dev - the device number 24297 * arg - pointer to user provided dk_temperature structure. 24298 * flag - this argument is a pass through to ddi_copyxxx() 24299 * directly from the mode argument of ioctl(). 24300 * 24301 * Return Code: 0 24302 * EFAULT 24303 * ENXIO 24304 * EAGAIN 24305 */ 24306 24307 static int 24308 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 24309 { 24310 struct sd_lun *un = NULL; 24311 struct dk_temperature *dktemp = NULL; 24312 uchar_t *temperature_page; 24313 int rval = 0; 24314 int path_flag = SD_PATH_STANDARD; 24315 sd_ssc_t *ssc; 24316 24317 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24318 return (ENXIO); 24319 } 24320 24321 ssc = sd_ssc_init(un); 24322 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 24323 24324 /* copyin the disk temp argument to get the user flags */ 24325 if (ddi_copyin((void *)arg, dktemp, 24326 sizeof (struct dk_temperature), flag) != 0) { 24327 rval = EFAULT; 24328 goto done; 24329 } 24330 24331 /* Initialize the temperature to invalid. */ 24332 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 24333 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 24334 24335 /* 24336 * Note: Investigate removing the "bypass pm" semantic. 24337 * Can we just bypass PM always? 24338 */ 24339 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 24340 path_flag = SD_PATH_DIRECT; 24341 ASSERT(!mutex_owned(&un->un_pm_mutex)); 24342 mutex_enter(&un->un_pm_mutex); 24343 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 24344 /* 24345 * If DKT_BYPASS_PM is set, and the drive happens to be 24346 * in low power mode, we can not wake it up, Need to 24347 * return EAGAIN. 
24348 */ 24349 mutex_exit(&un->un_pm_mutex); 24350 rval = EAGAIN; 24351 goto done; 24352 } else { 24353 /* 24354 * Indicate to PM the device is busy. This is required 24355 * to avoid a race - i.e. the ioctl is issuing a 24356 * command and the pm framework brings down the device 24357 * to low power mode (possible power cut-off on some 24358 * platforms). 24359 */ 24360 mutex_exit(&un->un_pm_mutex); 24361 if (sd_pm_entry(un) != DDI_SUCCESS) { 24362 rval = EAGAIN; 24363 goto done; 24364 } 24365 } 24366 } 24367 24368 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 24369 24370 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page, 24371 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag); 24372 if (rval != 0) 24373 goto done2; 24374 24375 /* 24376 * For the current temperature verify that the parameter length is 0x02 24377 * and the parameter code is 0x00 24378 */ 24379 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 24380 (temperature_page[5] == 0x00)) { 24381 if (temperature_page[9] == 0xFF) { 24382 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 24383 } else { 24384 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 24385 } 24386 } 24387 24388 /* 24389 * For the reference temperature verify that the parameter 24390 * length is 0x02 and the parameter code is 0x01 24391 */ 24392 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 24393 (temperature_page[11] == 0x01)) { 24394 if (temperature_page[15] == 0xFF) { 24395 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 24396 } else { 24397 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 24398 } 24399 } 24400 24401 /* Do the copyout regardless of the temperature commands status. */ 24402 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 24403 flag) != 0) { 24404 rval = EFAULT; 24405 goto done1; 24406 } 24407 24408 done2: 24409 if (rval != 0) { 24410 if (rval == EIO) 24411 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 24412 else 24413 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 24414 } 24415 done1: 24416 if (path_flag == SD_PATH_DIRECT) { 24417 sd_pm_exit(un); 24418 } 24419 24420 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 24421 done: 24422 sd_ssc_fini(ssc); 24423 if (dktemp != NULL) { 24424 kmem_free(dktemp, sizeof (struct dk_temperature)); 24425 } 24426 24427 return (rval); 24428 } 24429 24430 24431 /* 24432 * Function: sd_log_page_supported 24433 * 24434 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 24435 * supported log pages. 24436 * 24437 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 24438 * structure for this target. 24439 * log_page - 24440 * 24441 * Return Code: -1 - on error (log sense is optional and may not be supported). 24442 * 0 - log page not found. 24443 * 1 - log page found. 24444 */ 24445 24446 static int 24447 sd_log_page_supported(sd_ssc_t *ssc, int log_page) 24448 { 24449 uchar_t *log_page_data; 24450 int i; 24451 int match = 0; 24452 int log_size; 24453 int status = 0; 24454 struct sd_lun *un; 24455 24456 ASSERT(ssc != NULL); 24457 un = ssc->ssc_un; 24458 ASSERT(un != NULL); 24459 24460 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 24461 24462 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0, 24463 SD_PATH_DIRECT); 24464 24465 if (status != 0) { 24466 if (status == EIO) { 24467 /* 24468 * Some disks do not support log sense, we 24469 * should ignore this kind of error(sense key is 24470 * 0x5 - illegal request). 
24471 */
24472 uint8_t *sensep;
24473 int senlen;
24474
24475 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
24476 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
24477 ssc->ssc_uscsi_cmd->uscsi_rqresid);
24478
24479 if (senlen > 0 &&
24480 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
24481 sd_ssc_assessment(ssc,
24482 SD_FMT_IGNORE_COMPROMISE);
24483 } else {
24484 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24485 }
24486 } else {
24487 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24488 }
24489
24490 SD_ERROR(SD_LOG_COMMON, un,
24491 "sd_log_page_supported: failed log page retrieval\n");
24492 kmem_free(log_page_data, 0xFF);
24493 return (-1);
24494 }
24495
24496 log_size = log_page_data[3];
24497
24498 /*
24499 * The list of supported log pages starts at the fourth byte. Check
24500 * until we run out of log pages or a match is found.
24501 */
24502 for (i = 4; (i < (log_size + 4)) && !match; i++) {
24503 if (log_page_data[i] == log_page) {
24504 match++;
24505 }
24506 }
24507 kmem_free(log_page_data, 0xFF);
24508 return (match);
24509 }
24510
24511
24512 /*
24513 * Function: sd_mhdioc_failfast
24514 *
24515 * Description: This routine is the driver entry point for handling ioctl
24516 * requests to enable/disable the multihost failfast option.
24517 * (MHIOCENFAILFAST)
24518 *
24519 * Arguments: dev - the device number
24520 * arg - user specified probing interval.
24521 * flag - this argument is a pass through to ddi_copyxxx()
24522 * directly from the mode argument of ioctl().
24523 *
24524 * Return Code: 0
24525 * EFAULT
24526 * ENXIO
24527 */
24528
24529 static int
24530 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag)
24531 {
24532 struct sd_lun *un = NULL;
24533 int mh_time;
24534 int rval = 0;
24535
24536 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24537 return (ENXIO);
24538 }
24539
24540 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag))
24541 return (EFAULT);
24542
24543 if (mh_time) {
24544 mutex_enter(SD_MUTEX(un));
24545 un->un_resvd_status |= SD_FAILFAST;
24546 mutex_exit(SD_MUTEX(un));
24547 /*
24548 * If mh_time is INT_MAX, then this ioctl is being used for
24549 * SCSI-3 PGR purposes, and we don't need to spawn a watch thread.
24550 */
24551 if (mh_time != INT_MAX) {
24552 rval = sd_check_mhd(dev, mh_time);
24553 }
24554 } else {
24555 (void) sd_check_mhd(dev, 0);
24556 mutex_enter(SD_MUTEX(un));
24557 un->un_resvd_status &= ~SD_FAILFAST;
24558 mutex_exit(SD_MUTEX(un));
24559 }
24560 return (rval);
24561 }
24562
24563
24564 /*
24565 * Function: sd_mhdioc_takeown
24566 *
24567 * Description: This routine is the driver entry point for handling ioctl
24568 * requests to forcefully acquire exclusive access rights to the
24569 * multihost disk (MHIOCTKOWN).
24570 *
24571 * Arguments: dev - the device number
24572 * arg - user provided structure specifying the delay
24573 * parameters in milliseconds
24574 * flag - this argument is a pass through to ddi_copyxxx()
24575 * directly from the mode argument of ioctl().
24576 * 24577 * Return Code: 0 24578 * EFAULT 24579 * ENXIO 24580 */ 24581 24582 static int 24583 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 24584 { 24585 struct sd_lun *un = NULL; 24586 struct mhioctkown *tkown = NULL; 24587 int rval = 0; 24588 24589 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24590 return (ENXIO); 24591 } 24592 24593 if (arg != NULL) { 24594 tkown = (struct mhioctkown *) 24595 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 24596 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 24597 if (rval != 0) { 24598 rval = EFAULT; 24599 goto error; 24600 } 24601 } 24602 24603 rval = sd_take_ownership(dev, tkown); 24604 mutex_enter(SD_MUTEX(un)); 24605 if (rval == 0) { 24606 un->un_resvd_status |= SD_RESERVE; 24607 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 24608 sd_reinstate_resv_delay = 24609 tkown->reinstate_resv_delay * 1000; 24610 } else { 24611 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 24612 } 24613 /* 24614 * Give the scsi_watch routine interval set by 24615 * the MHIOCENFAILFAST ioctl precedence here. 24616 */ 24617 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 24618 mutex_exit(SD_MUTEX(un)); 24619 (void) sd_check_mhd(dev, 24620 sd_reinstate_resv_delay / 1000); 24621 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24622 "sd_mhdioc_takeown : %d\n", 24623 sd_reinstate_resv_delay); 24624 } else { 24625 mutex_exit(SD_MUTEX(un)); 24626 } 24627 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 24628 sd_mhd_reset_notify_cb, (caddr_t)un); 24629 } else { 24630 un->un_resvd_status &= ~SD_RESERVE; 24631 mutex_exit(SD_MUTEX(un)); 24632 } 24633 24634 error: 24635 if (tkown != NULL) { 24636 kmem_free(tkown, sizeof (struct mhioctkown)); 24637 } 24638 return (rval); 24639 } 24640 24641 24642 /* 24643 * Function: sd_mhdioc_release 24644 * 24645 * Description: This routine is the driver entry point for handling ioctl 24646 * requests to release exclusive access rights to the multihost 24647 * disk (MHIOCRELEASE). 24648 * 24649 * Arguments: dev - the device number 24650 * 24651 * Return Code: 0 24652 * ENXIO 24653 */ 24654 24655 static int 24656 sd_mhdioc_release(dev_t dev) 24657 { 24658 struct sd_lun *un = NULL; 24659 timeout_id_t resvd_timeid_save; 24660 int resvd_status_save; 24661 int rval = 0; 24662 24663 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24664 return (ENXIO); 24665 } 24666 24667 mutex_enter(SD_MUTEX(un)); 24668 resvd_status_save = un->un_resvd_status; 24669 un->un_resvd_status &= 24670 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 24671 if (un->un_resvd_timeid) { 24672 resvd_timeid_save = un->un_resvd_timeid; 24673 un->un_resvd_timeid = NULL; 24674 mutex_exit(SD_MUTEX(un)); 24675 (void) untimeout(resvd_timeid_save); 24676 } else { 24677 mutex_exit(SD_MUTEX(un)); 24678 } 24679 24680 /* 24681 * destroy any pending timeout thread that may be attempting to 24682 * reinstate reservation on this device. 
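 * (sd_rmv_resv_reclaim_req() also waits, on srq_inprocess_cv, for any
 * reclaim that is already in flight for this device to finish.)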
24683 */
24684 sd_rmv_resv_reclaim_req(dev);
24685
24686 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) {
24687 mutex_enter(SD_MUTEX(un));
24688 if ((un->un_mhd_token) &&
24689 ((un->un_resvd_status & SD_FAILFAST) == 0)) {
24690 mutex_exit(SD_MUTEX(un));
24691 (void) sd_check_mhd(dev, 0);
24692 } else {
24693 mutex_exit(SD_MUTEX(un));
24694 }
24695 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
24696 sd_mhd_reset_notify_cb, (caddr_t)un);
24697 } else {
24698 /*
24699 * sd_mhd_watch_cb will restart the resvd recover timeout thread
24700 */
24701 mutex_enter(SD_MUTEX(un));
24702 un->un_resvd_status = resvd_status_save;
24703 mutex_exit(SD_MUTEX(un));
24704 }
24705 return (rval);
24706 }
24707
24708
24709 /*
24710 * Function: sd_mhdioc_register_devid
24711 *
24712 * Description: This routine is the driver entry point for handling ioctl
24713 * requests to register the device id (MHIOCREREGISTERDEVID).
24714 *
24715 * Note: The implementation for this ioctl has been updated to
24716 * be consistent with the original PSARC case (1999/357)
24717 * (4375899, 4241671, 4220005)
24718 *
24719 * Arguments: dev - the device number
24720 *
24721 * Return Code: 0
24722 * ENXIO
24723 */
24724
24725 static int
24726 sd_mhdioc_register_devid(dev_t dev)
24727 {
24728 struct sd_lun *un = NULL;
24729 int rval = 0;
24730 sd_ssc_t *ssc;
24731
24732 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24733 return (ENXIO);
24734 }
24735
24736 ASSERT(!mutex_owned(SD_MUTEX(un)));
24737
24738 mutex_enter(SD_MUTEX(un));
24739
24740 /* If a devid already exists, de-register it */
24741 if (un->un_devid != NULL) {
24742 ddi_devid_unregister(SD_DEVINFO(un));
24743 /*
24744 * After unregistering the devid, free the devid memory.
24745 */
24746 ddi_devid_free(un->un_devid);
24747 un->un_devid = NULL;
24748 }
24749
24750 /* Check for reservation conflict */
24751 mutex_exit(SD_MUTEX(un));
24752 ssc = sd_ssc_init(un);
24753 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
24754 mutex_enter(SD_MUTEX(un));
24755
24756 switch (rval) {
24757 case 0:
24758 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
24759 break;
24760 case EACCES:
24761 break;
24762 default:
24763 rval = EIO;
24764 }
24765
24766 mutex_exit(SD_MUTEX(un));
24767 if (rval != 0) {
24768 if (rval == EIO)
24769 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24770 else
24771 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24772 }
24773 sd_ssc_fini(ssc);
24774 return (rval);
24775 }
24776
24777
24778 /*
24779 * Function: sd_mhdioc_inkeys
24780 *
24781 * Description: This routine is the driver entry point for handling ioctl
24782 * requests to issue the SCSI-3 Persistent In Read Keys command
24783 * to the device (MHIOCGRP_INKEYS).
24784 *
24785 * Arguments: dev - the device number
24786 * arg - user provided in_keys structure
24787 * flag - this argument is a pass through to ddi_copyxxx()
24788 * directly from the mode argument of ioctl().
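 *
 * Note: under _MULTI_DATAMODEL, the ILP32 caller's mhioc_inkeys32 is
 * copied in and its key-list pointer widened before calling
 * sd_persistent_reservation_in_read_keys(); the updated generation
 * count is then copied back out in the 32-bit form.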
24789 *
24790 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
24791 * ENXIO
24792 * EFAULT
24793 */
24794
24795 static int
24796 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
24797 {
24798 struct sd_lun *un;
24799 mhioc_inkeys_t inkeys;
24800 int rval = 0;
24801
24802 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24803 return (ENXIO);
24804 }
24805
24806 #ifdef _MULTI_DATAMODEL
24807 switch (ddi_model_convert_from(flag & FMODELS)) {
24808 case DDI_MODEL_ILP32: {
24809 struct mhioc_inkeys32 inkeys32;
24810
24811 if (ddi_copyin(arg, &inkeys32,
24812 sizeof (struct mhioc_inkeys32), flag) != 0) {
24813 return (EFAULT);
24814 }
24815 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
24816 if ((rval = sd_persistent_reservation_in_read_keys(un,
24817 &inkeys, flag)) != 0) {
24818 return (rval);
24819 }
24820 inkeys32.generation = inkeys.generation;
24821 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
24822 flag) != 0) {
24823 return (EFAULT);
24824 }
24825 break;
24826 }
24827 case DDI_MODEL_NONE:
24828 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
24829 flag) != 0) {
24830 return (EFAULT);
24831 }
24832 if ((rval = sd_persistent_reservation_in_read_keys(un,
24833 &inkeys, flag)) != 0) {
24834 return (rval);
24835 }
24836 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
24837 flag) != 0) {
24838 return (EFAULT);
24839 }
24840 break;
24841 }
24842
24843 #else /* ! _MULTI_DATAMODEL */
24844
24845 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
24846 return (EFAULT);
24847 }
24848 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
24849 if (rval != 0) {
24850 return (rval);
24851 }
24852 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
24853 return (EFAULT);
24854 }
24855
24856 #endif /* _MULTI_DATAMODEL */
24857
24858 return (rval);
24859 }
24860
24861
24862 /*
24863 * Function: sd_mhdioc_inresv
24864 *
24865 * Description: This routine is the driver entry point for handling ioctl
24866 * requests to issue the SCSI-3 Persistent In Read Reservations
24867 * command to the device (MHIOCGRP_INRESV).
24868 *
24869 * Arguments: dev - the device number
24870 * arg - user provided in_resv structure
24871 * flag - this argument is a pass through to ddi_copyxxx()
24872 * directly from the mode argument of ioctl().
24873 * 24874 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 24875 * ENXIO 24876 * EFAULT 24877 */ 24878 24879 static int 24880 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 24881 { 24882 struct sd_lun *un; 24883 mhioc_inresvs_t inresvs; 24884 int rval = 0; 24885 24886 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24887 return (ENXIO); 24888 } 24889 24890 #ifdef _MULTI_DATAMODEL 24891 24892 switch (ddi_model_convert_from(flag & FMODELS)) { 24893 case DDI_MODEL_ILP32: { 24894 struct mhioc_inresvs32 inresvs32; 24895 24896 if (ddi_copyin(arg, &inresvs32, 24897 sizeof (struct mhioc_inresvs32), flag) != 0) { 24898 return (EFAULT); 24899 } 24900 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 24901 if ((rval = sd_persistent_reservation_in_read_resv(un, 24902 &inresvs, flag)) != 0) { 24903 return (rval); 24904 } 24905 inresvs32.generation = inresvs.generation; 24906 if (ddi_copyout(&inresvs32, arg, 24907 sizeof (struct mhioc_inresvs32), flag) != 0) { 24908 return (EFAULT); 24909 } 24910 break; 24911 } 24912 case DDI_MODEL_NONE: 24913 if (ddi_copyin(arg, &inresvs, 24914 sizeof (mhioc_inresvs_t), flag) != 0) { 24915 return (EFAULT); 24916 } 24917 if ((rval = sd_persistent_reservation_in_read_resv(un, 24918 &inresvs, flag)) != 0) { 24919 return (rval); 24920 } 24921 if (ddi_copyout(&inresvs, arg, 24922 sizeof (mhioc_inresvs_t), flag) != 0) { 24923 return (EFAULT); 24924 } 24925 break; 24926 } 24927 24928 #else /* ! _MULTI_DATAMODEL */ 24929 24930 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 24931 return (EFAULT); 24932 } 24933 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 24934 if (rval != 0) { 24935 return (rval); 24936 } 24937 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 24938 return (EFAULT); 24939 } 24940 24941 #endif /* ! _MULTI_DATAMODEL */ 24942 24943 return (rval); 24944 } 24945 24946 24947 /* 24948 * The following routines support the clustering functionality described below 24949 * and implement lost reservation reclaim functionality. 24950 * 24951 * Clustering 24952 * ---------- 24953 * The clustering code uses two different, independent forms of SCSI 24954 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 24955 * Persistent Group Reservations. For any particular disk, it will use either 24956 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 24957 * 24958 * SCSI-2 24959 * The cluster software takes ownership of a multi-hosted disk by issuing the 24960 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 24961 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 24962 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 24963 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 24964 * driver. The meaning of failfast is that if the driver (on this host) ever 24965 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 24966 * it should immediately panic the host. The motivation for this ioctl is that 24967 * if this host does encounter reservation conflict, the underlying cause is 24968 * that some other host of the cluster has decided that this host is no longer 24969 * in the cluster and has seized control of the disks for itself. Since this 24970 * host is no longer in the cluster, it ought to panic itself. 
The
24971 * MHIOCENFAILFAST ioctl does two things:
24972 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT
24973 * error to panic the host
24974 * (b) it sets up a periodic timer to test whether this host still has
24975 * "access" (in that no other host has reserved the device): if the
24976 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
24977 * purpose of that periodic timer is to handle scenarios where the host is
24978 * otherwise temporarily quiescent, temporarily doing no real i/o.
24979 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host,
24980 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for
24981 * the device itself.
24982 *
24983 * SCSI-3 PGR
24984 * A direct semantic implementation of the SCSI-3 Persistent Reservation
24985 * facility is supported through the shared multihost disk ioctls
24986 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
24987 * MHIOCGRP_PREEMPTANDABORT, MHIOCGRP_CLEAR).
24988 *
24989 * Reservation Reclaim:
24990 * --------------------
24991 * To support the lost reservation reclaim operations this driver creates a
24992 * single thread to handle reinstating reservations on all devices that have
24993 * lost reservations. sd_resv_reclaim_requests are logged for all devices
24994 * that have LOST RESERVATIONS when the scsi watch facility calls back
24995 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the
24996 * requests to regain the lost reservations.
24997 */
24998
24999 /*
25000 * Function: sd_check_mhd()
25001 *
25002 * Description: This function sets up and submits a scsi watch request or
25003 * terminates an existing watch request. This routine is used in
25004 * support of reservation reclaim.
25005 *
25006 * Arguments: dev - the device 'dev_t' is used for context to discriminate
25007 * among multiple watches that share the callback function
25008 * interval - the number of milliseconds specifying the watch
25009 * interval for issuing TEST UNIT READY commands. If
25010 * set to 0 the watch should be terminated. If the
25011 * interval is set to 0 and if the device is required
25012 * to hold reservation while disabling failfast, the
25013 * watch is restarted with an interval of
25014 * reinstate_resv_delay.
25015 *
25016 * Return Code: 0 - Successful submit/terminate of scsi watch request
25017 * ENXIO - Indicates an invalid device was specified
25018 * EAGAIN - Unable to submit the scsi watch request
25019 */
25020
25021 static int
25022 sd_check_mhd(dev_t dev, int interval)
25023 {
25024 struct sd_lun *un;
25025 opaque_t token;
25026
25027 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25028 return (ENXIO);
25029 }
25030
25031 /* is this a watch termination request? */
25032 if (interval == 0) {
25033 mutex_enter(SD_MUTEX(un));
25034 /* if there is an existing watch task then terminate it */
25035 if (un->un_mhd_token) {
25036 token = un->un_mhd_token;
25037 un->un_mhd_token = NULL;
25038 mutex_exit(SD_MUTEX(un));
25039 (void) scsi_watch_request_terminate(token,
25040 SCSI_WATCH_TERMINATE_ALL_WAIT);
25041 mutex_enter(SD_MUTEX(un));
25042 } else {
25043 mutex_exit(SD_MUTEX(un));
25044 /*
25045 * Note: If we return here we don't check for the
25046 * failfast case. This is the original legacy
25047 * implementation but perhaps we should be checking
25048 * the failfast case.
25049 */ 25050 return (0); 25051 } 25052 /* 25053 * If the device is required to hold reservation while 25054 * disabling failfast, we need to restart the scsi_watch 25055 * routine with an interval of reinstate_resv_delay. 25056 */ 25057 if (un->un_resvd_status & SD_RESERVE) { 25058 interval = sd_reinstate_resv_delay / 1000; 25059 } else { 25060 /* no failfast so bail */ 25061 mutex_exit(SD_MUTEX(un)); 25062 return (0); 25063 } 25064 mutex_exit(SD_MUTEX(un)); 25065 } 25066 25067 /* 25068 * adjust minimum time interval to 1 second, 25069 * and convert from msecs to usecs 25070 */ 25071 if (interval > 0 && interval < 1000) { 25072 interval = 1000; 25073 } 25074 interval *= 1000; 25075 25076 /* 25077 * submit the request to the scsi_watch service 25078 */ 25079 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 25080 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 25081 if (token == NULL) { 25082 return (EAGAIN); 25083 } 25084 25085 /* 25086 * save token for termination later on 25087 */ 25088 mutex_enter(SD_MUTEX(un)); 25089 un->un_mhd_token = token; 25090 mutex_exit(SD_MUTEX(un)); 25091 return (0); 25092 } 25093 25094 25095 /* 25096 * Function: sd_mhd_watch_cb() 25097 * 25098 * Description: This function is the call back function used by the scsi watch 25099 * facility. The scsi watch facility sends the "Test Unit Ready" 25100 * and processes the status. If applicable (i.e. a "Unit Attention" 25101 * status and automatic "Request Sense" not used) the scsi watch 25102 * facility will send a "Request Sense" and retrieve the sense data 25103 * to be passed to this callback function. In either case the 25104 * automatic "Request Sense" or the facility submitting one, this 25105 * callback is passed the status and sense data. 25106 * 25107 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25108 * among multiple watches that share this callback function 25109 * resultp - scsi watch facility result packet containing scsi 25110 * packet, status byte and sense data 25111 * 25112 * Return Code: 0 - continue the watch task 25113 * non-zero - terminate the watch task 25114 */ 25115 25116 static int 25117 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 25118 { 25119 struct sd_lun *un; 25120 struct scsi_status *statusp; 25121 uint8_t *sensep; 25122 struct scsi_pkt *pkt; 25123 uchar_t actual_sense_length; 25124 dev_t dev = (dev_t)arg; 25125 25126 ASSERT(resultp != NULL); 25127 statusp = resultp->statusp; 25128 sensep = (uint8_t *)resultp->sensep; 25129 pkt = resultp->pkt; 25130 actual_sense_length = resultp->actual_sense_length; 25131 25132 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25133 return (ENXIO); 25134 } 25135 25136 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25137 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 25138 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 25139 25140 /* Begin processing of the status and/or sense data */ 25141 if (pkt->pkt_reason != CMD_CMPLT) { 25142 /* Handle the incomplete packet */ 25143 sd_mhd_watch_incomplete(un, pkt); 25144 return (0); 25145 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 25146 if (*((unsigned char *)statusp) 25147 == STATUS_RESERVATION_CONFLICT) { 25148 /* 25149 * Handle a reservation conflict by panicking if 25150 * configured for failfast or by logging the conflict 25151 * and updating the reservation status 25152 */ 25153 mutex_enter(SD_MUTEX(un)); 25154 if ((un->un_resvd_status & SD_FAILFAST) && 25155 (sd_failfast_enable)) { 25156 
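/*
 * Another cluster host has seized the disk; with failfast
 * armed, the only safe response is an immediate panic.
 */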
sd_panic_for_res_conflict(un); 25157 /*NOTREACHED*/ 25158 } 25159 SD_INFO(SD_LOG_IOCTL_MHD, un, 25160 "sd_mhd_watch_cb: Reservation Conflict\n"); 25161 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 25162 mutex_exit(SD_MUTEX(un)); 25163 } 25164 } 25165 25166 if (sensep != NULL) { 25167 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 25168 mutex_enter(SD_MUTEX(un)); 25169 if ((scsi_sense_asc(sensep) == 25170 SD_SCSI_RESET_SENSE_CODE) && 25171 (un->un_resvd_status & SD_RESERVE)) { 25172 /* 25173 * The additional sense code indicates a power 25174 * on or bus device reset has occurred; update 25175 * the reservation status. 25176 */ 25177 un->un_resvd_status |= 25178 (SD_LOST_RESERVE | SD_WANT_RESERVE); 25179 SD_INFO(SD_LOG_IOCTL_MHD, un, 25180 "sd_mhd_watch_cb: Lost Reservation\n"); 25181 } 25182 } else { 25183 return (0); 25184 } 25185 } else { 25186 mutex_enter(SD_MUTEX(un)); 25187 } 25188 25189 if ((un->un_resvd_status & SD_RESERVE) && 25190 (un->un_resvd_status & SD_LOST_RESERVE)) { 25191 if (un->un_resvd_status & SD_WANT_RESERVE) { 25192 /* 25193 * A reset occurred in between the last probe and this 25194 * one so if a timeout is pending cancel it. 25195 */ 25196 if (un->un_resvd_timeid) { 25197 timeout_id_t temp_id = un->un_resvd_timeid; 25198 un->un_resvd_timeid = NULL; 25199 mutex_exit(SD_MUTEX(un)); 25200 (void) untimeout(temp_id); 25201 mutex_enter(SD_MUTEX(un)); 25202 } 25203 un->un_resvd_status &= ~SD_WANT_RESERVE; 25204 } 25205 if (un->un_resvd_timeid == 0) { 25206 /* Schedule a timeout to handle the lost reservation */ 25207 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 25208 (void *)dev, 25209 drv_usectohz(sd_reinstate_resv_delay)); 25210 } 25211 } 25212 mutex_exit(SD_MUTEX(un)); 25213 return (0); 25214 } 25215 25216 25217 /* 25218 * Function: sd_mhd_watch_incomplete() 25219 * 25220 * Description: This function is used to find out why a scsi pkt sent by the 25221 * scsi watch facility was not completed. Under some scenarios this 25222 * routine will return. Otherwise it will send a bus reset to see 25223 * if the drive is still online. 25224 * 25225 * Arguments: un - driver soft state (unit) structure 25226 * pkt - incomplete scsi pkt 25227 */ 25228 25229 static void 25230 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 25231 { 25232 int be_chatty; 25233 int perr; 25234 25235 ASSERT(pkt != NULL); 25236 ASSERT(un != NULL); 25237 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 25238 perr = (pkt->pkt_statistics & STAT_PERR); 25239 25240 mutex_enter(SD_MUTEX(un)); 25241 if (un->un_state == SD_STATE_DUMPING) { 25242 mutex_exit(SD_MUTEX(un)); 25243 return; 25244 } 25245 25246 switch (pkt->pkt_reason) { 25247 case CMD_UNX_BUS_FREE: 25248 /* 25249 * If we had a parity error that caused the target to drop BSY*, 25250 * don't be chatty about it. 25251 */ 25252 if (perr && be_chatty) { 25253 be_chatty = 0; 25254 } 25255 break; 25256 case CMD_TAG_REJECT: 25257 /* 25258 * The SCSI-2 spec states that a tag reject will be sent by the 25259 * target if tagged queuing is not supported. A tag reject may 25260 * also be sent during certain initialization periods or to 25261 * control internal resources. For the latter case the target 25262 * may also return Queue Full. 25263 * 25264 * If this driver receives a tag reject from a target that is 25265 * going through an init period or controlling internal 25266 * resources tagged queuing will be disabled. 
This is less
25267 * than optimal behavior, but the driver is unable to determine
25268 * the target state and assumes tagged queuing is not supported.
25269 */
25270 pkt->pkt_flags = 0;
25271 un->un_tagflags = 0;
25272
25273 if (un->un_f_opt_queueing == TRUE) {
25274 un->un_throttle = min(un->un_throttle, 3);
25275 } else {
25276 un->un_throttle = 1;
25277 }
25278 mutex_exit(SD_MUTEX(un));
25279 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
25280 mutex_enter(SD_MUTEX(un));
25281 break;
25282 case CMD_INCOMPLETE:
25283 /*
25284 * The transport stopped with an abnormal state; fall through and
25285 * reset the target and/or bus unless selection did not complete
25286 * (indicated by STATE_GOT_BUS), in which case we don't want to
25287 * go through a target/bus reset.
25288 */
25289 if (pkt->pkt_state == STATE_GOT_BUS) {
25290 break;
25291 }
25292 /*FALLTHROUGH*/
25293
25294 case CMD_TIMEOUT:
25295 default:
25296 /*
25297 * The lun may still be running the command, so a lun reset
25298 * should be attempted. If the lun reset fails or cannot be
25299 * issued, then try a target reset. Lastly try a bus reset.
25300 */
25301 if ((pkt->pkt_statistics &
25302 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) == 0) {
25303 int reset_retval = 0;
25304 mutex_exit(SD_MUTEX(un));
25305 if (un->un_f_allow_bus_device_reset == TRUE) {
25306 if (un->un_f_lun_reset_enabled == TRUE) {
25307 reset_retval =
25308 scsi_reset(SD_ADDRESS(un),
25309 RESET_LUN);
25310 }
25311 if (reset_retval == 0) {
25312 reset_retval =
25313 scsi_reset(SD_ADDRESS(un),
25314 RESET_TARGET);
25315 }
25316 }
25317 if (reset_retval == 0) {
25318 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
25319 }
25320 mutex_enter(SD_MUTEX(un));
25321 }
25322 break;
25323 }
25324
25325 /* A device/bus reset has occurred; update the reservation status. */
25326 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
25327 (STAT_BUS_RESET | STAT_DEV_RESET))) {
25328 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25329 un->un_resvd_status |=
25330 (SD_LOST_RESERVE | SD_WANT_RESERVE);
25331 SD_INFO(SD_LOG_IOCTL_MHD, un,
25332 "sd_mhd_watch_incomplete: Lost Reservation\n");
25333 }
25334 }
25335
25336 /*
25337 * The disk has been turned off; update the device state.
25338 *
25339 * Note: Should we be offlining the disk here?
25340 */
25341 if (pkt->pkt_state == STATE_GOT_BUS) {
25342 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
25343 "Disk not responding to selection\n");
25344 if (un->un_state != SD_STATE_OFFLINE) {
25345 New_state(un, SD_STATE_OFFLINE);
25346 }
25347 } else if (be_chatty) {
25348 /*
25349 * suppress messages if they are all the same pkt reason;
25350 * with TQ, many (up to 256) are returned with the same
25351 * pkt_reason
25352 */
25353 if (pkt->pkt_reason != un->un_last_pkt_reason) {
25354 SD_ERROR(SD_LOG_IOCTL_MHD, un,
25355 "sd_mhd_watch_incomplete: "
25356 "SCSI transport failed: reason '%s'\n",
25357 scsi_rname(pkt->pkt_reason));
25358 }
25359 }
25360 un->un_last_pkt_reason = pkt->pkt_reason;
25361 mutex_exit(SD_MUTEX(un));
25362 }
25363
25364
25365 /*
25366 * Function: sd_sname()
25367 *
25368 * Description: This is a simple little routine to return a string containing
25369 * a printable description of a command status byte for use in
25370 * logging.
25371 *
25372 * Arguments: status - the command status byte
25373 *
25374 * Return Code: char * - string containing status description.
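 *
 * For example, sd_sname(STATUS_CHECK) returns "check condition", and
 * any unrecognized value maps to "<unknown status>".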
25375 */ 25376 25377 static char * 25378 sd_sname(uchar_t status) 25379 { 25380 switch (status & STATUS_MASK) { 25381 case STATUS_GOOD: 25382 return ("good status"); 25383 case STATUS_CHECK: 25384 return ("check condition"); 25385 case STATUS_MET: 25386 return ("condition met"); 25387 case STATUS_BUSY: 25388 return ("busy"); 25389 case STATUS_INTERMEDIATE: 25390 return ("intermediate"); 25391 case STATUS_INTERMEDIATE_MET: 25392 return ("intermediate - condition met"); 25393 case STATUS_RESERVATION_CONFLICT: 25394 return ("reservation_conflict"); 25395 case STATUS_TERMINATED: 25396 return ("command terminated"); 25397 case STATUS_QFULL: 25398 return ("queue full"); 25399 default: 25400 return ("<unknown status>"); 25401 } 25402 } 25403 25404 25405 /* 25406 * Function: sd_mhd_resvd_recover() 25407 * 25408 * Description: This function adds a reservation entry to the 25409 * sd_resv_reclaim_request list and signals the reservation 25410 * reclaim thread that there is work pending. If the reservation 25411 * reclaim thread has not been previously created this function 25412 * will kick it off. 25413 * 25414 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25415 * among multiple watches that share this callback function 25416 * 25417 * Context: This routine is called by timeout() and is run in interrupt 25418 * context. It must not sleep or call other functions which may 25419 * sleep. 25420 */ 25421 25422 static void 25423 sd_mhd_resvd_recover(void *arg) 25424 { 25425 dev_t dev = (dev_t)arg; 25426 struct sd_lun *un; 25427 struct sd_thr_request *sd_treq = NULL; 25428 struct sd_thr_request *sd_cur = NULL; 25429 struct sd_thr_request *sd_prev = NULL; 25430 int already_there = 0; 25431 25432 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25433 return; 25434 } 25435 25436 mutex_enter(SD_MUTEX(un)); 25437 un->un_resvd_timeid = NULL; 25438 if (un->un_resvd_status & SD_WANT_RESERVE) { 25439 /* 25440 * There was a reset so don't issue the reserve, allow the 25441 * sd_mhd_watch_cb callback function to notice this and 25442 * reschedule the timeout for reservation. 25443 */ 25444 mutex_exit(SD_MUTEX(un)); 25445 return; 25446 } 25447 mutex_exit(SD_MUTEX(un)); 25448 25449 /* 25450 * Add this device to the sd_resv_reclaim_request list and the 25451 * sd_resv_reclaim_thread should take care of the rest. 25452 * 25453 * Note: We can't sleep in this context so if the memory allocation 25454 * fails allow the sd_mhd_watch_cb callback function to notice this and 25455 * reschedule the timeout for reservation. 
(4378460) 25456 */ 25457 sd_treq = (struct sd_thr_request *) 25458 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 25459 if (sd_treq == NULL) { 25460 return; 25461 } 25462 25463 sd_treq->sd_thr_req_next = NULL; 25464 sd_treq->dev = dev; 25465 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25466 if (sd_tr.srq_thr_req_head == NULL) { 25467 sd_tr.srq_thr_req_head = sd_treq; 25468 } else { 25469 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 25470 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 25471 if (sd_cur->dev == dev) { 25472 /* 25473 * already in Queue so don't log 25474 * another request for the device 25475 */ 25476 already_there = 1; 25477 break; 25478 } 25479 sd_prev = sd_cur; 25480 } 25481 if (!already_there) { 25482 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 25483 "logging request for %lx\n", dev); 25484 sd_prev->sd_thr_req_next = sd_treq; 25485 } else { 25486 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 25487 } 25488 } 25489 25490 /* 25491 * Create a kernel thread to do the reservation reclaim and free up this 25492 * thread. We cannot block this thread while we go away to do the 25493 * reservation reclaim 25494 */ 25495 if (sd_tr.srq_resv_reclaim_thread == NULL) 25496 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 25497 sd_resv_reclaim_thread, NULL, 25498 0, &p0, TS_RUN, v.v_maxsyspri - 2); 25499 25500 /* Tell the reservation reclaim thread that it has work to do */ 25501 cv_signal(&sd_tr.srq_resv_reclaim_cv); 25502 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25503 } 25504 25505 /* 25506 * Function: sd_resv_reclaim_thread() 25507 * 25508 * Description: This function implements the reservation reclaim operations 25509 * 25510 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25511 * among multiple watches that share this callback function 25512 */ 25513 25514 static void 25515 sd_resv_reclaim_thread() 25516 { 25517 struct sd_lun *un; 25518 struct sd_thr_request *sd_mhreq; 25519 25520 /* Wait for work */ 25521 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25522 if (sd_tr.srq_thr_req_head == NULL) { 25523 cv_wait(&sd_tr.srq_resv_reclaim_cv, 25524 &sd_tr.srq_resv_reclaim_mutex); 25525 } 25526 25527 /* Loop while we have work */ 25528 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 25529 un = ddi_get_soft_state(sd_state, 25530 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 25531 if (un == NULL) { 25532 /* 25533 * softstate structure is NULL so just 25534 * dequeue the request and continue 25535 */ 25536 sd_tr.srq_thr_req_head = 25537 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25538 kmem_free(sd_tr.srq_thr_cur_req, 25539 sizeof (struct sd_thr_request)); 25540 continue; 25541 } 25542 25543 /* dequeue the request */ 25544 sd_mhreq = sd_tr.srq_thr_cur_req; 25545 sd_tr.srq_thr_req_head = 25546 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25547 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25548 25549 /* 25550 * Reclaim reservation only if SD_RESERVE is still set. There 25551 * may have been a call to MHIOCRELEASE before we got here. 25552 */ 25553 mutex_enter(SD_MUTEX(un)); 25554 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25555 /* 25556 * Note: The SD_LOST_RESERVE flag is cleared before 25557 * reclaiming the reservation. 
If this is done after the 25558 * call to sd_reserve_release a reservation loss in the 25559 * window between pkt completion of reserve cmd and 25560 * mutex_enter below may not be recognized 25561 */ 25562 un->un_resvd_status &= ~SD_LOST_RESERVE; 25563 mutex_exit(SD_MUTEX(un)); 25564 25565 if (sd_reserve_release(sd_mhreq->dev, 25566 SD_RESERVE) == 0) { 25567 mutex_enter(SD_MUTEX(un)); 25568 un->un_resvd_status |= SD_RESERVE; 25569 mutex_exit(SD_MUTEX(un)); 25570 SD_INFO(SD_LOG_IOCTL_MHD, un, 25571 "sd_resv_reclaim_thread: " 25572 "Reservation Recovered\n"); 25573 } else { 25574 mutex_enter(SD_MUTEX(un)); 25575 un->un_resvd_status |= SD_LOST_RESERVE; 25576 mutex_exit(SD_MUTEX(un)); 25577 SD_INFO(SD_LOG_IOCTL_MHD, un, 25578 "sd_resv_reclaim_thread: Failed " 25579 "Reservation Recovery\n"); 25580 } 25581 } else { 25582 mutex_exit(SD_MUTEX(un)); 25583 } 25584 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25585 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 25586 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25587 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 25588 /* 25589 * wakeup the destroy thread if anyone is waiting on 25590 * us to complete. 25591 */ 25592 cv_signal(&sd_tr.srq_inprocess_cv); 25593 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25594 "sd_resv_reclaim_thread: cv_signalling current request \n"); 25595 } 25596 25597 /* 25598 * cleanup the sd_tr structure now that this thread will not exist 25599 */ 25600 ASSERT(sd_tr.srq_thr_req_head == NULL); 25601 ASSERT(sd_tr.srq_thr_cur_req == NULL); 25602 sd_tr.srq_resv_reclaim_thread = NULL; 25603 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25604 thread_exit(); 25605 } 25606 25607 25608 /* 25609 * Function: sd_rmv_resv_reclaim_req() 25610 * 25611 * Description: This function removes any pending reservation reclaim requests 25612 * for the specified device. 25613 * 25614 * Arguments: dev - the device 'dev_t' 25615 */ 25616 25617 static void 25618 sd_rmv_resv_reclaim_req(dev_t dev) 25619 { 25620 struct sd_thr_request *sd_mhreq; 25621 struct sd_thr_request *sd_prev; 25622 25623 /* Remove a reservation reclaim request from the list */ 25624 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25625 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 25626 /* 25627 * We are attempting to reinstate reservation for 25628 * this device. We wait for sd_reserve_release() 25629 * to return before we return. 25630 */ 25631 cv_wait(&sd_tr.srq_inprocess_cv, 25632 &sd_tr.srq_resv_reclaim_mutex); 25633 } else { 25634 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 25635 if (sd_mhreq && sd_mhreq->dev == dev) { 25636 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 25637 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25638 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25639 return; 25640 } 25641 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 25642 if (sd_mhreq && sd_mhreq->dev == dev) { 25643 break; 25644 } 25645 sd_prev = sd_mhreq; 25646 } 25647 if (sd_mhreq != NULL) { 25648 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 25649 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25650 } 25651 } 25652 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25653 } 25654 25655 25656 /* 25657 * Function: sd_mhd_reset_notify_cb() 25658 * 25659 * Description: This is a call back function for scsi_reset_notify. This 25660 * function updates the softstate reserved status and logs the 25661 * reset. The driver scsi watch facility callback function 25662 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 25663 * will reclaim the reservation. 
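 * (Only the reservation status flags are updated here; the actual
 * re-reserve happens later, when sd_mhd_watch_cb() notices the lost
 * reservation and schedules sd_mhd_resvd_recover().)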
25664  *
25665  * Arguments: arg - driver soft state (unit) structure
25666  */
25667 
25668 static void
25669 sd_mhd_reset_notify_cb(caddr_t arg)
25670 {
25671 	struct sd_lun *un = (struct sd_lun *)arg;
25672 
25673 	mutex_enter(SD_MUTEX(un));
25674 	if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25675 		un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
25676 		SD_INFO(SD_LOG_IOCTL_MHD, un,
25677 		    "sd_mhd_reset_notify_cb: Lost Reservation\n");
25678 	}
25679 	mutex_exit(SD_MUTEX(un));
25680 }
25681 
25682 
25683 /*
25684  * Function: sd_take_ownership()
25685  *
25686  * Description: This routine implements an algorithm to achieve a stable
25687  *	reservation on disks which don't implement priority reserve, and
25688  *	ensures that re-reservation attempts by other hosts are defeated.
25689  *	The algorithm consists of a loop that keeps reissuing the RESERVE
25690  *	for some period of time (min_ownership_delay, default 6 seconds).
25691  *	During that loop, it looks to see if there has been a bus device
25692  *	reset or bus reset (both of which cause an existing reservation
25693  *	to be lost). If the reservation is lost, RESERVE is reissued until
25694  *	a period of min_ownership_delay with no resets has gone by, or
25695  *	until max_ownership_delay has expired. This loop ensures that
25696  *	the host really did manage to reserve the device, in spite of
25697  *	resets. The looping for min_ownership_delay (default six
25698  *	seconds) is important to early generation clustering products,
25699  *	Solstice HA 1.x and Sun Cluster 2.x. Those products use an
25700  *	MHIOCENFAILFAST periodic timer of two seconds. By having
25701  *	MHIOCTKOWN issue Reserves in a loop for six seconds, and having
25702  *	MHIOCENFAILFAST poll every two seconds, the idea is that by the
25703  *	time the MHIOCTKOWN ioctl returns, the other host (if any) will
25704  *	have already noticed, via the MHIOCENFAILFAST polling, that it
25705  *	no longer "owns" the disk and will have panicked itself. Thus,
25706  *	the host issuing the MHIOCTKOWN is assured (with timing
25707  *	dependencies) that by the time it actually starts to use the
25708  *	disk for real work, the old owner is no longer accessing it.
25709  *
25710  *	min_ownership_delay is the minimum amount of time for which the
25711  *	disk must be reserved continuously devoid of resets before the
25712  *	MHIOCTKOWN ioctl will return success.
25713  *
25714  *	max_ownership_delay indicates the amount of time by which the
25715  *	take ownership should succeed or time out with an error.
25716  *
25717  * Arguments: dev - the device 'dev_t'
25718  *	*p - struct containing timing info.
25719  *
25720  * Return Code: 0 for success or error code
25721  */
25722 
25723 static int
25724 sd_take_ownership(dev_t dev, struct mhioctkown *p)
25725 {
25726 	struct sd_lun	*un;
25727 	int		rval;
25728 	int		err;
25729 	int		reservation_count = 0;
25730 	int		min_ownership_delay = 6000000; /* in usec */
25731 	int		max_ownership_delay = 30000000; /* in usec */
25732 	clock_t		start_time;	/* starting time of this algorithm */
25733 	clock_t		end_time;	/* time limit for giving up */
25734 	clock_t		ownership_time;	/* time limit for stable ownership */
25735 	clock_t		current_time;
25736 	clock_t		previous_current_time;
25737 
25738 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25739 		return (ENXIO);
25740 	}
25741 
25742 	/*
25743 	 * Attempt a device reservation. A priority reservation is requested.
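	 *
	 * With the defaults, the stabilization loop that follows polls every
	 * 500 msec; ownership is declared only once min_ownership_delay of
	 * reset-free reservation has elapsed AND the last four consecutive
	 * RESERVEs succeeded. A sketch of the accounting, mirroring the loop
	 * below (illustrative only, not additional logic):
	 *
	 *	ownership_time = lbolt + drv_usectohz(min_ownership_delay);
	 *	stable iff (lbolt - ownership_time >= 0 &&
	 *	    reservation_count >= 4)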
25744 */ 25745 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 25746 != SD_SUCCESS) { 25747 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25748 "sd_take_ownership: return(1)=%d\n", rval); 25749 return (rval); 25750 } 25751 25752 /* Update the softstate reserved status to indicate the reservation */ 25753 mutex_enter(SD_MUTEX(un)); 25754 un->un_resvd_status |= SD_RESERVE; 25755 un->un_resvd_status &= 25756 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 25757 mutex_exit(SD_MUTEX(un)); 25758 25759 if (p != NULL) { 25760 if (p->min_ownership_delay != 0) { 25761 min_ownership_delay = p->min_ownership_delay * 1000; 25762 } 25763 if (p->max_ownership_delay != 0) { 25764 max_ownership_delay = p->max_ownership_delay * 1000; 25765 } 25766 } 25767 SD_INFO(SD_LOG_IOCTL_MHD, un, 25768 "sd_take_ownership: min, max delays: %d, %d\n", 25769 min_ownership_delay, max_ownership_delay); 25770 25771 start_time = ddi_get_lbolt(); 25772 current_time = start_time; 25773 ownership_time = current_time + drv_usectohz(min_ownership_delay); 25774 end_time = start_time + drv_usectohz(max_ownership_delay); 25775 25776 while (current_time - end_time < 0) { 25777 delay(drv_usectohz(500000)); 25778 25779 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 25780 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 25781 mutex_enter(SD_MUTEX(un)); 25782 rval = (un->un_resvd_status & 25783 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 25784 mutex_exit(SD_MUTEX(un)); 25785 break; 25786 } 25787 } 25788 previous_current_time = current_time; 25789 current_time = ddi_get_lbolt(); 25790 mutex_enter(SD_MUTEX(un)); 25791 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 25792 ownership_time = ddi_get_lbolt() + 25793 drv_usectohz(min_ownership_delay); 25794 reservation_count = 0; 25795 } else { 25796 reservation_count++; 25797 } 25798 un->un_resvd_status |= SD_RESERVE; 25799 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 25800 mutex_exit(SD_MUTEX(un)); 25801 25802 SD_INFO(SD_LOG_IOCTL_MHD, un, 25803 "sd_take_ownership: ticks for loop iteration=%ld, " 25804 "reservation=%s\n", (current_time - previous_current_time), 25805 reservation_count ? 
"ok" : "reclaimed"); 25806 25807 if (current_time - ownership_time >= 0 && 25808 reservation_count >= 4) { 25809 rval = 0; /* Achieved a stable ownership */ 25810 break; 25811 } 25812 if (current_time - end_time >= 0) { 25813 rval = EACCES; /* No ownership in max possible time */ 25814 break; 25815 } 25816 } 25817 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25818 "sd_take_ownership: return(2)=%d\n", rval); 25819 return (rval); 25820 } 25821 25822 25823 /* 25824 * Function: sd_reserve_release() 25825 * 25826 * Description: This function builds and sends scsi RESERVE, RELEASE, and 25827 * PRIORITY RESERVE commands based on a user specified command type 25828 * 25829 * Arguments: dev - the device 'dev_t' 25830 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 25831 * SD_RESERVE, SD_RELEASE 25832 * 25833 * Return Code: 0 or Error Code 25834 */ 25835 25836 static int 25837 sd_reserve_release(dev_t dev, int cmd) 25838 { 25839 struct uscsi_cmd *com = NULL; 25840 struct sd_lun *un = NULL; 25841 char cdb[CDB_GROUP0]; 25842 int rval; 25843 25844 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 25845 (cmd == SD_PRIORITY_RESERVE)); 25846 25847 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25848 return (ENXIO); 25849 } 25850 25851 /* instantiate and initialize the command and cdb */ 25852 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25853 bzero(cdb, CDB_GROUP0); 25854 com->uscsi_flags = USCSI_SILENT; 25855 com->uscsi_timeout = un->un_reserve_release_time; 25856 com->uscsi_cdblen = CDB_GROUP0; 25857 com->uscsi_cdb = cdb; 25858 if (cmd == SD_RELEASE) { 25859 cdb[0] = SCMD_RELEASE; 25860 } else { 25861 cdb[0] = SCMD_RESERVE; 25862 } 25863 25864 /* Send the command. */ 25865 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25866 SD_PATH_STANDARD); 25867 25868 /* 25869 * "break" a reservation that is held by another host, by issuing a 25870 * reset if priority reserve is desired, and we could not get the 25871 * device. 25872 */ 25873 if ((cmd == SD_PRIORITY_RESERVE) && 25874 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25875 /* 25876 * First try to reset the LUN. If we cannot, then try a target 25877 * reset, followed by a bus reset if the target reset fails. 25878 */ 25879 int reset_retval = 0; 25880 if (un->un_f_lun_reset_enabled == TRUE) { 25881 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 25882 } 25883 if (reset_retval == 0) { 25884 /* The LUN reset either failed or was not issued */ 25885 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25886 } 25887 if ((reset_retval == 0) && 25888 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 25889 rval = EIO; 25890 kmem_free(com, sizeof (*com)); 25891 return (rval); 25892 } 25893 25894 bzero(com, sizeof (struct uscsi_cmd)); 25895 com->uscsi_flags = USCSI_SILENT; 25896 com->uscsi_cdb = cdb; 25897 com->uscsi_cdblen = CDB_GROUP0; 25898 com->uscsi_timeout = 5; 25899 25900 /* 25901 * Reissue the last reserve command, this time without request 25902 * sense. Assume that it is just a regular reserve command. 25903 */ 25904 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 25905 SD_PATH_STANDARD); 25906 } 25907 25908 /* Return an error if still getting a reservation conflict. 
*/ 25909 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25910 rval = EACCES; 25911 } 25912 25913 kmem_free(com, sizeof (*com)); 25914 return (rval); 25915 } 25916 25917 25918 #define SD_NDUMP_RETRIES 12 25919 /* 25920 * System Crash Dump routine 25921 */ 25922 25923 static int 25924 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 25925 { 25926 int instance; 25927 int partition; 25928 int i; 25929 int err; 25930 struct sd_lun *un; 25931 struct scsi_pkt *wr_pktp; 25932 struct buf *wr_bp; 25933 struct buf wr_buf; 25934 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 25935 daddr_t tgt_blkno; /* rmw - blkno for target */ 25936 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 25937 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 25938 size_t io_start_offset; 25939 int doing_rmw = FALSE; 25940 int rval; 25941 ssize_t dma_resid; 25942 daddr_t oblkno; 25943 diskaddr_t nblks = 0; 25944 diskaddr_t start_block; 25945 25946 instance = SDUNIT(dev); 25947 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 25948 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 25949 return (ENXIO); 25950 } 25951 25952 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 25953 25954 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 25955 25956 partition = SDPART(dev); 25957 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 25958 25959 if (!(NOT_DEVBSIZE(un))) { 25960 int secmask = 0; 25961 int blknomask = 0; 25962 25963 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1; 25964 secmask = un->un_tgt_blocksize - 1; 25965 25966 if (blkno & blknomask) { 25967 SD_TRACE(SD_LOG_DUMP, un, 25968 "sddump: dump start block not modulo %d\n", 25969 un->un_tgt_blocksize); 25970 return (EINVAL); 25971 } 25972 25973 if ((nblk * DEV_BSIZE) & secmask) { 25974 SD_TRACE(SD_LOG_DUMP, un, 25975 "sddump: dump length not modulo %d\n", 25976 un->un_tgt_blocksize); 25977 return (EINVAL); 25978 } 25979 25980 } 25981 25982 /* Validate blocks to dump at against partition size. */ 25983 25984 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 25985 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 25986 25987 if (NOT_DEVBSIZE(un)) { 25988 if ((blkno + nblk) > nblks) { 25989 SD_TRACE(SD_LOG_DUMP, un, 25990 "sddump: dump range larger than partition: " 25991 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 25992 blkno, nblk, nblks); 25993 return (EINVAL); 25994 } 25995 } else { 25996 if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) + 25997 (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) { 25998 SD_TRACE(SD_LOG_DUMP, un, 25999 "sddump: dump range larger than partition: " 26000 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 26001 blkno, nblk, nblks); 26002 return (EINVAL); 26003 } 26004 } 26005 26006 mutex_enter(&un->un_pm_mutex); 26007 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 26008 struct scsi_pkt *start_pktp; 26009 26010 mutex_exit(&un->un_pm_mutex); 26011 26012 /* 26013 * use pm framework to power on HBA 1st 26014 */ 26015 (void) pm_raise_power(SD_DEVINFO(un), 0, 26016 SD_PM_STATE_ACTIVE(un)); 26017 26018 /* 26019 * Dump no long uses sdpower to power on a device, it's 26020 * in-line here so it can be done in polled mode. 26021 */ 26022 26023 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 26024 26025 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 26026 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 26027 26028 if (start_pktp == NULL) { 26029 /* We were not given a SCSI packet, fail. 
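		 * scsi_init_pkt() is called with NULL_FUNC (no callback)
		 * because dump context cannot block, so a NULL return is
		 * possible here and is treated as fatal for the dump.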
*/ 26030 return (EIO); 26031 } 26032 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 26033 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 26034 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 26035 start_pktp->pkt_flags = FLAG_NOINTR; 26036 26037 mutex_enter(SD_MUTEX(un)); 26038 SD_FILL_SCSI1_LUN(un, start_pktp); 26039 mutex_exit(SD_MUTEX(un)); 26040 /* 26041 * Scsi_poll returns 0 (success) if the command completes and 26042 * the status block is STATUS_GOOD. 26043 */ 26044 if (sd_scsi_poll(un, start_pktp) != 0) { 26045 scsi_destroy_pkt(start_pktp); 26046 return (EIO); 26047 } 26048 scsi_destroy_pkt(start_pktp); 26049 (void) sd_pm_state_change(un, SD_PM_STATE_ACTIVE(un), 26050 SD_PM_STATE_CHANGE); 26051 } else { 26052 mutex_exit(&un->un_pm_mutex); 26053 } 26054 26055 mutex_enter(SD_MUTEX(un)); 26056 un->un_throttle = 0; 26057 26058 /* 26059 * The first time through, reset the specific target device. 26060 * However, when cpr calls sddump we know that sd is in a 26061 * a good state so no bus reset is required. 26062 * Clear sense data via Request Sense cmd. 26063 * In sddump we don't care about allow_bus_device_reset anymore 26064 */ 26065 26066 if ((un->un_state != SD_STATE_SUSPENDED) && 26067 (un->un_state != SD_STATE_DUMPING)) { 26068 26069 New_state(un, SD_STATE_DUMPING); 26070 26071 if (un->un_f_is_fibre == FALSE) { 26072 mutex_exit(SD_MUTEX(un)); 26073 /* 26074 * Attempt a bus reset for parallel scsi. 26075 * 26076 * Note: A bus reset is required because on some host 26077 * systems (i.e. E420R) a bus device reset is 26078 * insufficient to reset the state of the target. 26079 * 26080 * Note: Don't issue the reset for fibre-channel, 26081 * because this tends to hang the bus (loop) for 26082 * too long while everyone is logging out and in 26083 * and the deadman timer for dumping will fire 26084 * before the dump is complete. 26085 */ 26086 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 26087 mutex_enter(SD_MUTEX(un)); 26088 Restore_state(un); 26089 mutex_exit(SD_MUTEX(un)); 26090 return (EIO); 26091 } 26092 26093 /* Delay to give the device some recovery time. */ 26094 drv_usecwait(10000); 26095 26096 if (sd_send_polled_RQS(un) == SD_FAILURE) { 26097 SD_INFO(SD_LOG_DUMP, un, 26098 "sddump: sd_send_polled_RQS failed\n"); 26099 } 26100 mutex_enter(SD_MUTEX(un)); 26101 } 26102 } 26103 26104 /* 26105 * Convert the partition-relative block number to a 26106 * disk physical block number. 26107 */ 26108 if (NOT_DEVBSIZE(un)) { 26109 blkno += start_block; 26110 } else { 26111 blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE); 26112 blkno += start_block; 26113 } 26114 26115 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 26116 26117 26118 /* 26119 * Check if the device has a non-512 block size. 26120 */ 26121 wr_bp = NULL; 26122 if (NOT_DEVBSIZE(un)) { 26123 tgt_byte_offset = blkno * un->un_sys_blocksize; 26124 tgt_byte_count = nblk * un->un_sys_blocksize; 26125 if ((tgt_byte_offset % un->un_tgt_blocksize) || 26126 (tgt_byte_count % un->un_tgt_blocksize)) { 26127 doing_rmw = TRUE; 26128 /* 26129 * Calculate the block number and number of block 26130 * in terms of the media block size. 26131 */ 26132 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 26133 tgt_nblk = 26134 ((tgt_byte_offset + tgt_byte_count + 26135 (un->un_tgt_blocksize - 1)) / 26136 un->un_tgt_blocksize) - tgt_blkno; 26137 26138 /* 26139 * Invoke the routine which is going to do read part 26140 * of read-modify-write. 26141 * Note that this routine returns a pointer to 26142 * a valid bp in wr_bp. 
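			 * Worked example (illustrative): with 2048-byte
			 * media blocks, 512-byte system blocks, blkno = 3
			 * and nblk = 2:
			 *
			 *	tgt_byte_offset = 3 * 512 = 1536 (unaligned)
			 *	tgt_byte_count  = 2 * 512 = 1024
			 *	tgt_blkno = 1536 / 2048 = 0
			 *	tgt_nblk  = ((1536 + 1024 + 2047) / 2048) - 0
			 *		  = 2
			 *
			 * so media blocks 0-1 are read, the caller's 1024
			 * bytes land at io_start_offset = 1536 - 0 = 1536,
			 * and both blocks are written back.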
26143 */ 26144 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 26145 &wr_bp); 26146 if (err) { 26147 mutex_exit(SD_MUTEX(un)); 26148 return (err); 26149 } 26150 /* 26151 * Offset is being calculated as - 26152 * (original block # * system block size) - 26153 * (new block # * target block size) 26154 */ 26155 io_start_offset = 26156 ((uint64_t)(blkno * un->un_sys_blocksize)) - 26157 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 26158 26159 ASSERT(io_start_offset < un->un_tgt_blocksize); 26160 /* 26161 * Do the modify portion of read modify write. 26162 */ 26163 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 26164 (size_t)nblk * un->un_sys_blocksize); 26165 } else { 26166 doing_rmw = FALSE; 26167 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 26168 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 26169 } 26170 26171 /* Convert blkno and nblk to target blocks */ 26172 blkno = tgt_blkno; 26173 nblk = tgt_nblk; 26174 } else { 26175 wr_bp = &wr_buf; 26176 bzero(wr_bp, sizeof (struct buf)); 26177 wr_bp->b_flags = B_BUSY; 26178 wr_bp->b_un.b_addr = addr; 26179 wr_bp->b_bcount = nblk << DEV_BSHIFT; 26180 wr_bp->b_resid = 0; 26181 } 26182 26183 mutex_exit(SD_MUTEX(un)); 26184 26185 /* 26186 * Obtain a SCSI packet for the write command. 26187 * It should be safe to call the allocator here without 26188 * worrying about being locked for DVMA mapping because 26189 * the address we're passed is already a DVMA mapping 26190 * 26191 * We are also not going to worry about semaphore ownership 26192 * in the dump buffer. Dumping is single threaded at present. 26193 */ 26194 26195 wr_pktp = NULL; 26196 26197 dma_resid = wr_bp->b_bcount; 26198 oblkno = blkno; 26199 26200 if (!(NOT_DEVBSIZE(un))) { 26201 nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE); 26202 } 26203 26204 while (dma_resid != 0) { 26205 26206 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26207 wr_bp->b_flags &= ~B_ERROR; 26208 26209 if (un->un_partial_dma_supported == 1) { 26210 blkno = oblkno + 26211 ((wr_bp->b_bcount - dma_resid) / 26212 un->un_tgt_blocksize); 26213 nblk = dma_resid / un->un_tgt_blocksize; 26214 26215 if (wr_pktp) { 26216 /* 26217 * Partial DMA transfers after initial transfer 26218 */ 26219 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 26220 blkno, nblk); 26221 } else { 26222 /* Initial transfer */ 26223 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 26224 un->un_pkt_flags, NULL_FUNC, NULL, 26225 blkno, nblk); 26226 } 26227 } else { 26228 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 26229 0, NULL_FUNC, NULL, blkno, nblk); 26230 } 26231 26232 if (rval == 0) { 26233 /* We were given a SCSI packet, continue. 
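			 * With SD_NDUMP_RETRIES (12) attempts and the
			 * drv_usecwait(10000) at the bottom of this loop,
			 * allocation is retried for roughly 120 msec before
			 * the dump is abandoned.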
*/ 26234 break; 26235 } 26236 26237 if (i == 0) { 26238 if (wr_bp->b_flags & B_ERROR) { 26239 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26240 "no resources for dumping; " 26241 "error code: 0x%x, retrying", 26242 geterror(wr_bp)); 26243 } else { 26244 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26245 "no resources for dumping; retrying"); 26246 } 26247 } else if (i != (SD_NDUMP_RETRIES - 1)) { 26248 if (wr_bp->b_flags & B_ERROR) { 26249 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26250 "no resources for dumping; error code: " 26251 "0x%x, retrying\n", geterror(wr_bp)); 26252 } 26253 } else { 26254 if (wr_bp->b_flags & B_ERROR) { 26255 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26256 "no resources for dumping; " 26257 "error code: 0x%x, retries failed, " 26258 "giving up.\n", geterror(wr_bp)); 26259 } else { 26260 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 26261 "no resources for dumping; " 26262 "retries failed, giving up.\n"); 26263 } 26264 mutex_enter(SD_MUTEX(un)); 26265 Restore_state(un); 26266 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 26267 mutex_exit(SD_MUTEX(un)); 26268 scsi_free_consistent_buf(wr_bp); 26269 } else { 26270 mutex_exit(SD_MUTEX(un)); 26271 } 26272 return (EIO); 26273 } 26274 drv_usecwait(10000); 26275 } 26276 26277 if (un->un_partial_dma_supported == 1) { 26278 /* 26279 * save the resid from PARTIAL_DMA 26280 */ 26281 dma_resid = wr_pktp->pkt_resid; 26282 if (dma_resid != 0) 26283 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 26284 wr_pktp->pkt_resid = 0; 26285 } else { 26286 dma_resid = 0; 26287 } 26288 26289 /* SunBug 1222170 */ 26290 wr_pktp->pkt_flags = FLAG_NOINTR; 26291 26292 err = EIO; 26293 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 26294 26295 /* 26296 * Scsi_poll returns 0 (success) if the command completes and 26297 * the status block is STATUS_GOOD. We should only check 26298 * errors if this condition is not true. Even then we should 26299 * send our own request sense packet only if we have a check 26300 * condition and auto request sense has not been performed by 26301 * the hba. 26302 */ 26303 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 26304 26305 if ((sd_scsi_poll(un, wr_pktp) == 0) && 26306 (wr_pktp->pkt_resid == 0)) { 26307 err = SD_SUCCESS; 26308 break; 26309 } 26310 26311 /* 26312 * Check CMD_DEV_GONE 1st, give up if device is gone. 
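		 * CMD_DEV_GONE means the target itself has departed,
		 * so neither retries nor resets can help here.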
26313 */ 26314 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 26315 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26316 "Error while dumping state...Device is gone\n"); 26317 break; 26318 } 26319 26320 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 26321 SD_INFO(SD_LOG_DUMP, un, 26322 "sddump: write failed with CHECK, try # %d\n", i); 26323 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 26324 (void) sd_send_polled_RQS(un); 26325 } 26326 26327 continue; 26328 } 26329 26330 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 26331 int reset_retval = 0; 26332 26333 SD_INFO(SD_LOG_DUMP, un, 26334 "sddump: write failed with BUSY, try # %d\n", i); 26335 26336 if (un->un_f_lun_reset_enabled == TRUE) { 26337 reset_retval = scsi_reset(SD_ADDRESS(un), 26338 RESET_LUN); 26339 } 26340 if (reset_retval == 0) { 26341 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 26342 } 26343 (void) sd_send_polled_RQS(un); 26344 26345 } else { 26346 SD_INFO(SD_LOG_DUMP, un, 26347 "sddump: write failed with 0x%x, try # %d\n", 26348 SD_GET_PKT_STATUS(wr_pktp), i); 26349 mutex_enter(SD_MUTEX(un)); 26350 sd_reset_target(un, wr_pktp); 26351 mutex_exit(SD_MUTEX(un)); 26352 } 26353 26354 /* 26355 * If we are not getting anywhere with lun/target resets, 26356 * let's reset the bus. 26357 */ 26358 if (i == SD_NDUMP_RETRIES / 2) { 26359 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 26360 (void) sd_send_polled_RQS(un); 26361 } 26362 } 26363 } 26364 26365 scsi_destroy_pkt(wr_pktp); 26366 mutex_enter(SD_MUTEX(un)); 26367 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 26368 mutex_exit(SD_MUTEX(un)); 26369 scsi_free_consistent_buf(wr_bp); 26370 } else { 26371 mutex_exit(SD_MUTEX(un)); 26372 } 26373 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 26374 return (err); 26375 } 26376 26377 /* 26378 * Function: sd_scsi_poll() 26379 * 26380 * Description: This is a wrapper for the scsi_poll call. 26381 * 26382 * Arguments: sd_lun - The unit structure 26383 * scsi_pkt - The scsi packet being sent to the device. 26384 * 26385 * Return Code: 0 - Command completed successfully with good status 26386 * -1 - Command failed. This could indicate a check condition 26387 * or other status value requiring recovery action. 26388 * 26389 * NOTE: This code is only called off sddump(). 26390 */ 26391 26392 static int 26393 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 26394 { 26395 int status; 26396 26397 ASSERT(un != NULL); 26398 ASSERT(!mutex_owned(SD_MUTEX(un))); 26399 ASSERT(pktp != NULL); 26400 26401 status = SD_SUCCESS; 26402 26403 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 26404 pktp->pkt_flags |= un->un_tagflags; 26405 pktp->pkt_flags &= ~FLAG_NODISCON; 26406 } 26407 26408 status = sd_ddi_scsi_poll(pktp); 26409 /* 26410 * Scsi_poll returns 0 (success) if the command completes and the 26411 * status block is STATUS_GOOD. We should only check errors if this 26412 * condition is not true. Even then we should send our own request 26413 * sense packet only if we have a check condition and auto 26414 * request sense has not been performed by the hba. 26415 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 26416 */ 26417 if ((status != SD_SUCCESS) && 26418 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 26419 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 26420 (pktp->pkt_reason != CMD_DEV_GONE)) 26421 (void) sd_send_polled_RQS(un); 26422 26423 return (status); 26424 } 26425 26426 /* 26427 * Function: sd_send_polled_RQS() 26428 * 26429 * Description: This sends the request sense command to a device. 
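 *		The command reuses the pre-allocated sense packet and buffer
 *		(un_rqs_pktp/un_rqs_bp), serialized by un_sense_isbusy.
 *		A sketch of the protocol implemented below (illustrative):
 *
 *		    mutex_enter(SD_MUTEX(un));
 *		    if (un->un_sense_isbusy)
 *			    return (SD_FAILURE);	- already in use
 *		    un->un_sense_isbusy = 1;
 *		    mutex_exit(SD_MUTEX(un));
 *		    (void) sd_ddi_scsi_poll(un->un_rqs_pktp);
 *		    ...
 *		    un->un_sense_isbusy = 0;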
26430 * 26431 * Arguments: sd_lun - The unit structure 26432 * 26433 * Return Code: 0 - Command completed successfully with good status 26434 * -1 - Command failed. 26435 * 26436 */ 26437 26438 static int 26439 sd_send_polled_RQS(struct sd_lun *un) 26440 { 26441 int ret_val; 26442 struct scsi_pkt *rqs_pktp; 26443 struct buf *rqs_bp; 26444 26445 ASSERT(un != NULL); 26446 ASSERT(!mutex_owned(SD_MUTEX(un))); 26447 26448 ret_val = SD_SUCCESS; 26449 26450 rqs_pktp = un->un_rqs_pktp; 26451 rqs_bp = un->un_rqs_bp; 26452 26453 mutex_enter(SD_MUTEX(un)); 26454 26455 if (un->un_sense_isbusy) { 26456 ret_val = SD_FAILURE; 26457 mutex_exit(SD_MUTEX(un)); 26458 return (ret_val); 26459 } 26460 26461 /* 26462 * If the request sense buffer (and packet) is not in use, 26463 * let's set the un_sense_isbusy and send our packet 26464 */ 26465 un->un_sense_isbusy = 1; 26466 rqs_pktp->pkt_resid = 0; 26467 rqs_pktp->pkt_reason = 0; 26468 rqs_pktp->pkt_flags |= FLAG_NOINTR; 26469 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 26470 26471 mutex_exit(SD_MUTEX(un)); 26472 26473 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 26474 " 0x%p\n", rqs_bp->b_un.b_addr); 26475 26476 /* 26477 * Can't send this to sd_scsi_poll, we wrap ourselves around the 26478 * axle - it has a call into us! 26479 */ 26480 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 26481 SD_INFO(SD_LOG_COMMON, un, 26482 "sd_send_polled_RQS: RQS failed\n"); 26483 } 26484 26485 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 26486 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 26487 26488 mutex_enter(SD_MUTEX(un)); 26489 un->un_sense_isbusy = 0; 26490 mutex_exit(SD_MUTEX(un)); 26491 26492 return (ret_val); 26493 } 26494 26495 /* 26496 * Defines needed for localized version of the scsi_poll routine. 26497 */ 26498 #define CSEC 10000 /* usecs */ 26499 #define SEC_TO_CSEC (1000000 / CSEC) 26500 26501 /* 26502 * Function: sd_ddi_scsi_poll() 26503 * 26504 * Description: Localized version of the scsi_poll routine. The purpose is to 26505 * send a scsi_pkt to a device as a polled command. This version 26506 * is to ensure more robust handling of transport errors. 26507 * Specifically this routine cures not ready, coming ready 26508 * transition for power up and reset of sonoma's. This can take 26509 * up to 45 seconds for power-on and 20 seconds for reset of a 26510 * sonoma lun. 26511 * 26512 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 26513 * 26514 * Return Code: 0 - Command completed successfully with good status 26515 * -1 - Command failed. 26516 * 26517 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 26518 * be fixed (removing this code), we need to determine how to handle the 26519 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 26520 * 26521 * NOTE: This code is only called off sddump(). 26522 */ 26523 static int 26524 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 26525 { 26526 int rval = -1; 26527 int savef; 26528 long savet; 26529 void (*savec)(); 26530 int timeout; 26531 int busy_count; 26532 int poll_delay; 26533 int rc; 26534 uint8_t *sensep; 26535 struct scsi_arq_status *arqstat; 26536 extern int do_polled_io; 26537 26538 ASSERT(pkt->pkt_scbp); 26539 26540 /* 26541 * save old flags.. 
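	 * pkt_flags, pkt_comp and pkt_time are restored verbatim before
	 * returning, so the caller's packet can be reused unchanged.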
26542 */ 26543 savef = pkt->pkt_flags; 26544 savec = pkt->pkt_comp; 26545 savet = pkt->pkt_time; 26546 26547 pkt->pkt_flags |= FLAG_NOINTR; 26548 26549 /* 26550 * XXX there is nothing in the SCSA spec that states that we should not 26551 * do a callback for polled cmds; however, removing this will break sd 26552 * and probably other target drivers 26553 */ 26554 pkt->pkt_comp = NULL; 26555 26556 /* 26557 * we don't like a polled command without timeout. 26558 * 60 seconds seems long enough. 26559 */ 26560 if (pkt->pkt_time == 0) 26561 pkt->pkt_time = SCSI_POLL_TIMEOUT; 26562 26563 /* 26564 * Send polled cmd. 26565 * 26566 * We do some error recovery for various errors. Tran_busy, 26567 * queue full, and non-dispatched commands are retried every 10 msec. 26568 * as they are typically transient failures. Busy status and Not 26569 * Ready are retried every second as this status takes a while to 26570 * change. 26571 */ 26572 timeout = pkt->pkt_time * SEC_TO_CSEC; 26573 26574 for (busy_count = 0; busy_count < timeout; busy_count++) { 26575 /* 26576 * Initialize pkt status variables. 26577 */ 26578 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 26579 26580 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 26581 if (rc != TRAN_BUSY) { 26582 /* Transport failed - give up. */ 26583 break; 26584 } else { 26585 /* Transport busy - try again. */ 26586 poll_delay = 1 * CSEC; /* 10 msec. */ 26587 } 26588 } else { 26589 /* 26590 * Transport accepted - check pkt status. 26591 */ 26592 rc = (*pkt->pkt_scbp) & STATUS_MASK; 26593 if ((pkt->pkt_reason == CMD_CMPLT) && 26594 (rc == STATUS_CHECK) && 26595 (pkt->pkt_state & STATE_ARQ_DONE)) { 26596 arqstat = 26597 (struct scsi_arq_status *)(pkt->pkt_scbp); 26598 sensep = (uint8_t *)&arqstat->sts_sensedata; 26599 } else { 26600 sensep = NULL; 26601 } 26602 26603 if ((pkt->pkt_reason == CMD_CMPLT) && 26604 (rc == STATUS_GOOD)) { 26605 /* No error - we're done */ 26606 rval = 0; 26607 break; 26608 26609 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 26610 /* Lost connection - give up */ 26611 break; 26612 26613 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 26614 (pkt->pkt_state == 0)) { 26615 /* Pkt not dispatched - try again. */ 26616 poll_delay = 1 * CSEC; /* 10 msec. */ 26617 26618 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26619 (rc == STATUS_QFULL)) { 26620 /* Queue full - try again. */ 26621 poll_delay = 1 * CSEC; /* 10 msec. */ 26622 26623 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26624 (rc == STATUS_BUSY)) { 26625 /* Busy - try again. */ 26626 poll_delay = 100 * CSEC; /* 1 sec. */ 26627 busy_count += (SEC_TO_CSEC - 1); 26628 26629 } else if ((sensep != NULL) && 26630 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 26631 /* 26632 * Unit Attention - try again. 26633 * Pretend it took 1 sec. 26634 * NOTE: 'continue' avoids poll_delay 26635 */ 26636 busy_count += (SEC_TO_CSEC - 1); 26637 continue; 26638 26639 } else if ((sensep != NULL) && 26640 (scsi_sense_key(sensep) == KEY_NOT_READY) && 26641 (scsi_sense_asc(sensep) == 0x04) && 26642 (scsi_sense_ascq(sensep) == 0x01)) { 26643 /* 26644 * Not ready -> ready - try again. 26645 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 26646 * ...same as STATUS_BUSY 26647 */ 26648 poll_delay = 100 * CSEC; /* 1 sec. */ 26649 busy_count += (SEC_TO_CSEC - 1); 26650 26651 } else { 26652 /* BAD status - give up. 
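			 * Any other pkt_reason/status combination is
			 * treated as unrecoverable in this polled context.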
*/ 26653 break; 26654 } 26655 } 26656 26657 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 26658 !do_polled_io) { 26659 delay(drv_usectohz(poll_delay)); 26660 } else { 26661 /* we busy wait during cpr_dump or interrupt threads */ 26662 drv_usecwait(poll_delay); 26663 } 26664 } 26665 26666 pkt->pkt_flags = savef; 26667 pkt->pkt_comp = savec; 26668 pkt->pkt_time = savet; 26669 26670 /* return on error */ 26671 if (rval) 26672 return (rval); 26673 26674 /* 26675 * This is not a performance critical code path. 26676 * 26677 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 26678 * issues associated with looking at DMA memory prior to 26679 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 26680 */ 26681 scsi_sync_pkt(pkt); 26682 return (0); 26683 } 26684 26685 26686 26687 /* 26688 * Function: sd_persistent_reservation_in_read_keys 26689 * 26690 * Description: This routine is the driver entry point for handling CD-ROM 26691 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 26692 * by sending the SCSI-3 PRIN commands to the device. 26693 * Processes the read keys command response by copying the 26694 * reservation key information into the user provided buffer. 26695 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26696 * 26697 * Arguments: un - Pointer to soft state struct for the target. 26698 * usrp - user provided pointer to multihost Persistent In Read 26699 * Keys structure (mhioc_inkeys_t) 26700 * flag - this argument is a pass through to ddi_copyxxx() 26701 * directly from the mode argument of ioctl(). 26702 * 26703 * Return Code: 0 - Success 26704 * EACCES 26705 * ENOTSUP 26706 * errno return code from sd_send_scsi_cmd() 26707 * 26708 * Context: Can sleep. Does not return until command is completed. 26709 */ 26710 26711 static int 26712 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 26713 mhioc_inkeys_t *usrp, int flag) 26714 { 26715 #ifdef _MULTI_DATAMODEL 26716 struct mhioc_key_list32 li32; 26717 #endif 26718 sd_prin_readkeys_t *in; 26719 mhioc_inkeys_t *ptr; 26720 mhioc_key_list_t li; 26721 uchar_t *data_bufp = NULL; 26722 int data_len = 0; 26723 int rval = 0; 26724 size_t copysz = 0; 26725 sd_ssc_t *ssc; 26726 26727 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 26728 return (EINVAL); 26729 } 26730 bzero(&li, sizeof (mhioc_key_list_t)); 26731 26732 ssc = sd_ssc_init(un); 26733 26734 /* 26735 * Get the listsize from user 26736 */ 26737 #ifdef _MULTI_DATAMODEL 26738 switch (ddi_model_convert_from(flag & FMODELS)) { 26739 case DDI_MODEL_ILP32: 26740 copysz = sizeof (struct mhioc_key_list32); 26741 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 26742 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26743 "sd_persistent_reservation_in_read_keys: " 26744 "failed ddi_copyin: mhioc_key_list32_t\n"); 26745 rval = EFAULT; 26746 goto done; 26747 } 26748 li.listsize = li32.listsize; 26749 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 26750 break; 26751 26752 case DDI_MODEL_NONE: 26753 copysz = sizeof (mhioc_key_list_t); 26754 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26755 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26756 "sd_persistent_reservation_in_read_keys: " 26757 "failed ddi_copyin: mhioc_key_list_t\n"); 26758 rval = EFAULT; 26759 goto done; 26760 } 26761 break; 26762 } 26763 26764 #else /* ! 
_MULTI_DATAMODEL */ 26765 copysz = sizeof (mhioc_key_list_t); 26766 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26767 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26768 "sd_persistent_reservation_in_read_keys: " 26769 "failed ddi_copyin: mhioc_key_list_t\n"); 26770 rval = EFAULT; 26771 goto done; 26772 } 26773 #endif 26774 26775 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 26776 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 26777 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26778 26779 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 26780 data_len, data_bufp); 26781 if (rval != 0) { 26782 if (rval == EIO) 26783 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 26784 else 26785 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 26786 goto done; 26787 } 26788 in = (sd_prin_readkeys_t *)data_bufp; 26789 ptr->generation = BE_32(in->generation); 26790 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 26791 26792 /* 26793 * Return the min(listsize, listlen) keys 26794 */ 26795 #ifdef _MULTI_DATAMODEL 26796 26797 switch (ddi_model_convert_from(flag & FMODELS)) { 26798 case DDI_MODEL_ILP32: 26799 li32.listlen = li.listlen; 26800 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 26801 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26802 "sd_persistent_reservation_in_read_keys: " 26803 "failed ddi_copyout: mhioc_key_list32_t\n"); 26804 rval = EFAULT; 26805 goto done; 26806 } 26807 break; 26808 26809 case DDI_MODEL_NONE: 26810 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26811 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26812 "sd_persistent_reservation_in_read_keys: " 26813 "failed ddi_copyout: mhioc_key_list_t\n"); 26814 rval = EFAULT; 26815 goto done; 26816 } 26817 break; 26818 } 26819 26820 #else /* ! _MULTI_DATAMODEL */ 26821 26822 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26823 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26824 "sd_persistent_reservation_in_read_keys: " 26825 "failed ddi_copyout: mhioc_key_list_t\n"); 26826 rval = EFAULT; 26827 goto done; 26828 } 26829 26830 #endif /* _MULTI_DATAMODEL */ 26831 26832 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 26833 li.listsize * MHIOC_RESV_KEY_SIZE); 26834 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 26835 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26836 "sd_persistent_reservation_in_read_keys: " 26837 "failed ddi_copyout: keylist\n"); 26838 rval = EFAULT; 26839 } 26840 done: 26841 sd_ssc_fini(ssc); 26842 kmem_free(data_bufp, data_len); 26843 return (rval); 26844 } 26845 26846 26847 /* 26848 * Function: sd_persistent_reservation_in_read_resv 26849 * 26850 * Description: This routine is the driver entry point for handling CD-ROM 26851 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 26852 * by sending the SCSI-3 PRIN commands to the device. 26853 * Process the read persistent reservations command response by 26854 * copying the reservation information into the user provided 26855 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 26856 * 26857 * Arguments: un - Pointer to soft state struct for the target. 26858 * usrp - user provided pointer to multihost Persistent In Read 26859 * Keys structure (mhioc_inkeys_t) 26860 * flag - this argument is a pass through to ddi_copyxxx() 26861 * directly from the mode argument of ioctl(). 26862 * 26863 * Return Code: 0 - Success 26864 * EACCES 26865 * ENOTSUP 26866 * errno return code from sd_send_scsi_cmd() 26867 * 26868 * Context: Can sleep. Does not return until command is completed. 
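 *
 * Note: As in sd_persistent_reservation_in_read_keys() above, the ILP32
 *	list header is handled with the usual ddi_model_convert_from()
 *	pattern; a sketch of the shape used below:
 *
 *	    switch (ddi_model_convert_from(flag & FMODELS)) {
 *	    case DDI_MODEL_ILP32:
 *		    copy in the 32-bit header and widen the list pointer;
 *		    break;
 *	    case DDI_MODEL_NONE:
 *		    copy in the native header;
 *		    break;
 *	    }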
26869 */ 26870 26871 static int 26872 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 26873 mhioc_inresvs_t *usrp, int flag) 26874 { 26875 #ifdef _MULTI_DATAMODEL 26876 struct mhioc_resv_desc_list32 resvlist32; 26877 #endif 26878 sd_prin_readresv_t *in; 26879 mhioc_inresvs_t *ptr; 26880 sd_readresv_desc_t *readresv_ptr; 26881 mhioc_resv_desc_list_t resvlist; 26882 mhioc_resv_desc_t resvdesc; 26883 uchar_t *data_bufp = NULL; 26884 int data_len; 26885 int rval = 0; 26886 int i; 26887 size_t copysz = 0; 26888 mhioc_resv_desc_t *bufp; 26889 sd_ssc_t *ssc; 26890 26891 if ((ptr = usrp) == NULL) { 26892 return (EINVAL); 26893 } 26894 26895 ssc = sd_ssc_init(un); 26896 26897 /* 26898 * Get the listsize from user 26899 */ 26900 #ifdef _MULTI_DATAMODEL 26901 switch (ddi_model_convert_from(flag & FMODELS)) { 26902 case DDI_MODEL_ILP32: 26903 copysz = sizeof (struct mhioc_resv_desc_list32); 26904 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 26905 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26906 "sd_persistent_reservation_in_read_resv: " 26907 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26908 rval = EFAULT; 26909 goto done; 26910 } 26911 resvlist.listsize = resvlist32.listsize; 26912 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 26913 break; 26914 26915 case DDI_MODEL_NONE: 26916 copysz = sizeof (mhioc_resv_desc_list_t); 26917 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26918 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26919 "sd_persistent_reservation_in_read_resv: " 26920 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26921 rval = EFAULT; 26922 goto done; 26923 } 26924 break; 26925 } 26926 #else /* ! _MULTI_DATAMODEL */ 26927 copysz = sizeof (mhioc_resv_desc_list_t); 26928 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26929 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26930 "sd_persistent_reservation_in_read_resv: " 26931 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26932 rval = EFAULT; 26933 goto done; 26934 } 26935 #endif /* ! _MULTI_DATAMODEL */ 26936 26937 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 26938 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 26939 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26940 26941 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV, 26942 data_len, data_bufp); 26943 if (rval != 0) { 26944 if (rval == EIO) 26945 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 26946 else 26947 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 26948 goto done; 26949 } 26950 in = (sd_prin_readresv_t *)data_bufp; 26951 ptr->generation = BE_32(in->generation); 26952 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 26953 26954 /* 26955 * Return the min(listsize, listlen( keys 26956 */ 26957 #ifdef _MULTI_DATAMODEL 26958 26959 switch (ddi_model_convert_from(flag & FMODELS)) { 26960 case DDI_MODEL_ILP32: 26961 resvlist32.listlen = resvlist.listlen; 26962 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 26963 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26964 "sd_persistent_reservation_in_read_resv: " 26965 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26966 rval = EFAULT; 26967 goto done; 26968 } 26969 break; 26970 26971 case DDI_MODEL_NONE: 26972 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26973 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26974 "sd_persistent_reservation_in_read_resv: " 26975 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26976 rval = EFAULT; 26977 goto done; 26978 } 26979 break; 26980 } 26981 26982 #else /* ! 
_MULTI_DATAMODEL */ 26983 26984 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26985 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26986 "sd_persistent_reservation_in_read_resv: " 26987 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26988 rval = EFAULT; 26989 goto done; 26990 } 26991 26992 #endif /* ! _MULTI_DATAMODEL */ 26993 26994 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 26995 bufp = resvlist.list; 26996 copysz = sizeof (mhioc_resv_desc_t); 26997 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 26998 i++, readresv_ptr++, bufp++) { 26999 27000 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 27001 MHIOC_RESV_KEY_SIZE); 27002 resvdesc.type = readresv_ptr->type; 27003 resvdesc.scope = readresv_ptr->scope; 27004 resvdesc.scope_specific_addr = 27005 BE_32(readresv_ptr->scope_specific_addr); 27006 27007 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 27008 SD_ERROR(SD_LOG_IOCTL_MHD, un, 27009 "sd_persistent_reservation_in_read_resv: " 27010 "failed ddi_copyout: resvlist\n"); 27011 rval = EFAULT; 27012 goto done; 27013 } 27014 } 27015 done: 27016 sd_ssc_fini(ssc); 27017 /* only if data_bufp is allocated, we need to free it */ 27018 if (data_bufp) { 27019 kmem_free(data_bufp, data_len); 27020 } 27021 return (rval); 27022 } 27023 27024 27025 /* 27026 * Function: sr_change_blkmode() 27027 * 27028 * Description: This routine is the driver entry point for handling CD-ROM 27029 * block mode ioctl requests. Support for returning and changing 27030 * the current block size in use by the device is implemented. The 27031 * LBA size is changed via a MODE SELECT Block Descriptor. 27032 * 27033 * This routine issues a mode sense with an allocation length of 27034 * 12 bytes for the mode page header and a single block descriptor. 27035 * 27036 * Arguments: dev - the device 'dev_t' 27037 * cmd - the request type; one of CDROMGBLKMODE (get) or 27038 * CDROMSBLKMODE (set) 27039 * data - current block size or requested block size 27040 * flag - this argument is a pass through to ddi_copyxxx() directly 27041 * from the mode argument of ioctl(). 27042 * 27043 * Return Code: the code returned by sd_send_scsi_cmd() 27044 * EINVAL if invalid arguments are provided 27045 * EFAULT if ddi_copyxxx() fails 27046 * ENXIO if fail ddi_get_soft_state 27047 * EIO if invalid mode sense block descriptor length 27048 * 27049 */ 27050 27051 static int 27052 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 27053 { 27054 struct sd_lun *un = NULL; 27055 struct mode_header *sense_mhp, *select_mhp; 27056 struct block_descriptor *sense_desc, *select_desc; 27057 int current_bsize; 27058 int rval = EINVAL; 27059 uchar_t *sense = NULL; 27060 uchar_t *select = NULL; 27061 sd_ssc_t *ssc; 27062 27063 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 27064 27065 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27066 return (ENXIO); 27067 } 27068 27069 /* 27070 * The block length is changed via the Mode Select block descriptor, the 27071 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 27072 * required as part of this routine. Therefore the mode sense allocation 27073 * length is specified to be the length of a mode page header and a 27074 * block descriptor. 
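 *	(That is, BUFLEN_CHG_BLK_MODE covers the 12 bytes noted above:
 *	MODE_HEADER_LENGTH (4) plus MODE_BLK_DESC_LENGTH (8).)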
27075  */
27076 	sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
27077 
27078 	ssc = sd_ssc_init(un);
27079 	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
27080 	    BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
27081 	sd_ssc_fini(ssc);
27082 	if (rval != 0) {
27083 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27084 		    "sr_change_blkmode: Mode Sense Failed\n");
27085 		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
27086 		return (rval);
27087 	}
27088 
27089 	/* Check the block descriptor len to handle only 1 block descriptor */
27090 	sense_mhp = (struct mode_header *)sense;
27091 	if ((sense_mhp->bdesc_length == 0) ||
27092 	    (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
27093 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27094 		    "sr_change_blkmode: Mode Sense returned invalid block"
27095 		    " descriptor length\n");
27096 		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
27097 		return (EIO);
27098 	}
27099 	sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
27100 	current_bsize = ((sense_desc->blksize_hi << 16) |
27101 	    (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);
27102 
27103 	/* Process command */
27104 	switch (cmd) {
27105 	case CDROMGBLKMODE:
27106 		/* Return the block size obtained during the mode sense */
27107 		if (ddi_copyout(&current_bsize, (void *)data,
27108 		    sizeof (int), flag) != 0)
27109 			rval = EFAULT;
27110 		break;
27111 	case CDROMSBLKMODE:
27112 		/* Validate the requested block size */
27113 		switch (data) {
27114 		case CDROM_BLK_512:
27115 		case CDROM_BLK_1024:
27116 		case CDROM_BLK_2048:
27117 		case CDROM_BLK_2056:
27118 		case CDROM_BLK_2336:
27119 		case CDROM_BLK_2340:
27120 		case CDROM_BLK_2352:
27121 		case CDROM_BLK_2368:
27122 		case CDROM_BLK_2448:
27123 		case CDROM_BLK_2646:
27124 		case CDROM_BLK_2647:
27125 			break;
27126 		default:
27127 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27128 			    "sr_change_blkmode: "
27129 			    "Block Size '%ld' Not Supported\n", data);
27130 			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
27131 			return (EINVAL);
27132 		}
27133 
27134 		/*
27135 		 * The current block size matches the requested block size so
27136 		 * there is no need to send the mode select to change the size
27137 		 */
27138 		if (current_bsize == data) {
27139 			break;
27140 		}
27141 
27142 		/* Build the select data for the requested block size */
27143 		select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
27144 		select_mhp = (struct mode_header *)select;
27145 		select_desc =
27146 		    (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
27147 		/*
27148 		 * The LBA size is changed via the block descriptor, so the
27149 		 * descriptor is built according to the user data
27150 		 */
27151 		select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
27152 		select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
27153 		select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
27154 		select_desc->blksize_lo = (char)((data) & 0x000000ff);
27155 
27156 		/* Send the mode select for the requested block size */
27157 		ssc = sd_ssc_init(un);
27158 		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
27159 		    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
27160 		    SD_PATH_STANDARD);
27161 		sd_ssc_fini(ssc);
27162 		if (rval != 0) {
27163 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27164 			    "sr_change_blkmode: Mode Select Failed\n");
27165 			/*
27166 			 * The mode select failed for the requested block size,
27167 			 * so reset the data for the original block size and
27168 			 * send it to the target. The error is indicated by the
27169 			 * return value for the failed mode select.
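			 * The restore is best-effort: its own status is
			 * deliberately discarded via (void), and the original
			 * failure is what gets returned.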
27170 */ 27171 select_desc->blksize_hi = sense_desc->blksize_hi; 27172 select_desc->blksize_mid = sense_desc->blksize_mid; 27173 select_desc->blksize_lo = sense_desc->blksize_lo; 27174 ssc = sd_ssc_init(un); 27175 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 27176 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 27177 SD_PATH_STANDARD); 27178 sd_ssc_fini(ssc); 27179 } else { 27180 ASSERT(!mutex_owned(SD_MUTEX(un))); 27181 mutex_enter(SD_MUTEX(un)); 27182 sd_update_block_info(un, (uint32_t)data, 0); 27183 mutex_exit(SD_MUTEX(un)); 27184 } 27185 break; 27186 default: 27187 /* should not reach here, but check anyway */ 27188 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27189 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 27190 rval = EINVAL; 27191 break; 27192 } 27193 27194 if (select) { 27195 kmem_free(select, BUFLEN_CHG_BLK_MODE); 27196 } 27197 if (sense) { 27198 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 27199 } 27200 return (rval); 27201 } 27202 27203 27204 /* 27205 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 27206 * implement driver support for getting and setting the CD speed. The command 27207 * set used will be based on the device type. If the device has not been 27208 * identified as MMC the Toshiba vendor specific mode page will be used. If 27209 * the device is MMC but does not support the Real Time Streaming feature 27210 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 27211 * be used to read the speed. 27212 */ 27213 27214 /* 27215 * Function: sr_change_speed() 27216 * 27217 * Description: This routine is the driver entry point for handling CD-ROM 27218 * drive speed ioctl requests for devices supporting the Toshiba 27219 * vendor specific drive speed mode page. Support for returning 27220 * and changing the current drive speed in use by the device is 27221 * implemented. 27222 * 27223 * Arguments: dev - the device 'dev_t' 27224 * cmd - the request type; one of CDROMGDRVSPEED (get) or 27225 * CDROMSDRVSPEED (set) 27226 * data - current drive speed or requested drive speed 27227 * flag - this argument is a pass through to ddi_copyxxx() directly 27228 * from the mode argument of ioctl(). 27229 * 27230 * Return Code: the code returned by sd_send_scsi_cmd() 27231 * EINVAL if invalid arguments are provided 27232 * EFAULT if ddi_copyxxx() fails 27233 * ENXIO if fail ddi_get_soft_state 27234 * EIO if invalid mode sense block descriptor length 27235 */ 27236 27237 static int 27238 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 27239 { 27240 struct sd_lun *un = NULL; 27241 struct mode_header *sense_mhp, *select_mhp; 27242 struct mode_speed *sense_page, *select_page; 27243 int current_speed; 27244 int rval = EINVAL; 27245 int bd_len; 27246 uchar_t *sense = NULL; 27247 uchar_t *select = NULL; 27248 sd_ssc_t *ssc; 27249 27250 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 27251 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27252 return (ENXIO); 27253 } 27254 27255 /* 27256 * Note: The drive speed is being modified here according to a Toshiba 27257 * vendor specific mode page (0x31). 
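	 * The page layout assumed by struct mode_speed is simply the page
	 * header followed by a speed byte; note that on these drives the
	 * raw value 0x2 denotes 12x (see the CDROMGDRVSPEED case below).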
27258  */
27259 	sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
27260 
27261 	ssc = sd_ssc_init(un);
27262 	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
27263 	    BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
27264 	    SD_PATH_STANDARD);
27265 	sd_ssc_fini(ssc);
27266 	if (rval != 0) {
27267 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27268 		    "sr_change_speed: Mode Sense Failed\n");
27269 		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27270 		return (rval);
27271 	}
27272 	sense_mhp = (struct mode_header *)sense;
27273 
27274 	/* Check the block descriptor len to handle only 1 block descriptor */
27275 	bd_len = sense_mhp->bdesc_length;
27276 	if (bd_len > MODE_BLK_DESC_LENGTH) {
27277 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27278 		    "sr_change_speed: Mode Sense returned invalid block "
27279 		    "descriptor length\n");
27280 		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27281 		return (EIO);
27282 	}
27283 
27284 	sense_page = (struct mode_speed *)
27285 	    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
27286 	current_speed = sense_page->speed;
27287 
27288 	/* Process command */
27289 	switch (cmd) {
27290 	case CDROMGDRVSPEED:
27291 		/* Return the drive speed obtained during the mode sense */
27292 		if (current_speed == 0x2) {
27293 			current_speed = CDROM_TWELVE_SPEED;
27294 		}
27295 		if (ddi_copyout(&current_speed, (void *)data,
27296 		    sizeof (int), flag) != 0) {
27297 			rval = EFAULT;
27298 		}
27299 		break;
27300 	case CDROMSDRVSPEED:
27301 		/* Validate the requested drive speed */
27302 		switch ((uchar_t)data) {
27303 		case CDROM_TWELVE_SPEED:
27304 			data = 0x2;
27305 			/*FALLTHROUGH*/
27306 		case CDROM_NORMAL_SPEED:
27307 		case CDROM_DOUBLE_SPEED:
27308 		case CDROM_QUAD_SPEED:
27309 		case CDROM_MAXIMUM_SPEED:
27310 			break;
27311 		default:
27312 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27313 			    "sr_change_speed: "
27314 			    "Drive Speed '%d' Not Supported\n", (uchar_t)data);
27315 			kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27316 			return (EINVAL);
27317 		}
27318 
27319 		/*
27320 		 * The current drive speed matches the requested drive speed so
27321 		 * there is no need to send the mode select to change the speed
27322 		 */
27323 		if (current_speed == data) {
27324 			break;
27325 		}
27326 
27327 		/* Build the select data for the requested drive speed */
27328 		select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
27329 		select_mhp = (struct mode_header *)select;
27330 		select_mhp->bdesc_length = 0;
27331 		select_page =
27332 		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
27335 		select_page->mode_page.code = CDROM_MODE_SPEED;
27336 		select_page->mode_page.length = 2;
27337 		select_page->speed = (uchar_t)data;
27338 
27339 		/* Send the mode select for the requested drive speed */
27340 		ssc = sd_ssc_init(un);
27341 		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
27342 		    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
27343 		    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27344 		sd_ssc_fini(ssc);
27345 		if (rval != 0) {
27346 			/*
27347 			 * The mode select failed for the requested drive speed,
27348 			 * so reset the data for the original drive speed and
27349 			 * send it to the target. The error is indicated by the
27350 			 * return value for the failed mode select.
27351 */ 27352 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27353 "sr_drive_speed: Mode Select Failed\n"); 27354 select_page->speed = sense_page->speed; 27355 ssc = sd_ssc_init(un); 27356 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 27357 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 27358 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27359 sd_ssc_fini(ssc); 27360 } 27361 break; 27362 default: 27363 /* should not reach here, but check anyway */ 27364 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27365 "sr_change_speed: Command '%x' Not Supported\n", cmd); 27366 rval = EINVAL; 27367 break; 27368 } 27369 27370 if (select) { 27371 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 27372 } 27373 if (sense) { 27374 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 27375 } 27376 27377 return (rval); 27378 } 27379 27380 27381 /* 27382 * Function: sr_atapi_change_speed() 27383 * 27384 * Description: This routine is the driver entry point for handling CD-ROM 27385 * drive speed ioctl requests for MMC devices that do not support 27386 * the Real Time Streaming feature (0x107). 27387 * 27388 * Note: This routine will use the SET SPEED command which may not 27389 * be supported by all devices. 27390 * 27391 * Arguments: dev- the device 'dev_t' 27392 * cmd- the request type; one of CDROMGDRVSPEED (get) or 27393 * CDROMSDRVSPEED (set) 27394 * data- current drive speed or requested drive speed 27395 * flag- this argument is a pass through to ddi_copyxxx() directly 27396 * from the mode argument of ioctl(). 27397 * 27398 * Return Code: the code returned by sd_send_scsi_cmd() 27399 * EINVAL if invalid arguments are provided 27400 * EFAULT if ddi_copyxxx() fails 27401 * ENXIO if fail ddi_get_soft_state 27402 * EIO if invalid mode sense block descriptor length 27403 */ 27404 27405 static int 27406 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 27407 { 27408 struct sd_lun *un; 27409 struct uscsi_cmd *com = NULL; 27410 struct mode_header_grp2 *sense_mhp; 27411 uchar_t *sense_page; 27412 uchar_t *sense = NULL; 27413 char cdb[CDB_GROUP5]; 27414 int bd_len; 27415 int current_speed = 0; 27416 int max_speed = 0; 27417 int rval; 27418 sd_ssc_t *ssc; 27419 27420 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 27421 27422 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27423 return (ENXIO); 27424 } 27425 27426 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 27427 27428 ssc = sd_ssc_init(un); 27429 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 27430 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 27431 SD_PATH_STANDARD); 27432 sd_ssc_fini(ssc); 27433 if (rval != 0) { 27434 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27435 "sr_atapi_change_speed: Mode Sense Failed\n"); 27436 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27437 return (rval); 27438 } 27439 27440 /* Check the block descriptor len to handle only 1 block descriptor */ 27441 sense_mhp = (struct mode_header_grp2 *)sense; 27442 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 27443 if (bd_len > MODE_BLK_DESC_LENGTH) { 27444 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27445 "sr_atapi_change_speed: Mode Sense returned invalid " 27446 "block descriptor length\n"); 27447 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27448 return (EIO); 27449 } 27450 27451 /* Calculate the current and maximum drive speeds */ 27452 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 27453 current_speed = (sense_page[14] << 8) | sense_page[15]; 27454 max_speed = (sense_page[8] << 8) | sense_page[9]; 27455 27456 
/* Process the command */ 27457 switch (cmd) { 27458 case CDROMGDRVSPEED: 27459 current_speed /= SD_SPEED_1X; 27460 if (ddi_copyout(&current_speed, (void *)data, 27461 sizeof (int), flag) != 0) 27462 rval = EFAULT; 27463 break; 27464 case CDROMSDRVSPEED: 27465 /* Convert the speed code to KB/sec */ 27466 switch ((uchar_t)data) { 27467 case CDROM_NORMAL_SPEED: 27468 current_speed = SD_SPEED_1X; 27469 break; 27470 case CDROM_DOUBLE_SPEED: 27471 current_speed = 2 * SD_SPEED_1X; 27472 break; 27473 case CDROM_QUAD_SPEED: 27474 current_speed = 4 * SD_SPEED_1X; 27475 break; 27476 case CDROM_TWELVE_SPEED: 27477 current_speed = 12 * SD_SPEED_1X; 27478 break; 27479 case CDROM_MAXIMUM_SPEED: 27480 current_speed = 0xffff; 27481 break; 27482 default: 27483 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27484 "sr_atapi_change_speed: invalid drive speed %d\n", 27485 (uchar_t)data); 27486 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27487 return (EINVAL); 27488 } 27489 27490 /* Check the request against the drive's max speed. */ 27491 if (current_speed != 0xffff) { 27492 if (current_speed > max_speed) { 27493 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27494 return (EINVAL); 27495 } 27496 } 27497 27498 /* 27499 * Build and send the SET SPEED command 27500 * 27501 * Note: The SET SPEED (0xBB) command used in this routine is 27502 * obsolete per the SCSI MMC spec but still supported in the 27503 * MT FUJI vendor spec. Most equipment adheres to MT FUJI, 27504 * therefore the command is still implemented in this routine. 27505 */ 27506 bzero(cdb, sizeof (cdb)); 27507 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 27508 cdb[2] = (uchar_t)(current_speed >> 8); 27509 cdb[3] = (uchar_t)current_speed; 27510 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27511 com->uscsi_cdb = (caddr_t)cdb; 27512 com->uscsi_cdblen = CDB_GROUP5; 27513 com->uscsi_bufaddr = NULL; 27514 com->uscsi_buflen = 0; 27515 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT; 27516 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 27517 break; 27518 default: 27519 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27520 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 27521 rval = EINVAL; 27522 } 27523 27524 if (sense) { 27525 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27526 } 27527 if (com) { 27528 kmem_free(com, sizeof (*com)); 27529 } 27530 return (rval); 27531 } 27532 27533 27534 /* 27535 * Function: sr_pause_resume() 27536 * 27537 * Description: This routine is the driver entry point for handling CD-ROM 27538 * pause/resume ioctl requests. This only affects the audio play 27539 * operation. 27540 * 27541 * Arguments: dev - the device 'dev_t' 27542 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 27543 * for setting the resume bit of the cdb.
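 *
 *		For example, a userland caller (hypothetical usage sketch,
 *		not part of this driver; fd is assumed to be an open
 *		descriptor on the raw CD-ROM device) might issue:
 *
 *			(void) ioctl(fd, CDROMPAUSE, 0);
 *			(void) ioctl(fd, CDROMRESUME, 0);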
27544 * 27545 * Return Code: the code returned by sd_send_scsi_cmd() 27546 * EINVAL if invalid mode specified 27547 * ENXIO if fail ddi_get_soft_state 27548 */ 27549 27550 static int 27551 sr_pause_resume(dev_t dev, int cmd) 27552 { 27553 struct sd_lun *un; 27554 struct uscsi_cmd *com; 27555 char cdb[CDB_GROUP1]; 27556 int rval; 27557 27558 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27559 return (ENXIO); 27560 } 27561 27562 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27563 bzero(cdb, CDB_GROUP1); 27564 cdb[0] = SCMD_PAUSE_RESUME; 27565 switch (cmd) { 27566 case CDROMRESUME: 27567 cdb[8] = 1; 27568 break; 27569 case CDROMPAUSE: 27570 cdb[8] = 0; 27571 break; 27572 default: 27573 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 27574 " Command '%x' Not Supported\n", cmd); 27575 rval = EINVAL; 27576 goto done; 27577 } 27578 27579 com->uscsi_cdb = cdb; 27580 com->uscsi_cdblen = CDB_GROUP1; 27581 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT; 27582 27583 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27584 SD_PATH_STANDARD); 27585 27586 done: 27587 kmem_free(com, sizeof (*com)); 27588 return (rval); 27589 } 27590 27591 27592 /* 27593 * Function: sr_play_msf() 27594 * 27595 * Description: This routine is the driver entry point for handling CD-ROM 27596 * ioctl requests to output the audio signals at the specified 27597 * starting address and continue the audio play until the specified 27598 * ending address (CDROMPLAYMSF). The address is in Minute Second 27599 * Frame (MSF) format. 27600 * 27601 * Arguments: dev - the device 'dev_t' 27602 * data - pointer to user provided audio msf structure, 27603 * specifying start/end addresses. 27604 * flag - this argument is a pass through to ddi_copyxxx() 27605 * directly from the mode argument of ioctl().
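 *
 *		Hypothetical userland usage (illustration only; fd is an
 *		assumed open descriptor on the raw CD-ROM device): play from
 *		0:02:00 through 0:04:00:
 *
 *			struct cdrom_msf msf;
 *			msf.cdmsf_min0 = 0; msf.cdmsf_sec0 = 2;
 *			msf.cdmsf_frame0 = 0;
 *			msf.cdmsf_min1 = 0; msf.cdmsf_sec1 = 4;
 *			msf.cdmsf_frame1 = 0;
 *			(void) ioctl(fd, CDROMPLAYMSF, &msf);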
27606 * 27607 * Return Code: the code returned by sd_send_scsi_cmd() 27608 * EFAULT if ddi_copyxxx() fails 27609 * ENXIO if fail ddi_get_soft_state 27610 * EINVAL if data pointer is NULL 27611 */ 27612 27613 static int 27614 sr_play_msf(dev_t dev, caddr_t data, int flag) 27615 { 27616 struct sd_lun *un; 27617 struct uscsi_cmd *com; 27618 struct cdrom_msf msf_struct; 27619 struct cdrom_msf *msf = &msf_struct; 27620 char cdb[CDB_GROUP1]; 27621 int rval; 27622 27623 if (data == NULL) { 27624 return (EINVAL); 27625 } 27626 27627 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27628 return (ENXIO); 27629 } 27630 27631 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 27632 return (EFAULT); 27633 } 27634 27635 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27636 bzero(cdb, CDB_GROUP1); 27637 cdb[0] = SCMD_PLAYAUDIO_MSF; 27638 if (un->un_f_cfg_playmsf_bcd == TRUE) { 27639 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 27640 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 27641 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 27642 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 27643 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 27644 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 27645 } else { 27646 cdb[3] = msf->cdmsf_min0; 27647 cdb[4] = msf->cdmsf_sec0; 27648 cdb[5] = msf->cdmsf_frame0; 27649 cdb[6] = msf->cdmsf_min1; 27650 cdb[7] = msf->cdmsf_sec1; 27651 cdb[8] = msf->cdmsf_frame1; 27652 } 27653 com->uscsi_cdb = cdb; 27654 com->uscsi_cdblen = CDB_GROUP1; 27655 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT; 27656 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27657 SD_PATH_STANDARD); 27658 kmem_free(com, sizeof (*com)); 27659 return (rval); 27660 } 27661 27662 27663 /* 27664 * Function: sr_play_trkind() 27665 * 27666 * Description: This routine is the driver entry point for handling CD-ROM 27667 * ioctl requests to output the audio signals at the specified 27668 * starting address and continue the audio play until the specified 27669 * ending address (CDROMPLAYTRKIND). The address is in Track Index 27670 * format. 27671 * 27672 * Arguments: dev - the device 'dev_t' 27673 * data - pointer to user provided audio track/index structure, 27674 * specifying start/end addresses. 27675 * flag - this argument is a pass through to ddi_copyxxx() 27676 * directly from the mode argument of ioctl(). 
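 *
 *		Hypothetical userland usage (illustration only): play from
 *		track 1, index 1 through track 2, index 1:
 *
 *			struct cdrom_ti ti;
 *			ti.cdti_trk0 = 1; ti.cdti_ind0 = 1;
 *			ti.cdti_trk1 = 2; ti.cdti_ind1 = 1;
 *			(void) ioctl(fd, CDROMPLAYTRKIND, &ti);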
27677 * 27678 * Return Code: the code returned by sd_send_scsi_cmd() 27679 * EFAULT if ddi_copyxxx() fails 27680 * ENXIO if fail ddi_get_soft_state 27681 * EINVAL if data pointer is NULL 27682 */ 27683 27684 static int 27685 sr_play_trkind(dev_t dev, caddr_t data, int flag) 27686 { 27687 struct cdrom_ti ti_struct; 27688 struct cdrom_ti *ti = &ti_struct; 27689 struct uscsi_cmd *com = NULL; 27690 char cdb[CDB_GROUP1]; 27691 int rval; 27692 27693 if (data == NULL) { 27694 return (EINVAL); 27695 } 27696 27697 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 27698 return (EFAULT); 27699 } 27700 27701 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27702 bzero(cdb, CDB_GROUP1); 27703 cdb[0] = SCMD_PLAYAUDIO_TI; 27704 cdb[4] = ti->cdti_trk0; 27705 cdb[5] = ti->cdti_ind0; 27706 cdb[7] = ti->cdti_trk1; 27707 cdb[8] = ti->cdti_ind1; 27708 com->uscsi_cdb = cdb; 27709 com->uscsi_cdblen = CDB_GROUP1; 27710 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT; 27711 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27712 SD_PATH_STANDARD); 27713 kmem_free(com, sizeof (*com)); 27714 return (rval); 27715 } 27716 27717 27718 /* 27719 * Function: sr_read_all_subcodes() 27720 * 27721 * Description: This routine is the driver entry point for handling CD-ROM 27722 * ioctl requests to return raw subcode data while the target is 27723 * playing audio (CDROMSUBCODE). 27724 * 27725 * Arguments: dev - the device 'dev_t' 27726 * data - pointer to user provided cdrom subcode structure, 27727 * specifying the transfer length and address. 27728 * flag - this argument is a pass through to ddi_copyxxx() 27729 * directly from the mode argument of ioctl(). 27730 * 27731 * Return Code: the code returned by sd_send_scsi_cmd() 27732 * EFAULT if ddi_copyxxx() fails 27733 * ENXIO if fail ddi_get_soft_state 27734 * EINVAL if data pointer is NULL 27735 */ 27736 27737 static int 27738 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 27739 { 27740 struct sd_lun *un = NULL; 27741 struct uscsi_cmd *com = NULL; 27742 struct cdrom_subcode *subcode = NULL; 27743 int rval; 27744 size_t buflen; 27745 char cdb[CDB_GROUP5]; 27746 27747 #ifdef _MULTI_DATAMODEL 27748 /* To support ILP32 applications in an LP64 world */ 27749 struct cdrom_subcode32 cdrom_subcode32; 27750 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 27751 #endif 27752 if (data == NULL) { 27753 return (EINVAL); 27754 } 27755 27756 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27757 return (ENXIO); 27758 } 27759 27760 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 27761 27762 #ifdef _MULTI_DATAMODEL 27763 switch (ddi_model_convert_from(flag & FMODELS)) { 27764 case DDI_MODEL_ILP32: 27765 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 27766 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27767 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27768 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27769 return (EFAULT); 27770 } 27771 /* Convert the ILP32 uscsi data from the application to LP64 */ 27772 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 27773 break; 27774 case DDI_MODEL_NONE: 27775 if (ddi_copyin(data, subcode, 27776 sizeof (struct cdrom_subcode), flag)) { 27777 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27778 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27779 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27780 return (EFAULT); 27781 } 27782 break; 27783 } 27784 #else /* ! 
_MULTI_DATAMODEL */ 27785 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 27786 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27787 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27788 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27789 return (EFAULT); 27790 } 27791 #endif /* _MULTI_DATAMODEL */ 27792 27793 /* 27794 * Since MMC-2 expects max 3 bytes for length, check if the 27795 * length input is greater than 3 bytes 27796 */ 27797 if ((subcode->cdsc_length & 0xFF000000) != 0) { 27798 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27799 "sr_read_all_subcodes: " 27800 "cdrom transfer length too large: %d (limit %d)\n", 27801 subcode->cdsc_length, 0xFFFFFF); 27802 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27803 return (EINVAL); 27804 } 27805 27806 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 27807 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27808 bzero(cdb, CDB_GROUP5); 27809 27810 if (un->un_f_mmc_cap == TRUE) { 27811 cdb[0] = (char)SCMD_READ_CD; 27812 cdb[2] = (char)0xff; 27813 cdb[3] = (char)0xff; 27814 cdb[4] = (char)0xff; 27815 cdb[5] = (char)0xff; 27816 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27817 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27818 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 27819 cdb[10] = 1; 27820 } else { 27821 /* 27822 * Note: A vendor specific command (0xDF) is being used here to 27823 * request a read of all subcodes. 27824 */ 27825 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 27826 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 27827 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27828 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27829 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 27830 } 27831 com->uscsi_cdb = cdb; 27832 com->uscsi_cdblen = CDB_GROUP5; 27833 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 27834 com->uscsi_buflen = buflen; 27835 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ; 27836 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27837 SD_PATH_STANDARD); 27838 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27839 kmem_free(com, sizeof (*com)); 27840 return (rval); 27841 } 27842 27843 27844 /* 27845 * Function: sr_read_subchannel() 27846 * 27847 * Description: This routine is the driver entry point for handling CD-ROM 27848 * ioctl requests to return the Q sub-channel data of the CD 27849 * current position block (CDROMSUBCHNL). The data includes the 27850 * track number, index number, absolute CD-ROM address (LBA or MSF 27851 * format per the user), track relative CD-ROM address (LBA or MSF 27852 * format per the user), control data and audio status. 27853 * 27854 * Arguments: dev - the device 'dev_t' 27855 * data - pointer to user provided cdrom sub-channel structure 27856 * flag - this argument is a pass through to ddi_copyxxx() 27857 * directly from the mode argument of ioctl().
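 *
 *		Hypothetical userland usage (illustration only; report() is a
 *		stand-in for application code): the caller selects the
 *		address format before the call and reads the position on
 *		return:
 *
 *			struct cdrom_subchnl sc;
 *			sc.cdsc_format = CDROM_MSF;
 *			if (ioctl(fd, CDROMSUBCHNL, &sc) == 0)
 *				report(sc.cdsc_trk, sc.cdsc_absaddr);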
27858 * 27859 * Return Code: the code returned by sd_send_scsi_cmd() 27860 * EFAULT if ddi_copyxxx() fails 27861 * ENXIO if fail ddi_get_soft_state 27862 * EINVAL if data pointer is NULL 27863 */ 27864 27865 static int 27866 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 27867 { 27868 struct sd_lun *un; 27869 struct uscsi_cmd *com; 27870 struct cdrom_subchnl subchanel; 27871 struct cdrom_subchnl *subchnl = &subchanel; 27872 char cdb[CDB_GROUP1]; 27873 caddr_t buffer; 27874 int rval; 27875 27876 if (data == NULL) { 27877 return (EINVAL); 27878 } 27879 27880 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27881 (un->un_state == SD_STATE_OFFLINE)) { 27882 return (ENXIO); 27883 } 27884 27885 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 27886 return (EFAULT); 27887 } 27888 27889 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 27890 bzero(cdb, CDB_GROUP1); 27891 cdb[0] = SCMD_READ_SUBCHANNEL; 27892 /* Set the MSF bit based on the user requested address format */ 27893 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 27894 /* 27895 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 27896 * returned 27897 */ 27898 cdb[2] = 0x40; 27899 /* 27900 * Set byte 3 to specify the return data format. A value of 0x01 27901 * indicates that the CD-ROM current position should be returned. 27902 */ 27903 cdb[3] = 0x01; 27904 cdb[8] = 0x10; 27905 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27906 com->uscsi_cdb = cdb; 27907 com->uscsi_cdblen = CDB_GROUP1; 27908 com->uscsi_bufaddr = buffer; 27909 com->uscsi_buflen = 16; 27910 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ; 27911 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 27912 SD_PATH_STANDARD); 27913 if (rval != 0) { 27914 kmem_free(buffer, 16); 27915 kmem_free(com, sizeof (*com)); 27916 return (rval); 27917 } 27918 27919 /* Process the returned Q sub-channel data */ 27920 subchnl->cdsc_audiostatus = buffer[1]; 27921 subchnl->cdsc_adr = (buffer[5] & 0xF0) >> 4; 27922 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 27923 subchnl->cdsc_trk = buffer[6]; 27924 subchnl->cdsc_ind = buffer[7]; 27925 if (subchnl->cdsc_format & CDROM_LBA) { 27926 subchnl->cdsc_absaddr.lba = 27927 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27928 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27929 subchnl->cdsc_reladdr.lba = 27930 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 27931 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 27932 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 27933 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 27934 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 27935 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 27936 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 27937 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 27938 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 27939 } else { 27940 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 27941 subchnl->cdsc_absaddr.msf.second = buffer[10]; 27942 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 27943 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 27944 subchnl->cdsc_reladdr.msf.second = buffer[14]; 27945 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 27946 } 27947 kmem_free(buffer, 16); 27948 kmem_free(com, sizeof (*com)); 27949 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 27950 != 0) { 27951 return (EFAULT); 27952 } 27953 return (rval); 27954 } 27955 27956 27957 /* 27958 * Function: sr_read_tocentry() 
27959 * 27960 * Description: This routine is the driver entry point for handling CD-ROM 27961 * ioctl requests to read from the Table of Contents (TOC) 27962 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 27963 * fields, the starting address (LBA or MSF format per the user) 27964 * and the data mode if the user specified track is a data track. 27965 * 27966 * Note: The READ HEADER (0x44) command used in this routine is 27967 * obsolete per the SCSI MMC spec but still supported in the 27968 * MT FUJI vendor spec. Most equipment adheres to MT FUJI, 27969 * therefore the command is still implemented in this routine. 27970 * 27971 * Arguments: dev - the device 'dev_t' 27972 * data - pointer to user provided toc entry structure, 27973 * specifying the track # and the address format 27974 * (LBA or MSF). 27975 * flag - this argument is a pass through to ddi_copyxxx() 27976 * directly from the mode argument of ioctl(). 27977 * 27978 * Return Code: the code returned by sd_send_scsi_cmd() 27979 * EFAULT if ddi_copyxxx() fails 27980 * ENXIO if fail ddi_get_soft_state 27981 * EINVAL if data pointer is NULL 27982 */ 27983 27984 static int 27985 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 27986 { 27987 struct sd_lun *un = NULL; 27988 struct uscsi_cmd *com; 27989 struct cdrom_tocentry toc_entry; 27990 struct cdrom_tocentry *entry = &toc_entry; 27991 caddr_t buffer; 27992 int rval; 27993 char cdb[CDB_GROUP1]; 27994 27995 if (data == NULL) { 27996 return (EINVAL); 27997 } 27998 27999 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28000 (un->un_state == SD_STATE_OFFLINE)) { 28001 return (ENXIO); 28002 } 28003 28004 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 28005 return (EFAULT); 28006 } 28007 28008 /* Validate the requested track and address format */ 28009 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 28010 return (EINVAL); 28011 } 28012 28013 if (entry->cdte_track == 0) { 28014 return (EINVAL); 28015 } 28016 28017 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 28018 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28019 bzero(cdb, CDB_GROUP1); 28020 28021 cdb[0] = SCMD_READ_TOC; 28022 /* Set the MSF bit based on the user requested address format */ 28023 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 28024 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 28025 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 28026 } else { 28027 cdb[6] = entry->cdte_track; 28028 } 28029 28030 /* 28031 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
28032 * (4 byte TOC response header + 8 byte track descriptor) 28033 */ 28034 cdb[8] = 12; 28035 com->uscsi_cdb = cdb; 28036 com->uscsi_cdblen = CDB_GROUP1; 28037 com->uscsi_bufaddr = buffer; 28038 com->uscsi_buflen = 0x0C; 28039 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 28040 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 28041 SD_PATH_STANDARD); 28042 if (rval != 0) { 28043 kmem_free(buffer, 12); 28044 kmem_free(com, sizeof (*com)); 28045 return (rval); 28046 } 28047 28048 /* Process the toc entry */ 28049 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 28050 entry->cdte_ctrl = (buffer[5] & 0x0F); 28051 if (entry->cdte_format & CDROM_LBA) { 28052 entry->cdte_addr.lba = 28053 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 28054 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 28055 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 28056 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 28057 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 28058 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 28059 /* 28060 * Send a READ TOC command using the LBA address format to get 28061 * the LBA for the track requested so it can be used in the 28062 * READ HEADER request 28063 * 28064 * Note: The MSF bit of the READ HEADER command specifies the 28065 * output format. The block address specified in that command 28066 * must be in LBA format. 28067 */ 28068 cdb[1] = 0; 28069 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 28070 SD_PATH_STANDARD); 28071 if (rval != 0) { 28072 kmem_free(buffer, 12); 28073 kmem_free(com, sizeof (*com)); 28074 return (rval); 28075 } 28076 } else { 28077 entry->cdte_addr.msf.minute = buffer[9]; 28078 entry->cdte_addr.msf.second = buffer[10]; 28079 entry->cdte_addr.msf.frame = buffer[11]; 28080 /* 28081 * Send a READ TOC command using the LBA address format to get 28082 * the LBA for the track requested so it can be used in the 28083 * READ HEADER request 28084 * 28085 * Note: The MSF bit of the READ HEADER command specifies the 28086 * output format. The block address specified in that command 28087 * must be in LBA format. 28088 */ 28089 cdb[1] = 0; 28090 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 28091 SD_PATH_STANDARD); 28092 if (rval != 0) { 28093 kmem_free(buffer, 12); 28094 kmem_free(com, sizeof (*com)); 28095 return (rval); 28096 } 28097 } 28098 28099 /* 28100 * Build and send the READ HEADER command to determine the data mode of 28101 * the user specified track. 28102 */ 28103 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 28104 (entry->cdte_track != CDROM_LEADOUT)) { 28105 bzero(cdb, CDB_GROUP1); 28106 cdb[0] = SCMD_READ_HEADER; 28107 cdb[2] = buffer[8]; 28108 cdb[3] = buffer[9]; 28109 cdb[4] = buffer[10]; 28110 cdb[5] = buffer[11]; 28111 cdb[8] = 0x08; 28112 com->uscsi_buflen = 0x08; 28113 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 28114 SD_PATH_STANDARD); 28115 if (rval == 0) { 28116 entry->cdte_datamode = buffer[0]; 28117 } else { 28118 /* 28119 * The READ HEADER command failed; since this is 28120 * obsoleted in one spec, it's better to return 28121 * -1 for an invalid track so that we can still 28122 * receive the rest of the TOC data. 28123
28123 */ 28124 entry->cdte_datamode = (uchar_t)-1; 28125 } 28126 } else { 28127 entry->cdte_datamode = (uchar_t)-1; 28128 } 28129 28130 kmem_free(buffer, 12); 28131 kmem_free(com, sizeof (*com)); 28132 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 28133 return (EFAULT); 28134 28135 return (rval); 28136 } 28137 28138 28139 /* 28140 * Function: sr_read_tochdr() 28141 * 28142 * Description: This routine is the driver entry point for handling CD-ROM 28143 * ioctl requests to read the Table of Contents (TOC) header 28144 * (CDROMREADTOHDR). The TOC header consists of the disk starting 28145 * and ending track numbers 28146 * 28147 * Arguments: dev - the device 'dev_t' 28148 * data - pointer to user provided toc header structure, 28149 * specifying the starting and ending track numbers. 28150 * flag - this argument is a pass through to ddi_copyxxx() 28151 * directly from the mode argument of ioctl(). 28152 * 28153 * Return Code: the code returned by sd_send_scsi_cmd() 28154 * EFAULT if ddi_copyxxx() fails 28155 * ENXIO if fail ddi_get_soft_state 28156 * EINVAL if data pointer is NULL 28157 */ 28158 28159 static int 28160 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 28161 { 28162 struct sd_lun *un; 28163 struct uscsi_cmd *com; 28164 struct cdrom_tochdr toc_header; 28165 struct cdrom_tochdr *hdr = &toc_header; 28166 char cdb[CDB_GROUP1]; 28167 int rval; 28168 caddr_t buffer; 28169 28170 if (data == NULL) { 28171 return (EINVAL); 28172 } 28173 28174 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28175 (un->un_state == SD_STATE_OFFLINE)) { 28176 return (ENXIO); 28177 } 28178 28179 buffer = kmem_zalloc(4, KM_SLEEP); 28180 bzero(cdb, CDB_GROUP1); 28181 cdb[0] = SCMD_READ_TOC; 28182 /* 28183 * Specifying a track number of 0x00 in the READ TOC command indicates 28184 * that the TOC header should be returned 28185 */ 28186 cdb[6] = 0x00; 28187 /* 28188 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 28189 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 28190 */ 28191 cdb[8] = 0x04; 28192 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28193 com->uscsi_cdb = cdb; 28194 com->uscsi_cdblen = CDB_GROUP1; 28195 com->uscsi_bufaddr = buffer; 28196 com->uscsi_buflen = 0x04; 28197 com->uscsi_timeout = 300; 28198 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ; 28199 28200 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 28201 SD_PATH_STANDARD); 28202 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 28203 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 28204 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 28205 } else { 28206 hdr->cdth_trk0 = buffer[2]; 28207 hdr->cdth_trk1 = buffer[3]; 28208 } 28209 kmem_free(buffer, 4); 28210 kmem_free(com, sizeof (*com)); 28211 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 28212 return (EFAULT); 28213 } 28214 return (rval); 28215 } 28216 28217 28218 /* 28219 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 28220 * sr_read_cdda(), sr_read_cdxa(), routines implement driver support for 28221 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 28222 * digital audio and extended architecture digital audio. These modes are 28223 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 28224 * MMC specs. 
28225 * 28226 * In addition to support for the various data formats these routines also 28227 * include support for devices that implement only the direct access READ 28228 * commands (0x08, 0x28), devices that implement the READ_CD commands 28229 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 28230 * READ CDXA commands (0xD8, 0xDB) 28231 */ 28232 28233 /* 28234 * Function: sr_read_mode1() 28235 * 28236 * Description: This routine is the driver entry point for handling CD-ROM 28237 * ioctl read mode1 requests (CDROMREADMODE1). 28238 * 28239 * Arguments: dev - the device 'dev_t' 28240 * data - pointer to user provided cd read structure specifying 28241 * the lba buffer address and length. 28242 * flag - this argument is a pass through to ddi_copyxxx() 28243 * directly from the mode argument of ioctl(). 28244 * 28245 * Return Code: the code returned by sd_send_scsi_cmd() 28246 * EFAULT if ddi_copyxxx() fails 28247 * ENXIO if fail ddi_get_soft_state 28248 * EINVAL if data pointer is NULL 28249 */ 28250 28251 static int 28252 sr_read_mode1(dev_t dev, caddr_t data, int flag) 28253 { 28254 struct sd_lun *un; 28255 struct cdrom_read mode1_struct; 28256 struct cdrom_read *mode1 = &mode1_struct; 28257 int rval; 28258 sd_ssc_t *ssc; 28259 28260 #ifdef _MULTI_DATAMODEL 28261 /* To support ILP32 applications in an LP64 world */ 28262 struct cdrom_read32 cdrom_read32; 28263 struct cdrom_read32 *cdrd32 = &cdrom_read32; 28264 #endif /* _MULTI_DATAMODEL */ 28265 28266 if (data == NULL) { 28267 return (EINVAL); 28268 } 28269 28270 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28271 (un->un_state == SD_STATE_OFFLINE)) { 28272 return (ENXIO); 28273 } 28274 28275 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28276 "sd_read_mode1: entry: un:0x%p\n", un); 28277 28278 #ifdef _MULTI_DATAMODEL 28279 switch (ddi_model_convert_from(flag & FMODELS)) { 28280 case DDI_MODEL_ILP32: 28281 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 28282 return (EFAULT); 28283 } 28284 /* Convert the ILP32 uscsi data from the application to LP64 */ 28285 cdrom_read32tocdrom_read(cdrd32, mode1); 28286 break; 28287 case DDI_MODEL_NONE: 28288 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 28289 return (EFAULT); 28290 } 28291 } 28292 #else /* ! _MULTI_DATAMODEL */ 28293 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 28294 return (EFAULT); 28295 } 28296 #endif /* _MULTI_DATAMODEL */ 28297 28298 ssc = sd_ssc_init(un); 28299 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 28300 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 28301 sd_ssc_fini(ssc); 28302 28303 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28304 "sd_read_mode1: exit: un:0x%p\n", un); 28305 28306 return (rval); 28307 } 28308 28309 28310 /* 28311 * Function: sr_read_cd_mode2() 28312 * 28313 * Description: This routine is the driver entry point for handling CD-ROM 28314 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 28315 * support the READ CD (0xBE) command or the 1st generation 28316 * READ CD (0xD4) command. 28317 * 28318 * Arguments: dev - the device 'dev_t' 28319 * data - pointer to user provided cd read structure specifying 28320 * the lba buffer address and length. 28321 * flag - this argument is a pass through to ddi_copyxxx() 28322 * directly from the mode argument of ioctl(). 
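 *
 *		Hypothetical userland usage (illustration only; buf is an
 *		assumed application buffer of cdread_buflen bytes, a multiple
 *		of the 2336 byte mode 2 sector size used below):
 *
 *			struct cdrom_read cr;
 *			cr.cdread_lba = 16;
 *			cr.cdread_bufaddr = buf;
 *			cr.cdread_buflen = 10 * 2336;
 *			(void) ioctl(fd, CDROMREADMODE2, &cr);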
28323 * 28324 * Return Code: the code returned by sd_send_scsi_cmd() 28325 * EFAULT if ddi_copyxxx() fails 28326 * ENXIO if fail ddi_get_soft_state 28327 * EINVAL if data pointer is NULL 28328 */ 28329 28330 static int 28331 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 28332 { 28333 struct sd_lun *un; 28334 struct uscsi_cmd *com; 28335 struct cdrom_read mode2_struct; 28336 struct cdrom_read *mode2 = &mode2_struct; 28337 uchar_t cdb[CDB_GROUP5]; 28338 int nblocks; 28339 int rval; 28340 #ifdef _MULTI_DATAMODEL 28341 /* To support ILP32 applications in an LP64 world */ 28342 struct cdrom_read32 cdrom_read32; 28343 struct cdrom_read32 *cdrd32 = &cdrom_read32; 28344 #endif /* _MULTI_DATAMODEL */ 28345 28346 if (data == NULL) { 28347 return (EINVAL); 28348 } 28349 28350 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28351 (un->un_state == SD_STATE_OFFLINE)) { 28352 return (ENXIO); 28353 } 28354 28355 #ifdef _MULTI_DATAMODEL 28356 switch (ddi_model_convert_from(flag & FMODELS)) { 28357 case DDI_MODEL_ILP32: 28358 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 28359 return (EFAULT); 28360 } 28361 /* Convert the ILP32 uscsi data from the application to LP64 */ 28362 cdrom_read32tocdrom_read(cdrd32, mode2); 28363 break; 28364 case DDI_MODEL_NONE: 28365 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 28366 return (EFAULT); 28367 } 28368 break; 28369 } 28370 28371 #else /* ! _MULTI_DATAMODEL */ 28372 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 28373 return (EFAULT); 28374 } 28375 #endif /* _MULTI_DATAMODEL */ 28376 28377 bzero(cdb, sizeof (cdb)); 28378 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 28379 /* Read command supported by 1st generation atapi drives */ 28380 cdb[0] = SCMD_READ_CDD4; 28381 } else { 28382 /* Universal CD Access Command */ 28383 cdb[0] = SCMD_READ_CD; 28384 } 28385 28386 /* 28387 * Set expected sector type to 2336 byte, Mode 2 Yellow Book 28388 */ 28389 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 28390 28391 /* set the start address */ 28392 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 28393 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 28394 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 28395 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 28396 28397 /* set the transfer length */ 28398 nblocks = mode2->cdread_buflen / 2336; 28399 cdb[6] = (uchar_t)(nblocks >> 16); 28400 cdb[7] = (uchar_t)(nblocks >> 8); 28401 cdb[8] = (uchar_t)nblocks; 28402 28403 /* set the filter bits */ 28404 cdb[9] = CDROM_READ_CD_USERDATA; 28405 28406 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28407 com->uscsi_cdb = (caddr_t)cdb; 28408 com->uscsi_cdblen = sizeof (cdb); 28409 com->uscsi_bufaddr = mode2->cdread_bufaddr; 28410 com->uscsi_buflen = mode2->cdread_buflen; 28411 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ; 28412 28413 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28414 SD_PATH_STANDARD); 28415 kmem_free(com, sizeof (*com)); 28416 return (rval); 28417 } 28418 28419 28420 /* 28421 * Function: sr_read_mode2() 28422 * 28423 * Description: This routine is the driver entry point for handling CD-ROM 28424 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 28425 * do not support the READ CD (0xBE) command. 28426 * 28427 * Arguments: dev - the device 'dev_t' 28428 * data - pointer to user provided cd read structure specifying 28429 * the lba, buffer address and length. 28430 * flag - this argument is a pass through to ddi_copyxxx() 28431 * directly from the mode argument of ioctl().
28432 * 28433 * Return Code: the code returned by sd_send_scsi_cmd() 28434 * EFAULT if ddi_copyxxx() fails 28435 * ENXIO if fail ddi_get_soft_state 28436 * EINVAL if data pointer is NULL 28437 * EIO if fail to reset block size 28438 * EAGAIN if commands are in progress in the driver 28439 */ 28440 28441 static int 28442 sr_read_mode2(dev_t dev, caddr_t data, int flag) 28443 { 28444 struct sd_lun *un; 28445 struct cdrom_read mode2_struct; 28446 struct cdrom_read *mode2 = &mode2_struct; 28447 int rval; 28448 uint32_t restore_blksize; 28449 struct uscsi_cmd *com; 28450 uchar_t cdb[CDB_GROUP0]; 28451 int nblocks; 28452 28453 #ifdef _MULTI_DATAMODEL 28454 /* To support ILP32 applications in an LP64 world */ 28455 struct cdrom_read32 cdrom_read32; 28456 struct cdrom_read32 *cdrd32 = &cdrom_read32; 28457 #endif /* _MULTI_DATAMODEL */ 28458 28459 if (data == NULL) { 28460 return (EINVAL); 28461 } 28462 28463 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28464 (un->un_state == SD_STATE_OFFLINE)) { 28465 return (ENXIO); 28466 } 28467 28468 /* 28469 * Because this routine will update the device and driver block size 28470 * being used we want to make sure there are no commands in progress. 28471 * If commands are in progress the user will have to try again. 28472 * 28473 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 28474 * in sdioctl to protect commands from sdioctl through to the top of 28475 * sd_uscsi_strategy. See sdioctl for details. 28476 */ 28477 mutex_enter(SD_MUTEX(un)); 28478 if (un->un_ncmds_in_driver != 1) { 28479 mutex_exit(SD_MUTEX(un)); 28480 return (EAGAIN); 28481 } 28482 mutex_exit(SD_MUTEX(un)); 28483 28484 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28485 "sd_read_mode2: entry: un:0x%p\n", un); 28486 28487 #ifdef _MULTI_DATAMODEL 28488 switch (ddi_model_convert_from(flag & FMODELS)) { 28489 case DDI_MODEL_ILP32: 28490 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 28491 return (EFAULT); 28492 } 28493 /* Convert the ILP32 uscsi data from the application to LP64 */ 28494 cdrom_read32tocdrom_read(cdrd32, mode2); 28495 break; 28496 case DDI_MODEL_NONE: 28497 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 28498 return (EFAULT); 28499 } 28500 break; 28501 } 28502 #else /* ! 
_MULTI_DATAMODEL */ 28503 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 28504 return (EFAULT); 28505 } 28506 #endif /* _MULTI_DATAMODEL */ 28507 28508 /* Store the current target block size for restoration later */ 28509 restore_blksize = un->un_tgt_blocksize; 28510 28511 /* Change the device and soft state target block size to 2336 */ 28512 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 28513 rval = EIO; 28514 goto done; 28515 } 28516 28517 28518 bzero(cdb, sizeof (cdb)); 28519 28520 /* set READ operation */ 28521 cdb[0] = SCMD_READ; 28522 28523 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 28524 mode2->cdread_lba >>= 2; 28525 28526 /* set the start address */ 28527 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F); 28528 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 28529 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 28530 28531 /* set the transfer length */ 28532 nblocks = mode2->cdread_buflen / 2336; 28533 cdb[4] = (uchar_t)nblocks & 0xFF; 28534 28535 /* build command */ 28536 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28537 com->uscsi_cdb = (caddr_t)cdb; 28538 com->uscsi_cdblen = sizeof (cdb); 28539 com->uscsi_bufaddr = mode2->cdread_bufaddr; 28540 com->uscsi_buflen = mode2->cdread_buflen; 28541 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ; 28542 28543 /* 28544 * Issue SCSI command with user space address for read buffer. 28545 * 28546 * This sends the command through the main channel in the driver. 28547 * 28548 * Since this is accessed via an IOCTL call, we go through the 28549 * standard path, so that if the device was powered down, then 28550 * it would be 'awakened' to handle the command. 28551 */ 28552 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28553 SD_PATH_STANDARD); 28554 28555 kmem_free(com, sizeof (*com)); 28556 28557 /* Restore the device and soft state target block size */ 28558 if (sr_sector_mode(dev, restore_blksize) != 0) { 28559 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28560 "can't do switch back to mode 1\n"); 28561 /* 28562 * If the READ command succeeded we still need to report 28563 * an error because we failed to reset the block size 28564 */ 28565 if (rval == 0) { 28566 rval = EIO; 28567 } 28568 } 28569 28570 done: 28571 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28572 "sd_read_mode2: exit: un:0x%p\n", un); 28573 28574 return (rval); 28575 } 28576 28577 28578 /* 28579 * Function: sr_sector_mode() 28580 * 28581 * Description: This utility function is used by sr_read_mode2 to set the target 28582 * block size based on the user specified size. This is a legacy 28583 * implementation based upon a vendor specific mode page. 28584 * 28585 * Arguments: dev - the device 'dev_t' 28586 * blksize - the block size to be set; either 2336 or 28587 * 512.
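 *
 *		For example, sr_read_mode2() above calls
 *		sr_sector_mode(dev, SD_MODE2_BLKSIZE) to switch the device
 *		to 2336 byte sectors before issuing the READ, and calls
 *		sr_sector_mode(dev, restore_blksize) afterwards to switch
 *		back.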
28588 * 28589 * Return Code: the code returned by sd_send_scsi_MODE_SENSE() or 28590 * sd_send_scsi_MODE_SELECT() 28591 * ENXIO if fail ddi_get_soft_state 28592 * 28593 */ 28594 28595 static int 28596 sr_sector_mode(dev_t dev, uint32_t blksize) 28597 { 28598 struct sd_lun *un; 28599 uchar_t *sense; 28600 uchar_t *select; 28601 int rval; 28602 sd_ssc_t *ssc; 28603 28604 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28605 (un->un_state == SD_STATE_OFFLINE)) { 28606 return (ENXIO); 28607 } 28608 28609 sense = kmem_zalloc(20, KM_SLEEP); 28610 28611 /* Note: This is a vendor specific mode page (0x81) */ 28612 ssc = sd_ssc_init(un); 28613 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81, 28614 SD_PATH_STANDARD); 28615 sd_ssc_fini(ssc); 28616 if (rval != 0) { 28617 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28618 "sr_sector_mode: Mode Sense failed\n"); 28619 kmem_free(sense, 20); 28620 return (rval); 28621 } 28622 select = kmem_zalloc(20, KM_SLEEP); 28623 select[3] = 0x08; 28624 select[10] = ((blksize >> 8) & 0xff); 28625 select[11] = (blksize & 0xff); 28626 select[12] = 0x01; 28627 select[13] = 0x06; 28628 select[14] = sense[14]; 28629 select[15] = sense[15]; 28630 if (blksize == SD_MODE2_BLKSIZE) { 28631 select[14] |= 0x01; 28632 } 28633 28634 ssc = sd_ssc_init(un); 28635 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20, 28636 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28637 sd_ssc_fini(ssc); 28638 if (rval != 0) { 28639 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28640 "sr_sector_mode: Mode Select failed\n"); 28641 } else { 28642 /* 28643 * Only update the softstate block size if we successfully 28644 * changed the device block mode. 28645 */ 28646 mutex_enter(SD_MUTEX(un)); 28647 sd_update_block_info(un, blksize, 0); 28648 mutex_exit(SD_MUTEX(un)); 28649 } 28650 kmem_free(sense, 20); 28651 kmem_free(select, 20); 28652 return (rval); 28653 } 28654 28655 28656 /* 28657 * Function: sr_read_cdda() 28658 * 28659 * Description: This routine is the driver entry point for handling CD-ROM 28660 * ioctl requests to return CD-DA or subcode data (CDROMCDDA). If 28661 * the target supports CDDA these requests are handled via a vendor 28662 * specific command (0xD8). If the target does not support CDDA 28663 * these requests are handled via the READ CD command (0xBE). 28664 * 28665 * Arguments: dev - the device 'dev_t' 28666 * data - pointer to user provided CD-DA structure specifying 28667 * the track starting address, transfer length, and 28668 * subcode options. 28669 * flag - this argument is a pass through to ddi_copyxxx() 28670 * directly from the mode argument of ioctl().
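 *
 *		Hypothetical userland usage (illustration only): read 8
 *		CD-DA sectors with no subcode, so buf must hold 8 * 2352
 *		bytes:
 *
 *			struct cdrom_cdda cdda;
 *			cdda.cdda_addr = 0;
 *			cdda.cdda_length = 8;
 *			cdda.cdda_data = buf;
 *			cdda.cdda_subcode = CDROM_DA_NO_SUBCODE;
 *			(void) ioctl(fd, CDROMCDDA, &cdda);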
28671 * 28672 * Return Code: the code returned by sd_send_scsi_cmd() 28673 * EFAULT if ddi_copyxxx() fails 28674 * ENXIO if fail ddi_get_soft_state 28675 * EINVAL if invalid arguments are provided 28676 * ENOTTY 28677 */ 28678 28679 static int 28680 sr_read_cdda(dev_t dev, caddr_t data, int flag) 28681 { 28682 struct sd_lun *un; 28683 struct uscsi_cmd *com; 28684 struct cdrom_cdda *cdda; 28685 int rval; 28686 size_t buflen; 28687 char cdb[CDB_GROUP5]; 28688 28689 #ifdef _MULTI_DATAMODEL 28690 /* To support ILP32 applications in an LP64 world */ 28691 struct cdrom_cdda32 cdrom_cdda32; 28692 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 28693 #endif /* _MULTI_DATAMODEL */ 28694 28695 if (data == NULL) { 28696 return (EINVAL); 28697 } 28698 28699 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28700 return (ENXIO); 28701 } 28702 28703 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 28704 28705 #ifdef _MULTI_DATAMODEL 28706 switch (ddi_model_convert_from(flag & FMODELS)) { 28707 case DDI_MODEL_ILP32: 28708 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 28709 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28710 "sr_read_cdda: ddi_copyin Failed\n"); 28711 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28712 return (EFAULT); 28713 } 28714 /* Convert the ILP32 uscsi data from the application to LP64 */ 28715 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 28716 break; 28717 case DDI_MODEL_NONE: 28718 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28719 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28720 "sr_read_cdda: ddi_copyin Failed\n"); 28721 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28722 return (EFAULT); 28723 } 28724 break; 28725 } 28726 #else /* ! _MULTI_DATAMODEL */ 28727 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28728 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28729 "sr_read_cdda: ddi_copyin Failed\n"); 28730 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28731 return (EFAULT); 28732 } 28733 #endif /* _MULTI_DATAMODEL */ 28734 28735 /* 28736 * Since MMC-2 expects max 3 bytes for length, check if the 28737 * length input is greater than 3 bytes 28738 */ 28739 if ((cdda->cdda_length & 0xFF000000) != 0) { 28740 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 28741 "cdrom transfer length too large: %d (limit %d)\n", 28742 cdda->cdda_length, 0xFFFFFF); 28743 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28744 return (EINVAL); 28745 } 28746 28747 switch (cdda->cdda_subcode) { 28748 case CDROM_DA_NO_SUBCODE: 28749 buflen = CDROM_BLK_2352 * cdda->cdda_length; 28750 break; 28751 case CDROM_DA_SUBQ: 28752 buflen = CDROM_BLK_2368 * cdda->cdda_length; 28753 break; 28754 case CDROM_DA_ALL_SUBCODE: 28755 buflen = CDROM_BLK_2448 * cdda->cdda_length; 28756 break; 28757 case CDROM_DA_SUBCODE_ONLY: 28758 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 28759 break; 28760 default: 28761 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28762 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 28763 cdda->cdda_subcode); 28764 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28765 return (EINVAL); 28766 } 28767 28768 /* Build and send the command */ 28769 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28770 bzero(cdb, CDB_GROUP5); 28771 28772 if (un->un_f_cfg_cdda == TRUE) { 28773 cdb[0] = (char)SCMD_READ_CD; 28774 cdb[1] = 0x04; 28775 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28776 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28777 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28778 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28779 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28780 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28781 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 28782 cdb[9] = 0x10; 28783 switch (cdda->cdda_subcode) { 28784 case CDROM_DA_NO_SUBCODE : 28785 cdb[10] = 0x0; 28786 break; 28787 case CDROM_DA_SUBQ : 28788 cdb[10] = 0x2; 28789 break; 28790 case CDROM_DA_ALL_SUBCODE : 28791 cdb[10] = 0x1; 28792 break; 28793 case CDROM_DA_SUBCODE_ONLY : 28794 /* FALLTHROUGH */ 28795 default : 28796 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28797 kmem_free(com, sizeof (*com)); 28798 return (ENOTTY); 28799 } 28800 } else { 28801 cdb[0] = (char)SCMD_READ_CDDA; 28802 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28803 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28804 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28805 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28806 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 28807 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28808 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28809 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 28810 cdb[10] = cdda->cdda_subcode; 28811 } 28812 28813 com->uscsi_cdb = cdb; 28814 com->uscsi_cdblen = CDB_GROUP5; 28815 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 28816 com->uscsi_buflen = buflen; 28817 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ; 28818 28819 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28820 SD_PATH_STANDARD); 28821 28822 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28823 kmem_free(com, sizeof (*com)); 28824 return (rval); 28825 } 28826 28827 28828 /* 28829 * Function: sr_read_cdxa() 28830 * 28831 * Description: This routine is the driver entry point for handling CD-ROM 28832 * ioctl requests to return CD-XA (Extended Architecture) data. 28833 * (CDROMCDXA). 28834 * 28835 * Arguments: dev - the device 'dev_t' 28836 * data - pointer to user provided CD-XA structure specifying 28837 * the data starting address, transfer length, and format 28838 * flag - this argument is a pass through to ddi_copyxxx() 28839 * directly from the mode argument of ioctl(). 28840 * 28841 * Return Code: the code returned by sd_send_scsi_cmd() 28842 * EFAULT if ddi_copyxxx() fails 28843 * ENXIO if fail ddi_get_soft_state 28844 * EINVAL if data pointer is NULL 28845 */ 28846 28847 static int 28848 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 28849 { 28850 struct sd_lun *un; 28851 struct uscsi_cmd *com; 28852 struct cdrom_cdxa *cdxa; 28853 int rval; 28854 size_t buflen; 28855 char cdb[CDB_GROUP5]; 28856 uchar_t read_flags; 28857 28858 #ifdef _MULTI_DATAMODEL 28859 /* To support ILP32 applications in an LP64 world */ 28860 struct cdrom_cdxa32 cdrom_cdxa32; 28861 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 28862 #endif /* _MULTI_DATAMODEL */ 28863 28864 if (data == NULL) { 28865 return (EINVAL); 28866 } 28867 28868 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28869 return (ENXIO); 28870 } 28871 28872 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 28873 28874 #ifdef _MULTI_DATAMODEL 28875 switch (ddi_model_convert_from(flag & FMODELS)) { 28876 case DDI_MODEL_ILP32: 28877 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 28878 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28879 return (EFAULT); 28880 } 28881 /* 28882 * Convert the ILP32 uscsi data from the 28883 * application to LP64 for internal use. 
28884 */ 28885 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 28886 break; 28887 case DDI_MODEL_NONE: 28888 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 28889 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28890 return (EFAULT); 28891 } 28892 break; 28893 } 28894 #else /* ! _MULTI_DATAMODEL */ 28895 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 28896 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28897 return (EFAULT); 28898 } 28899 #endif /* _MULTI_DATAMODEL */ 28900 28901 /* 28902 * Since MMC-2 expects max 3 bytes for length, check if the 28903 * length input is greater than 3 bytes 28904 */ 28905 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 28906 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 28907 "cdrom transfer length too large: %d (limit %d)\n", 28908 cdxa->cdxa_length, 0xFFFFFF); 28909 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28910 return (EINVAL); 28911 } 28912 28913 switch (cdxa->cdxa_format) { 28914 case CDROM_XA_DATA: 28915 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 28916 read_flags = 0x10; 28917 break; 28918 case CDROM_XA_SECTOR_DATA: 28919 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 28920 read_flags = 0xf8; 28921 break; 28922 case CDROM_XA_DATA_W_ERROR: 28923 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 28924 read_flags = 0xfc; 28925 break; 28926 default: 28927 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28928 "sr_read_cdxa: Format '0x%x' Not Supported\n", 28929 cdxa->cdxa_format); 28930 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28931 return (EINVAL); 28932 } 28933 28934 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28935 bzero(cdb, CDB_GROUP5); 28936 if (un->un_f_mmc_cap == TRUE) { 28937 cdb[0] = (char)SCMD_READ_CD; 28938 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28939 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28940 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28941 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28942 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28943 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28944 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 28945 cdb[9] = (char)read_flags; 28946 } else { 28947 /* 28948 * Note: A vendor specific command (0xDB) is being used her to 28949 * request a read of all subcodes. 
28950 */ 28951 cdb[0] = (char)SCMD_READ_CDXA; 28952 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28953 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28954 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28955 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28956 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 28957 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28958 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28959 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 28960 cdb[10] = cdxa->cdxa_format; 28961 } 28962 com->uscsi_cdb = cdb; 28963 com->uscsi_cdblen = CDB_GROUP5; 28964 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 28965 com->uscsi_buflen = buflen; 28966 com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ; 28967 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 28968 SD_PATH_STANDARD); 28969 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28970 kmem_free(com, sizeof (*com)); 28971 return (rval); 28972 } 28973 28974 28975 /* 28976 * Function: sr_eject() 28977 * 28978 * Description: This routine is the driver entry point for handling CD-ROM 28979 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 28980 * 28981 * Arguments: dev - the device 'dev_t' 28982 * 28983 * Return Code: the code returned by sd_send_scsi_cmd() 28984 */ 28985 28986 static int 28987 sr_eject(dev_t dev) 28988 { 28989 struct sd_lun *un; 28990 int rval; 28991 sd_ssc_t *ssc; 28992 28993 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28994 (un->un_state == SD_STATE_OFFLINE)) { 28995 return (ENXIO); 28996 } 28997 28998 /* 28999 * To prevent race conditions with the eject 29000 * command, keep track of an eject command as 29001 * it progresses. If we are already handling 29002 * an eject command in the driver for the given 29003 * unit and another request to eject is received 29004 * immediately return EAGAIN so we don't lose 29005 * the command if the current eject command fails. 29006 */ 29007 mutex_enter(SD_MUTEX(un)); 29008 if (un->un_f_ejecting == TRUE) { 29009 mutex_exit(SD_MUTEX(un)); 29010 return (EAGAIN); 29011 } 29012 un->un_f_ejecting = TRUE; 29013 mutex_exit(SD_MUTEX(un)); 29014 29015 ssc = sd_ssc_init(un); 29016 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 29017 SD_PATH_STANDARD); 29018 sd_ssc_fini(ssc); 29019 29020 if (rval != 0) { 29021 mutex_enter(SD_MUTEX(un)); 29022 un->un_f_ejecting = FALSE; 29023 mutex_exit(SD_MUTEX(un)); 29024 return (rval); 29025 } 29026 29027 ssc = sd_ssc_init(un); 29028 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP, 29029 SD_TARGET_EJECT, SD_PATH_STANDARD); 29030 sd_ssc_fini(ssc); 29031 29032 if (rval == 0) { 29033 mutex_enter(SD_MUTEX(un)); 29034 sr_ejected(un); 29035 un->un_mediastate = DKIO_EJECTED; 29036 un->un_f_ejecting = FALSE; 29037 cv_broadcast(&un->un_state_cv); 29038 mutex_exit(SD_MUTEX(un)); 29039 } else { 29040 mutex_enter(SD_MUTEX(un)); 29041 un->un_f_ejecting = FALSE; 29042 mutex_exit(SD_MUTEX(un)); 29043 } 29044 return (rval); 29045 } 29046 29047 29048 /* 29049 * Function: sr_ejected() 29050 * 29051 * Description: This routine updates the soft state structure to invalidate the 29052 * geometry information after the media has been ejected or a 29053 * media eject has been detected. 
29054 * 29055 * Arguments: un - driver soft state (unit) structure 29056 */ 29057 29058 static void 29059 sr_ejected(struct sd_lun *un) 29060 { 29061 struct sd_errstats *stp; 29062 29063 ASSERT(un != NULL); 29064 ASSERT(mutex_owned(SD_MUTEX(un))); 29065 29066 un->un_f_blockcount_is_valid = FALSE; 29067 un->un_f_tgt_blocksize_is_valid = FALSE; 29068 mutex_exit(SD_MUTEX(un)); 29069 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 29070 mutex_enter(SD_MUTEX(un)); 29071 29072 if (un->un_errstats != NULL) { 29073 stp = (struct sd_errstats *)un->un_errstats->ks_data; 29074 stp->sd_capacity.value.ui64 = 0; 29075 } 29076 } 29077 29078 29079 /* 29080 * Function: sr_check_wp() 29081 * 29082 * Description: This routine checks the write protection of a removable 29083 * media disk and hotpluggable devices via the write protect bit of 29084 * the Mode Page Header device specific field. Some devices choke 29085 * on unsupported mode pages. To work around this issue, 29086 * this routine has been implemented to use the 0x3f mode page (request 29087 * for all pages) for all device types. 29088 * 29089 * Arguments: dev - the device 'dev_t' 29090 * 29091 * Return Code: int indicating if the device is write protected (1) or not (0) 29092 * 29093 * Context: Kernel thread. 29094 * 29095 */ 29096 29097 static int 29098 sr_check_wp(dev_t dev) 29099 { 29100 struct sd_lun *un; 29101 uchar_t device_specific; 29102 uchar_t *sense; 29103 int hdrlen; 29104 int rval = FALSE; 29105 int status; 29106 sd_ssc_t *ssc; 29107 29108 /* 29109 * Note: The return codes for this routine should be reworked to 29110 * properly handle the case of a NULL softstate. 29111 */ 29112 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 29113 return (FALSE); 29114 } 29115 29116 if (un->un_f_cfg_is_atapi == TRUE) { 29117 /* 29118 * The mode page contents are not required; set the allocation 29119 * length for the mode page header only 29120 */ 29121 hdrlen = MODE_HEADER_LENGTH_GRP2; 29122 sense = kmem_zalloc(hdrlen, KM_SLEEP); 29123 ssc = sd_ssc_init(un); 29124 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen, 29125 MODEPAGE_ALLPAGES, SD_PATH_STANDARD); 29126 sd_ssc_fini(ssc); 29127 if (status != 0) 29128 goto err_exit; 29129 device_specific = 29130 ((struct mode_header_grp2 *)sense)->device_specific; 29131 } else { 29132 hdrlen = MODE_HEADER_LENGTH; 29133 sense = kmem_zalloc(hdrlen, KM_SLEEP); 29134 ssc = sd_ssc_init(un); 29135 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen, 29136 MODEPAGE_ALLPAGES, SD_PATH_STANDARD); 29137 sd_ssc_fini(ssc); 29138 if (status != 0) 29139 goto err_exit; 29140 device_specific = 29141 ((struct mode_header *)sense)->device_specific; 29142 } 29143 29144 29145 /* 29146 * Not all disks understand the write protect mode sense query; 29147 * if it failed above we jump to err_exit and return FALSE, assuming 29148 * those devices are not writable. Otherwise check the write protect bit. 29149 */ 29150 if (device_specific & WRITE_PROTECT) { 29151 rval = TRUE; 29152 } 29153 29154 err_exit: 29155 kmem_free(sense, hdrlen); 29156 return (rval); 29157 } 29158 29159 /* 29160 * Function: sr_volume_ctrl() 29161 * 29162 * Description: This routine is the driver entry point for handling CD-ROM 29163 * audio output volume ioctl requests (CDROMVOLCTRL). 29164 * 29165 * Arguments: dev - the device 'dev_t' 29166 * data - pointer to user audio volume control structure 29167 * flag - this argument is a pass through to ddi_copyxxx() 29168 * directly from the mode argument of ioctl().
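 *
 *		Hypothetical userland usage (illustration only): set both
 *		channels to full volume; channels 2 and 3 are ignored by
 *		this driver:
 *
 *			struct cdrom_volctrl v;
 *			v.channel0 = 0xff;
 *			v.channel1 = 0xff;
 *			v.channel2 = v.channel3 = 0;
 *			(void) ioctl(fd, CDROMVOLCTRL, &v);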
29169 * 29170 * Return Code: the code returned by sd_send_scsi_cmd() 29171 * EFAULT if ddi_copyxxx() fails 29172 * ENXIO if fail ddi_get_soft_state 29173 * EINVAL if data pointer is NULL 29174 * 29175 */ 29176 29177 static int 29178 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 29179 { 29180 struct sd_lun *un; 29181 struct cdrom_volctrl volume; 29182 struct cdrom_volctrl *vol = &volume; 29183 uchar_t *sense_page; 29184 uchar_t *select_page; 29185 uchar_t *sense; 29186 uchar_t *select; 29187 int sense_buflen; 29188 int select_buflen; 29189 int rval; 29190 sd_ssc_t *ssc; 29191 29192 if (data == NULL) { 29193 return (EINVAL); 29194 } 29195 29196 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 29197 (un->un_state == SD_STATE_OFFLINE)) { 29198 return (ENXIO); 29199 } 29200 29201 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 29202 return (EFAULT); 29203 } 29204 29205 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 29206 struct mode_header_grp2 *sense_mhp; 29207 struct mode_header_grp2 *select_mhp; 29208 int bd_len; 29209 29210 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 29211 select_buflen = MODE_HEADER_LENGTH_GRP2 + 29212 MODEPAGE_AUDIO_CTRL_LEN; 29213 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 29214 select = kmem_zalloc(select_buflen, KM_SLEEP); 29215 ssc = sd_ssc_init(un); 29216 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 29217 sense_buflen, MODEPAGE_AUDIO_CTRL, 29218 SD_PATH_STANDARD); 29219 sd_ssc_fini(ssc); 29220 29221 if (rval != 0) { 29222 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 29223 "sr_volume_ctrl: Mode Sense Failed\n"); 29224 kmem_free(sense, sense_buflen); 29225 kmem_free(select, select_buflen); 29226 return (rval); 29227 } 29228 sense_mhp = (struct mode_header_grp2 *)sense; 29229 select_mhp = (struct mode_header_grp2 *)select; 29230 bd_len = (sense_mhp->bdesc_length_hi << 8) | 29231 sense_mhp->bdesc_length_lo; 29232 if (bd_len > MODE_BLK_DESC_LENGTH) { 29233 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29234 "sr_volume_ctrl: Mode Sense returned invalid " 29235 "block descriptor length\n"); 29236 kmem_free(sense, sense_buflen); 29237 kmem_free(select, select_buflen); 29238 return (EIO); 29239 } 29240 sense_page = (uchar_t *) 29241 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 29242 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 29243 select_mhp->length_msb = 0; 29244 select_mhp->length_lsb = 0; 29245 select_mhp->bdesc_length_hi = 0; 29246 select_mhp->bdesc_length_lo = 0; 29247 } else { 29248 struct mode_header *sense_mhp, *select_mhp; 29249 29250 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 29251 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 29252 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 29253 select = kmem_zalloc(select_buflen, KM_SLEEP); 29254 ssc = sd_ssc_init(un); 29255 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 29256 sense_buflen, MODEPAGE_AUDIO_CTRL, 29257 SD_PATH_STANDARD); 29258 sd_ssc_fini(ssc); 29259 29260 if (rval != 0) { 29261 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29262 "sr_volume_ctrl: Mode Sense Failed\n"); 29263 kmem_free(sense, sense_buflen); 29264 kmem_free(select, select_buflen); 29265 return (rval); 29266 } 29267 sense_mhp = (struct mode_header *)sense; 29268 select_mhp = (struct mode_header *)select; 29269 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 29270 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29271 "sr_volume_ctrl: Mode Sense returned invalid " 29272 "block descriptor length\n"); 29273 
kmem_free(sense, sense_buflen); 29274 kmem_free(select, select_buflen); 29275 return (EIO); 29276 } 29277 sense_page = (uchar_t *) 29278 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 29279 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 29280 select_mhp->length = 0; 29281 select_mhp->bdesc_length = 0; 29282 } 29283 /* 29284 * Note: An audio control data structure could be created and overlayed 29285 * on the following in place of the array indexing method implemented. 29286 */ 29287 29288 /* Build the select data for the user volume data */ 29289 select_page[0] = MODEPAGE_AUDIO_CTRL; 29290 select_page[1] = 0xE; 29291 /* Set the immediate bit */ 29292 select_page[2] = 0x04; 29293 /* Zero out reserved fields */ 29294 select_page[3] = 0x00; 29295 select_page[4] = 0x00; 29296 /* Return sense data for fields not to be modified */ 29297 select_page[5] = sense_page[5]; 29298 select_page[6] = sense_page[6]; 29299 select_page[7] = sense_page[7]; 29300 /* Set the user specified volume levels for channel 0 and 1 */ 29301 select_page[8] = 0x01; 29302 select_page[9] = vol->channel0; 29303 select_page[10] = 0x02; 29304 select_page[11] = vol->channel1; 29305 /* Channel 2 and 3 are currently unsupported so return the sense data */ 29306 select_page[12] = sense_page[12]; 29307 select_page[13] = sense_page[13]; 29308 select_page[14] = sense_page[14]; 29309 select_page[15] = sense_page[15]; 29310 29311 ssc = sd_ssc_init(un); 29312 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 29313 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select, 29314 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 29315 } else { 29316 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 29317 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 29318 } 29319 sd_ssc_fini(ssc); 29320 29321 kmem_free(sense, sense_buflen); 29322 kmem_free(select, select_buflen); 29323 return (rval); 29324 } 29325 29326 29327 /* 29328 * Function: sr_read_sony_session_offset() 29329 * 29330 * Description: This routine is the driver entry point for handling CD-ROM 29331 * ioctl requests for session offset information. (CDROMREADOFFSET) 29332 * The address of the first track in the last session of a 29333 * multi-session CD-ROM is returned 29334 * 29335 * Note: This routine uses a vendor specific key value in the 29336 * command control field without implementing any vendor check here 29337 * or in the ioctl routine. 29338 * 29339 * Arguments: dev - the device 'dev_t' 29340 * data - pointer to an int to hold the requested address 29341 * flag - this argument is a pass through to ddi_copyxxx() 29342 * directly from the mode argument of ioctl(). 29343 * 29344 * Return Code: the code returned by sd_send_scsi_cmd() 29345 * EFAULT if ddi_copyxxx() fails 29346 * ENXIO if fail ddi_get_soft_state 29347 * EINVAL if data pointer is NULL 29348 */ 29349 29350 static int 29351 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 29352 { 29353 struct sd_lun *un; 29354 struct uscsi_cmd *com; 29355 caddr_t buffer; 29356 char cdb[CDB_GROUP1]; 29357 int session_offset = 0; 29358 int rval; 29359 29360 if (data == NULL) { 29361 return (EINVAL); 29362 } 29363 29364 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 29365 (un->un_state == SD_STATE_OFFLINE)) { 29366 return (ENXIO); 29367 } 29368 29369 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 29370 bzero(cdb, CDB_GROUP1); 29371 cdb[0] = SCMD_READ_TOC; 29372 /* 29373 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 
29374  * (4 byte TOC response header + 8 byte response data)
29375  */
29376     cdb[8] = SONY_SESSION_OFFSET_LEN;
29377     /* Byte 9 is the control byte. A vendor specific value is used */
29378     cdb[9] = SONY_SESSION_OFFSET_KEY;
29379     com = kmem_zalloc(sizeof (*com), KM_SLEEP);
29380     com->uscsi_cdb = cdb;
29381     com->uscsi_cdblen = CDB_GROUP1;
29382     com->uscsi_bufaddr = buffer;
29383     com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
29384     com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ;
29385
29386     rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
29387         SD_PATH_STANDARD);
29388     if (rval != 0) {
29389         kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
29390         kmem_free(com, sizeof (*com));
29391         return (rval);
29392     }
29393     if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
29394         session_offset =
29395             ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
29396             ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
29397         /*
29398          * The drive returns the offset in units of the current lbasize
29399          * blocks. Convert it to 2K blocks before returning it to the user.
29400          */
29401         if (un->un_tgt_blocksize == CDROM_BLK_512) {
29402             session_offset >>= 2;
29403         } else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
29404             session_offset >>= 1;
29405         }
29406     }
29407
29408     if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
29409         rval = EFAULT;
29410     }
29411
29412     kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
29413     kmem_free(com, sizeof (*com));
29414     return (rval);
29415 }
29416
29417
29418 /*
29419  * Function: sd_wm_cache_constructor()
29420  *
29421  * Description: Cache constructor for the wmap cache for the read/modify/write
29422  *		devices.
29423  *
29424  * Arguments: wm - A pointer to the sd_w_map to be initialized.
29425  *	      un - sd_lun structure for the device.
29426  *	      flags - the KM flags passed to the constructor
29427  *
29428  * Return Code: 0 on success.
29429  *		-1 on failure.
29430  */
29431
29432 /*ARGSUSED*/
29433 static int
29434 sd_wm_cache_constructor(void *wm, void *un, int flags)
29435 {
29436     bzero(wm, sizeof (struct sd_w_map));
29437     cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
29438     return (0);
29439 }
29440
29441
29442 /*
29443  * Function: sd_wm_cache_destructor()
29444  *
29445  * Description: Cache destructor for the wmap cache for the read/modify/write
29446  *		devices.
29447  *
29448  * Arguments: wm - A pointer to the sd_w_map to be destroyed.
29449  *	      un - sd_lun structure for the device.
29450  */
29451 /*ARGSUSED*/
29452 static void
29453 sd_wm_cache_destructor(void *wm, void *un)
29454 {
29455     cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
29456 }
29457
29458
29459 /*
29460  * Function: sd_range_lock()
29461  *
29462  * Description: Lock the specified range of blocks to ensure that a
29463  *		read-modify-write is atomic and that no other I/O writes
29464  *		to the same location. The range is specified in terms
29465  *		of start and end blocks; block numbers are actual media
29466  *		block numbers, not system block numbers.
29467  *
29468  * Arguments: un - sd_lun structure for the device.
29469  *	      startb - The starting block number
29470  *	      endb - The end block number
29471  *	      typ - type of i/o - simple/read_modify_write
29472  *
29473  * Return Code: wm - pointer to the wmap structure.
29474  *
29475  * Context: This routine can sleep.
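 *
 *		A minimal sketch of the intended pairing with
 *		sd_range_unlock() (illustrative block numbers only):
 *
 *			wmp = sd_range_lock(un, start_blk, end_blk,
 *			    SD_WTYPE_RMW);
 *			(read the enclosing target blocks, modify the
 *			data, and issue the write)
 *			sd_range_unlock(un, wmp);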
29476  */
29477
29478 static struct sd_w_map *
29479 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
29480 {
29481     struct sd_w_map *wmp = NULL;
29482     struct sd_w_map *sl_wmp = NULL;
29483     struct sd_w_map *tmp_wmp;
29484     wm_state state = SD_WM_CHK_LIST;
29485
29486
29487     ASSERT(un != NULL);
29488     ASSERT(!mutex_owned(SD_MUTEX(un)));
29489
29490     mutex_enter(SD_MUTEX(un));
29491
29492     while (state != SD_WM_DONE) {
29493
29494         switch (state) {
29495         case SD_WM_CHK_LIST:
29496             /*
29497              * This is the starting state. Check the wmap list
29498              * to see if the range is currently available.
29499              */
29500             if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
29501                 /*
29502                  * If this is a simple write and no rmw
29503                  * I/O is pending, then try to lock the
29504                  * range, as it should be available.
29505                  */
29506                 state = SD_WM_LOCK_RANGE;
29507             } else {
29508                 tmp_wmp = sd_get_range(un, startb, endb);
29509                 if (tmp_wmp != NULL) {
29510                     if ((wmp != NULL) && ONLIST(un, wmp)) {
29511                         /*
29512                          * Do not keep an on-list wmp
29513                          * while waiting; this macro
29514                          * also sets wmp = NULL.
29515                          */
29516                         FREE_ONLIST_WMAP(un, wmp);
29517                     }
29518                     /*
29519                      * sl_wmp is the wmap on which the wait
29520                      * is done. Since tmp_wmp points to the
29521                      * in-use wmap, set sl_wmp to tmp_wmp
29522                      * and change the state to wait.
29523                      */
29524                     sl_wmp = tmp_wmp;
29525                     state = SD_WM_WAIT_MAP;
29526                 } else {
29527                     state = SD_WM_LOCK_RANGE;
29528                 }
29529
29530             }
29531             break;
29532
29533         case SD_WM_LOCK_RANGE:
29534             ASSERT(un->un_wm_cache);
29535             /*
29536              * The range needs to be locked, so try to get a wmap.
29537              * First attempt the allocation with KM_NOSLEEP; we want
29538              * to avoid sleeping if possible, since we would have to
29539              * release the sd mutex in order to sleep.
29540              */
29541             if (wmp == NULL)
29542                 wmp = kmem_cache_alloc(un->un_wm_cache,
29543                     KM_NOSLEEP);
29544             if (wmp == NULL) {
29545                 mutex_exit(SD_MUTEX(un));
29546                 _NOTE(DATA_READABLE_WITHOUT_LOCK
29547                     (sd_lun::un_wm_cache))
29548                 wmp = kmem_cache_alloc(un->un_wm_cache,
29549                     KM_SLEEP);
29550                 mutex_enter(SD_MUTEX(un));
29551                 /*
29552                  * We released the mutex, so recheck by going
29553                  * back to the check-list state.
29554                  */
29555                 state = SD_WM_CHK_LIST;
29556             } else {
29557                 /*
29558                  * We can exit the state machine since we
29559                  * have the wmap. Do the housekeeping first:
29560                  * place the wmap on the wmap list if it is not
29561                  * already there, then set the state to done.
29562                  */
29563                 wmp->wm_start = startb;
29564                 wmp->wm_end = endb;
29565                 wmp->wm_flags = typ | SD_WM_BUSY;
29566                 if (typ & SD_WTYPE_RMW) {
29567                     un->un_rmw_count++;
29568                 }
29569                 /*
29570                  * If not already on the list, link it in.
29571                  */
29572                 if (!ONLIST(un, wmp)) {
29573                     wmp->wm_next = un->un_wm;
29574                     wmp->wm_prev = NULL;
29575                     if (wmp->wm_next)
29576                         wmp->wm_next->wm_prev = wmp;
29577                     un->un_wm = wmp;
29578                 }
29579                 state = SD_WM_DONE;
29580             }
29581             break;
29582
29583         case SD_WM_WAIT_MAP:
29584             ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
29585             /*
29586              * The wait is done on sl_wmp, which was set in the
29587              * check-list state.
29588              */
29589             sl_wmp->wm_wanted_count++;
29590             cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
29591             sl_wmp->wm_wanted_count--;
29592             /*
29593              * We can reuse the memory from the completed sl_wmp
29594              * lock range for our new lock, but only if no one is
29595              * waiting for it.
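             * (If waiters remain, the map must stay allocated so
             * that they can still be woken via wm_avail; it is only
             * safe to reuse it once wm_wanted_count has dropped to
             * zero.)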
29596              */
29597             ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
29598             if (sl_wmp->wm_wanted_count == 0) {
29599                 if (wmp != NULL) {
29600                     CHK_N_FREEWMP(un, wmp);
29601                 }
29602                 wmp = sl_wmp;
29603             }
29604             sl_wmp = NULL;
29605             /*
29606              * After waking up, we need to recheck the range for
29607              * availability.
29608              */
29609             state = SD_WM_CHK_LIST;
29610             break;
29611
29612         default:
29613             panic("sd_range_lock: "
29614                 "Unknown state %d in sd_range_lock", state);
29615             /*NOTREACHED*/
29616         } /* switch(state) */
29617
29618     } /* while(state != SD_WM_DONE) */
29619
29620     mutex_exit(SD_MUTEX(un));
29621
29622     ASSERT(wmp != NULL);
29623
29624     return (wmp);
29625 }
29626
29627
29628 /*
29629  * Function: sd_get_range()
29630  *
29631  * Description: Find whether there is any I/O overlapping this one.
29632  *		Returns the write map of the first such I/O, NULL otherwise.
29633  *
29634  * Arguments: un - sd_lun structure for the device.
29635  *	      startb - The starting block number
29636  *	      endb - The end block number
29637  *
29638  * Return Code: wm - pointer to the wmap structure.
29639  */
29640
29641 static struct sd_w_map *
29642 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
29643 {
29644     struct sd_w_map *wmp;
29645
29646     ASSERT(un != NULL);
29647
29648     for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
29649         if (!(wmp->wm_flags & SD_WM_BUSY)) {
29650             continue;
29651         }
29652         if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
29653             break;
29654         }
29655         if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
29656             break;
29657         }
29658     }
29659
29660     return (wmp);
29661 }
29662
29663
29664 /*
29665  * Function: sd_free_inlist_wmap()
29666  *
29667  * Description: Unlink and free a write map struct.
29668  *
29669  * Arguments: un - sd_lun structure for the device.
29670  *	      wmp - sd_w_map which needs to be unlinked.
29671  */
29672
29673 static void
29674 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
29675 {
29676     ASSERT(un != NULL);
29677
29678     if (un->un_wm == wmp) {
29679         un->un_wm = wmp->wm_next;
29680     } else {
29681         wmp->wm_prev->wm_next = wmp->wm_next;
29682     }
29683
29684     if (wmp->wm_next) {
29685         wmp->wm_next->wm_prev = wmp->wm_prev;
29686     }
29687
29688     wmp->wm_next = wmp->wm_prev = NULL;
29689
29690     kmem_cache_free(un->un_wm_cache, wmp);
29691 }
29692
29693
29694 /*
29695  * Function: sd_range_unlock()
29696  *
29697  * Description: Unlock the range locked by wm.
29698  *		Free the write map if nobody else is waiting on it.
29699  *
29700  * Arguments: un - sd_lun structure for the device.
29701  *	      wm - the sd_w_map to be unlocked.
29702  */
29703
29704 static void
29705 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
29706 {
29707     ASSERT(un != NULL);
29708     ASSERT(wm != NULL);
29709     ASSERT(!mutex_owned(SD_MUTEX(un)));
29710
29711     mutex_enter(SD_MUTEX(un));
29712
29713     if (wm->wm_flags & SD_WTYPE_RMW) {
29714         un->un_rmw_count--;
29715     }
29716
29717     if (wm->wm_wanted_count) {
29718         wm->wm_flags = 0;
29719         /*
29720          * Broadcast that the wmap is available now.
29721          */
29722         cv_broadcast(&wm->wm_avail);
29723     } else {
29724         /*
29725          * If no one is waiting on the map, it should be freed.
29726          */
29727         sd_free_inlist_wmap(un, wm);
29728     }
29729
29730     mutex_exit(SD_MUTEX(un));
29731 }
29732
29733
29734 /*
29735  * Function: sd_read_modify_write_task
29736  *
29737  * Description: Called from a taskq thread to initiate the write phase of
29738  *		a read-modify-write request. This is used for targets where
29739  *		un->un_sys_blocksize != un->un_tgt_blocksize.
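 *		For example (illustrative sizes, not taken from this code):
 *		with un_sys_blocksize of 512 and un_tgt_blocksize of 2048,
 *		a 512-byte write covers only part of a target block, so the
 *		enclosing 2K target block is first read and modified; this
 *		routine then sends the resulting full-block write down the
 *		iostart chain.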
29740  *
29741  * Arguments: arg - a pointer to the buf(9S) struct for the write command.
29742  *
29743  * Context: Called in taskq thread context.
29744  */
29745
29746 static void
29747 sd_read_modify_write_task(void *arg)
29748 {
29749     struct sd_mapblocksize_info *bsp;
29750     struct buf      *bp;
29751     struct sd_xbuf  *xp;
29752     struct sd_lun   *un;
29753
29754     bp = arg;	/* The bp is given in arg */
29755     ASSERT(bp != NULL);
29756
29757     /* Get the pointer to the layer-private data struct */
29758     xp = SD_GET_XBUF(bp);
29759     ASSERT(xp != NULL);
29760     bsp = xp->xb_private;
29761     ASSERT(bsp != NULL);
29762
29763     un = SD_GET_UN(bp);
29764     ASSERT(un != NULL);
29765     ASSERT(!mutex_owned(SD_MUTEX(un)));
29766
29767     SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29768         "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
29769
29770     /*
29771      * This is the write phase of a read-modify-write request, called
29772      * in the context of a taskq thread in response to the read portion
29773      * of the rmw request having completed in interrupt context. The
29774      * write request must be sent from here down the iostart
29775      * chain as if it were being sent from sd_mapblocksize_iostart(), so
29776      * we use the layer index saved in the layer-private data area.
29777      */
29778     SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
29779
29780     SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29781         "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
29782 }
29783
29784
29785 /*
29786  * Function: sddump_do_read_of_rmw()
29787  *
29788  * Description: This routine is called from sddump(). If sddump() is called
29789  *		with an I/O that is not aligned on a device blocksize
29790  *		boundary, the write has to be converted to a read-modify-write.
29791  *		Do the read part here in order to keep sddump() simple.
29792  *		Note that the sd_mutex is held across the call to this
29793  *		routine.
29794  *
29795  * Arguments: un - sd_lun
29796  *	      blkno - block number in terms of media block size.
29797  *	      nblk - number of blocks.
29798  *	      bpp - pointer to pointer to the buf structure. On return
29799  *		    from this function, *bpp points to the valid buffer
29800  *		    to which the write has to be done.
29801  *
29802  * Return Code: 0 for success or errno-type return code
29803  */
29804
29805 static int
29806 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
29807     struct buf **bpp)
29808 {
29809     int err;
29810     int i;
29811     int rval;
29812     struct buf *bp;
29813     struct scsi_pkt *pkt = NULL;
29814     uint32_t target_blocksize;
29815
29816     ASSERT(un != NULL);
29817     ASSERT(mutex_owned(SD_MUTEX(un)));
29818
29819     target_blocksize = un->un_tgt_blocksize;
29820
29821     mutex_exit(SD_MUTEX(un));
29822
29823     bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
29824         (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
29825     if (bp == NULL) {
29826         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29827             "no resources for dumping; giving up");
29828         err = ENOMEM;
29829         goto done;
29830     }
29831
29832     rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
29833         blkno, nblk);
29834     if (rval != 0) {
29835         scsi_free_consistent_buf(bp);
29836         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29837             "no resources for dumping; giving up");
29838         err = ENOMEM;
29839         goto done;
29840     }
29841
29842     pkt->pkt_flags |= FLAG_NOINTR;
29843
29844     err = EIO;
29845     for (i = 0; i < SD_NDUMP_RETRIES; i++) {
29846
29847         /*
29848          * scsi_poll() returns 0 (success) if the command completes and
29849          * the status block is STATUS_GOOD.
We should only check 29850 * errors if this condition is not true. Even then we should 29851 * send our own request sense packet only if we have a check 29852 * condition and auto request sense has not been performed by 29853 * the hba. 29854 */ 29855 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 29856 29857 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 29858 err = 0; 29859 break; 29860 } 29861 29862 /* 29863 * Check CMD_DEV_GONE 1st, give up if device is gone, 29864 * no need to read RQS data. 29865 */ 29866 if (pkt->pkt_reason == CMD_DEV_GONE) { 29867 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29868 "Error while dumping state with rmw..." 29869 "Device is gone\n"); 29870 break; 29871 } 29872 29873 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 29874 SD_INFO(SD_LOG_DUMP, un, 29875 "sddump: read failed with CHECK, try # %d\n", i); 29876 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 29877 (void) sd_send_polled_RQS(un); 29878 } 29879 29880 continue; 29881 } 29882 29883 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 29884 int reset_retval = 0; 29885 29886 SD_INFO(SD_LOG_DUMP, un, 29887 "sddump: read failed with BUSY, try # %d\n", i); 29888 29889 if (un->un_f_lun_reset_enabled == TRUE) { 29890 reset_retval = scsi_reset(SD_ADDRESS(un), 29891 RESET_LUN); 29892 } 29893 if (reset_retval == 0) { 29894 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 29895 } 29896 (void) sd_send_polled_RQS(un); 29897 29898 } else { 29899 SD_INFO(SD_LOG_DUMP, un, 29900 "sddump: read failed with 0x%x, try # %d\n", 29901 SD_GET_PKT_STATUS(pkt), i); 29902 mutex_enter(SD_MUTEX(un)); 29903 sd_reset_target(un, pkt); 29904 mutex_exit(SD_MUTEX(un)); 29905 } 29906 29907 /* 29908 * If we are not getting anywhere with lun/target resets, 29909 * let's reset the bus. 29910 */ 29911 if (i > SD_NDUMP_RETRIES / 2) { 29912 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 29913 (void) sd_send_polled_RQS(un); 29914 } 29915 29916 } 29917 scsi_destroy_pkt(pkt); 29918 29919 if (err != 0) { 29920 scsi_free_consistent_buf(bp); 29921 *bpp = NULL; 29922 } else { 29923 *bpp = bp; 29924 } 29925 29926 done: 29927 mutex_enter(SD_MUTEX(un)); 29928 return (err); 29929 } 29930 29931 29932 /* 29933 * Function: sd_failfast_flushq 29934 * 29935 * Description: Take all bp's on the wait queue that have B_FAILFAST set 29936 * in b_flags and move them onto the failfast queue, then kick 29937 * off a thread to return all bp's on the failfast queue to 29938 * their owners with an error set. 29939 * 29940 * Arguments: un - pointer to the soft state struct for the instance. 29941 * 29942 * Context: may execute in interrupt context. 29943 */ 29944 29945 static void 29946 sd_failfast_flushq(struct sd_lun *un) 29947 { 29948 struct buf *bp; 29949 struct buf *next_waitq_bp; 29950 struct buf *prev_waitq_bp = NULL; 29951 29952 ASSERT(un != NULL); 29953 ASSERT(mutex_owned(SD_MUTEX(un))); 29954 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 29955 ASSERT(un->un_failfast_bp == NULL); 29956 29957 SD_TRACE(SD_LOG_IO_FAILFAST, un, 29958 "sd_failfast_flushq: entry: un:0x%p\n", un); 29959 29960 /* 29961 * Check if we should flush all bufs when entering failfast state, or 29962 * just those with B_FAILFAST set. 29963 */ 29964 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 29965 /* 29966 * Move *all* bp's on the wait queue to the failfast flush 29967 * queue, including those that do NOT have B_FAILFAST set. 
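         * The whole wait queue is spliced onto the tail of the
         * failfast queue in one step; the per-bp waitq kstats are
         * updated in the loop below before the waitq head and tail
         * pointers are cleared.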
29968 */ 29969 if (un->un_failfast_headp == NULL) { 29970 ASSERT(un->un_failfast_tailp == NULL); 29971 un->un_failfast_headp = un->un_waitq_headp; 29972 } else { 29973 ASSERT(un->un_failfast_tailp != NULL); 29974 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 29975 } 29976 29977 un->un_failfast_tailp = un->un_waitq_tailp; 29978 29979 /* update kstat for each bp moved out of the waitq */ 29980 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 29981 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 29982 } 29983 29984 /* empty the waitq */ 29985 un->un_waitq_headp = un->un_waitq_tailp = NULL; 29986 29987 } else { 29988 /* 29989 * Go thru the wait queue, pick off all entries with 29990 * B_FAILFAST set, and move these onto the failfast queue. 29991 */ 29992 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 29993 /* 29994 * Save the pointer to the next bp on the wait queue, 29995 * so we get to it on the next iteration of this loop. 29996 */ 29997 next_waitq_bp = bp->av_forw; 29998 29999 /* 30000 * If this bp from the wait queue does NOT have 30001 * B_FAILFAST set, just move on to the next element 30002 * in the wait queue. Note, this is the only place 30003 * where it is correct to set prev_waitq_bp. 30004 */ 30005 if ((bp->b_flags & B_FAILFAST) == 0) { 30006 prev_waitq_bp = bp; 30007 continue; 30008 } 30009 30010 /* 30011 * Remove the bp from the wait queue. 30012 */ 30013 if (bp == un->un_waitq_headp) { 30014 /* The bp is the first element of the waitq. */ 30015 un->un_waitq_headp = next_waitq_bp; 30016 if (un->un_waitq_headp == NULL) { 30017 /* The wait queue is now empty */ 30018 un->un_waitq_tailp = NULL; 30019 } 30020 } else { 30021 /* 30022 * The bp is either somewhere in the middle 30023 * or at the end of the wait queue. 30024 */ 30025 ASSERT(un->un_waitq_headp != NULL); 30026 ASSERT(prev_waitq_bp != NULL); 30027 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 30028 == 0); 30029 if (bp == un->un_waitq_tailp) { 30030 /* bp is the last entry on the waitq. */ 30031 ASSERT(next_waitq_bp == NULL); 30032 un->un_waitq_tailp = prev_waitq_bp; 30033 } 30034 prev_waitq_bp->av_forw = next_waitq_bp; 30035 } 30036 bp->av_forw = NULL; 30037 30038 /* 30039 * update kstat since the bp is moved out of 30040 * the waitq 30041 */ 30042 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 30043 30044 /* 30045 * Now put the bp onto the failfast queue. 30046 */ 30047 if (un->un_failfast_headp == NULL) { 30048 /* failfast queue is currently empty */ 30049 ASSERT(un->un_failfast_tailp == NULL); 30050 un->un_failfast_headp = 30051 un->un_failfast_tailp = bp; 30052 } else { 30053 /* Add the bp to the end of the failfast q */ 30054 ASSERT(un->un_failfast_tailp != NULL); 30055 ASSERT(un->un_failfast_tailp->b_flags & 30056 B_FAILFAST); 30057 un->un_failfast_tailp->av_forw = bp; 30058 un->un_failfast_tailp = bp; 30059 } 30060 } 30061 } 30062 30063 /* 30064 * Now return all bp's on the failfast queue to their owners. 30065 */ 30066 while ((bp = un->un_failfast_headp) != NULL) { 30067 30068 un->un_failfast_headp = bp->av_forw; 30069 if (un->un_failfast_headp == NULL) { 30070 un->un_failfast_tailp = NULL; 30071 } 30072 30073 /* 30074 * We want to return the bp with a failure error code, but 30075 * we do not want a call to sd_start_cmds() to occur here, 30076 * so use sd_return_failed_command_no_restart() instead of 30077 * sd_return_failed_command(). 30078 */ 30079 sd_return_failed_command_no_restart(un, bp, EIO); 30080 } 30081 30082 /* Flush the xbuf queues if required. 
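     * (ddi_xbuf_flushq() applies sd_failfast_flushq_callback(), below,
     * to decide for each buf whether it should be flushed.)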
     */
30083     if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
30084         ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
30085     }
30086
30087     SD_TRACE(SD_LOG_IO_FAILFAST, un,
30088         "sd_failfast_flushq: exit: un:0x%p\n", un);
30089 }
30090
30091
30092 /*
30093  * Function: sd_failfast_flushq_callback
30094  *
30095  * Description: Return TRUE if the given bp meets the criteria for failfast
30096  *		flushing. Used with ddi_xbuf_flushq(9F).
30097  *
30098  * Arguments: bp - ptr to buf struct to be examined.
30099  *
30100  * Context: Any
30101  */
30102
30103 static int
30104 sd_failfast_flushq_callback(struct buf *bp)
30105 {
30106     /*
30107      * Return TRUE if (1) we want to flush ALL bufs when the failfast
30108      * state is entered; OR (2) the given bp has B_FAILFAST set.
30109      */
30110     return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
30111         (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
30112 }
30113
30114
30115
30116 /*
30117  * Function: sd_setup_next_xfer
30118  *
30119  * Description: Prepare next I/O operation using DMA_PARTIAL
30120  *
30121  */
30122
30123 static int
30124 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
30125     struct scsi_pkt *pkt, struct sd_xbuf *xp)
30126 {
30127     ssize_t num_blks_not_xfered;
30128     daddr_t strt_blk_num;
30129     ssize_t bytes_not_xfered;
30130     int     rval;
30131
30132     ASSERT(pkt->pkt_resid == 0);
30133
30134     /*
30135      * Calculate the next block number and amount to be transferred.
30136      *
30137      * First, how much data has NOT yet been transferred to the HBA.
30138      */
30139     bytes_not_xfered = xp->xb_dma_resid;
30140
30141     /*
30142      * Figure out how many blocks have NOT yet been transferred to the HBA.
30143      */
30144     num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);
30145
30146     /*
30147      * Set the starting block number to the end of what WAS transferred.
30148      */
30149     strt_blk_num = xp->xb_blkno +
30150         SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);
30151
30152     /*
30153      * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
30154      * will call scsi_init_pkt() with NULL_FUNC so we do not have to
30155      * release the disk mutex here.
30156      */
30157     rval = sd_setup_next_rw_pkt(un, pkt, bp,
30158         strt_blk_num, num_blks_not_xfered);
30159
30160     if (rval == 0) {
30161
30162         /*
30163          * Success.
30164          *
30165          * Adjust things if there are still more blocks to be
30166          * transferred.
30167          */
30168         xp->xb_dma_resid = pkt->pkt_resid;
30169         pkt->pkt_resid = 0;
30170
30171         return (1);
30172     }
30173
30174     /*
30175      * There is really only one possible error return from
30176      * sd_setup_next_rw_pkt(), which occurs when scsi_init_pkt()
30177      * returns NULL.
30178      */
30179     ASSERT(rval == SD_PKT_ALLOC_FAILURE);
30180
30181     bp->b_resid = bp->b_bcount;
30182     bp->b_flags |= B_ERROR;
30183
30184     scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
30185         "Error setting up next portion of DMA transfer\n");
30186
30187     return (0);
30188 }
30189
30190 /*
30191  * Function: sd_panic_for_res_conflict
30192  *
30193  * Description: Call panic with a string formatted with "Reservation Conflict"
30194  *		and a human readable identifier indicating the SD instance
30195  *		that experienced the reservation conflict.
30196  *
30197  * Arguments: un - pointer to the soft state struct for the instance.
30198  *
30199  * Context: may execute in interrupt context.
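 *
 *		The resulting panic string has the form (the device path
 *		shown is illustrative only):
 *
 *			Reservation Conflict
 *			Disk: /pci@0,0/pci1000,30@10/sd@1,0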
30200 */ 30201 30202 #define SD_RESV_CONFLICT_FMT_LEN 40 30203 void 30204 sd_panic_for_res_conflict(struct sd_lun *un) 30205 { 30206 char panic_str[SD_RESV_CONFLICT_FMT_LEN + MAXPATHLEN]; 30207 char path_str[MAXPATHLEN]; 30208 30209 (void) snprintf(panic_str, sizeof (panic_str), 30210 "Reservation Conflict\nDisk: %s", 30211 ddi_pathname(SD_DEVINFO(un), path_str)); 30212 30213 panic(panic_str); 30214 } 30215 30216 /* 30217 * Note: The following sd_faultinjection_ioctl( ) routines implement 30218 * driver support for handling fault injection for error analysis 30219 * causing faults in multiple layers of the driver. 30220 * 30221 */ 30222 30223 #ifdef SD_FAULT_INJECTION 30224 static uint_t sd_fault_injection_on = 0; 30225 30226 /* 30227 * Function: sd_faultinjection_ioctl() 30228 * 30229 * Description: This routine is the driver entry point for handling 30230 * faultinjection ioctls to inject errors into the 30231 * layer model 30232 * 30233 * Arguments: cmd - the ioctl cmd received 30234 * arg - the arguments from user and returns 30235 */ 30236 30237 static void 30238 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) 30239 { 30240 uint_t i = 0; 30241 uint_t rval; 30242 30243 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 30244 30245 mutex_enter(SD_MUTEX(un)); 30246 30247 switch (cmd) { 30248 case SDIOCRUN: 30249 /* Allow pushed faults to be injected */ 30250 SD_INFO(SD_LOG_SDTEST, un, 30251 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 30252 30253 sd_fault_injection_on = 1; 30254 30255 SD_INFO(SD_LOG_IOERR, un, 30256 "sd_faultinjection_ioctl: run finished\n"); 30257 break; 30258 30259 case SDIOCSTART: 30260 /* Start Injection Session */ 30261 SD_INFO(SD_LOG_SDTEST, un, 30262 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 30263 30264 sd_fault_injection_on = 0; 30265 un->sd_injection_mask = 0xFFFFFFFF; 30266 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 30267 un->sd_fi_fifo_pkt[i] = NULL; 30268 un->sd_fi_fifo_xb[i] = NULL; 30269 un->sd_fi_fifo_un[i] = NULL; 30270 un->sd_fi_fifo_arq[i] = NULL; 30271 } 30272 un->sd_fi_fifo_start = 0; 30273 un->sd_fi_fifo_end = 0; 30274 30275 mutex_enter(&(un->un_fi_mutex)); 30276 un->sd_fi_log[0] = '\0'; 30277 un->sd_fi_buf_len = 0; 30278 mutex_exit(&(un->un_fi_mutex)); 30279 30280 SD_INFO(SD_LOG_IOERR, un, 30281 "sd_faultinjection_ioctl: start finished\n"); 30282 break; 30283 30284 case SDIOCSTOP: 30285 /* Stop Injection Session */ 30286 SD_INFO(SD_LOG_SDTEST, un, 30287 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 30288 sd_fault_injection_on = 0; 30289 un->sd_injection_mask = 0x0; 30290 30291 /* Empty stray or unuseds structs from fifo */ 30292 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 30293 if (un->sd_fi_fifo_pkt[i] != NULL) { 30294 kmem_free(un->sd_fi_fifo_pkt[i], 30295 sizeof (struct sd_fi_pkt)); 30296 } 30297 if (un->sd_fi_fifo_xb[i] != NULL) { 30298 kmem_free(un->sd_fi_fifo_xb[i], 30299 sizeof (struct sd_fi_xb)); 30300 } 30301 if (un->sd_fi_fifo_un[i] != NULL) { 30302 kmem_free(un->sd_fi_fifo_un[i], 30303 sizeof (struct sd_fi_un)); 30304 } 30305 if (un->sd_fi_fifo_arq[i] != NULL) { 30306 kmem_free(un->sd_fi_fifo_arq[i], 30307 sizeof (struct sd_fi_arq)); 30308 } 30309 un->sd_fi_fifo_pkt[i] = NULL; 30310 un->sd_fi_fifo_un[i] = NULL; 30311 un->sd_fi_fifo_xb[i] = NULL; 30312 un->sd_fi_fifo_arq[i] = NULL; 30313 } 30314 un->sd_fi_fifo_start = 0; 30315 un->sd_fi_fifo_end = 0; 30316 30317 SD_INFO(SD_LOG_IOERR, un, 30318 "sd_faultinjection_ioctl: stop finished\n"); 30319 break; 30320 30321 case SDIOCINSERTPKT: 30322 /* 
Store a packet struct to be pushed onto fifo */ 30323 SD_INFO(SD_LOG_SDTEST, un, 30324 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 30325 30326 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 30327 30328 sd_fault_injection_on = 0; 30329 30330 /* No more that SD_FI_MAX_ERROR allowed in Queue */ 30331 if (un->sd_fi_fifo_pkt[i] != NULL) { 30332 kmem_free(un->sd_fi_fifo_pkt[i], 30333 sizeof (struct sd_fi_pkt)); 30334 } 30335 if (arg != (uintptr_t)NULL) { 30336 un->sd_fi_fifo_pkt[i] = 30337 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 30338 if (un->sd_fi_fifo_pkt[i] == NULL) { 30339 /* Alloc failed don't store anything */ 30340 break; 30341 } 30342 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 30343 sizeof (struct sd_fi_pkt), 0); 30344 if (rval == -1) { 30345 kmem_free(un->sd_fi_fifo_pkt[i], 30346 sizeof (struct sd_fi_pkt)); 30347 un->sd_fi_fifo_pkt[i] = NULL; 30348 } 30349 } else { 30350 SD_INFO(SD_LOG_IOERR, un, 30351 "sd_faultinjection_ioctl: pkt null\n"); 30352 } 30353 break; 30354 30355 case SDIOCINSERTXB: 30356 /* Store a xb struct to be pushed onto fifo */ 30357 SD_INFO(SD_LOG_SDTEST, un, 30358 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 30359 30360 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 30361 30362 sd_fault_injection_on = 0; 30363 30364 if (un->sd_fi_fifo_xb[i] != NULL) { 30365 kmem_free(un->sd_fi_fifo_xb[i], 30366 sizeof (struct sd_fi_xb)); 30367 un->sd_fi_fifo_xb[i] = NULL; 30368 } 30369 if (arg != (uintptr_t)NULL) { 30370 un->sd_fi_fifo_xb[i] = 30371 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 30372 if (un->sd_fi_fifo_xb[i] == NULL) { 30373 /* Alloc failed don't store anything */ 30374 break; 30375 } 30376 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 30377 sizeof (struct sd_fi_xb), 0); 30378 30379 if (rval == -1) { 30380 kmem_free(un->sd_fi_fifo_xb[i], 30381 sizeof (struct sd_fi_xb)); 30382 un->sd_fi_fifo_xb[i] = NULL; 30383 } 30384 } else { 30385 SD_INFO(SD_LOG_IOERR, un, 30386 "sd_faultinjection_ioctl: xb null\n"); 30387 } 30388 break; 30389 30390 case SDIOCINSERTUN: 30391 /* Store a un struct to be pushed onto fifo */ 30392 SD_INFO(SD_LOG_SDTEST, un, 30393 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 30394 30395 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 30396 30397 sd_fault_injection_on = 0; 30398 30399 if (un->sd_fi_fifo_un[i] != NULL) { 30400 kmem_free(un->sd_fi_fifo_un[i], 30401 sizeof (struct sd_fi_un)); 30402 un->sd_fi_fifo_un[i] = NULL; 30403 } 30404 if (arg != (uintptr_t)NULL) { 30405 un->sd_fi_fifo_un[i] = 30406 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 30407 if (un->sd_fi_fifo_un[i] == NULL) { 30408 /* Alloc failed don't store anything */ 30409 break; 30410 } 30411 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 30412 sizeof (struct sd_fi_un), 0); 30413 if (rval == -1) { 30414 kmem_free(un->sd_fi_fifo_un[i], 30415 sizeof (struct sd_fi_un)); 30416 un->sd_fi_fifo_un[i] = NULL; 30417 } 30418 30419 } else { 30420 SD_INFO(SD_LOG_IOERR, un, 30421 "sd_faultinjection_ioctl: un null\n"); 30422 } 30423 30424 break; 30425 30426 case SDIOCINSERTARQ: 30427 /* Store a arq struct to be pushed onto fifo */ 30428 SD_INFO(SD_LOG_SDTEST, un, 30429 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 30430 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 30431 30432 sd_fault_injection_on = 0; 30433 30434 if (un->sd_fi_fifo_arq[i] != NULL) { 30435 kmem_free(un->sd_fi_fifo_arq[i], 30436 sizeof (struct sd_fi_arq)); 30437 un->sd_fi_fifo_arq[i] = NULL; 30438 } 30439 if (arg != (uintptr_t)NULL) { 30440 un->sd_fi_fifo_arq[i] 
=
30441             kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
30442             if (un->sd_fi_fifo_arq[i] == NULL) {
30443                 /* Alloc failed; don't store anything */
30444                 break;
30445             }
30446             rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
30447                 sizeof (struct sd_fi_arq), 0);
30448             if (rval == -1) {
30449                 kmem_free(un->sd_fi_fifo_arq[i],
30450                     sizeof (struct sd_fi_arq));
30451                 un->sd_fi_fifo_arq[i] = NULL;
30452             }
30453
30454         } else {
30455             SD_INFO(SD_LOG_IOERR, un,
30456                 "sd_faultinjection_ioctl: arq null\n");
30457         }
30458
30459         break;
30460
30461     case SDIOCPUSH:
30462         /* Push stored xb, pkt, un, and arq onto fifo */
30463         sd_fault_injection_on = 0;
30464
30465         if (arg != (uintptr_t)NULL) {
30466             rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
30467             if (rval != -1 &&
30468                 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
30469                 un->sd_fi_fifo_end += i;
30470             }
30471         } else {
30472             SD_INFO(SD_LOG_IOERR, un,
30473                 "sd_faultinjection_ioctl: push arg null\n");
30474             if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
30475                 un->sd_fi_fifo_end++;
30476             }
30477         }
30478         SD_INFO(SD_LOG_IOERR, un,
30479             "sd_faultinjection_ioctl: push to end=%d\n",
30480             un->sd_fi_fifo_end);
30481         break;
30482
30483     case SDIOCRETRIEVE:
30484         /* Return buffer of log from Injection session */
30485         SD_INFO(SD_LOG_SDTEST, un,
30486             "sd_faultinjection_ioctl: Injecting Fault Retrieve");
30487
30488         sd_fault_injection_on = 0;
30489
30490         mutex_enter(&(un->un_fi_mutex));
30491         rval = ddi_copyout(un->sd_fi_log, (void *)arg,
30492             un->sd_fi_buf_len+1, 0);
30493         mutex_exit(&(un->un_fi_mutex));
30494
30495         if (rval == -1) {
30496             /*
30497              * arg is possibly invalid; set
30498              * it to NULL for the return.
30499              */
30500             arg = (uintptr_t)NULL;
30501         }
30502         break;
30503     }
30504
30505     mutex_exit(SD_MUTEX(un));
30506     SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
30507 }
30508
30509
30510 /*
30511  * Function: sd_injection_log()
30512  *
30513  * Description: This routine adds buf to the already existing injection log,
30514  *		for retrieval via sd_faultinjection_ioctl() for use in fault
30515  *		detection and recovery.
30516  *
30517  * Arguments: buf - the string to add to the log
30518  */
30519
30520 static void
30521 sd_injection_log(char *buf, struct sd_lun *un)
30522 {
30523     uint_t len;
30524
30525     ASSERT(un != NULL);
30526     ASSERT(buf != NULL);
30527
30528     mutex_enter(&(un->un_fi_mutex));
30529
30530     len = min(strlen(buf), 255);
30531     /* Add logged value to Injection log to be returned later */
30532     if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
30533         uint_t offset = strlen((char *)un->sd_fi_log);
30534         char *destp = (char *)un->sd_fi_log + offset;
30535         int i;
30536         for (i = 0; i < len; i++) {
30537             *destp++ = *buf++;
30538         }
30539         un->sd_fi_buf_len += len;
30540         un->sd_fi_log[un->sd_fi_buf_len] = '\0';
30541     }
30542
30543     mutex_exit(&(un->un_fi_mutex));
30544 }
30545
30546
30547 /*
30548  * Function: sd_faultinjection()
30549  *
30550  * Description: This routine takes the pkt and changes its
30551  *		content based on the error injection scenario.
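 *		It is invoked on command completion (note the "Injection
 *		from sdintr" trace in the body) and, once injection has
 *		been armed via the SDIOCRUN ioctl, consumes one entry from
 *		the injection fifo per invocation.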
30552 * 30553 * Arguments: pktp - packet to be changed 30554 */ 30555 30556 static void 30557 sd_faultinjection(struct scsi_pkt *pktp) 30558 { 30559 uint_t i; 30560 struct sd_fi_pkt *fi_pkt; 30561 struct sd_fi_xb *fi_xb; 30562 struct sd_fi_un *fi_un; 30563 struct sd_fi_arq *fi_arq; 30564 struct buf *bp; 30565 struct sd_xbuf *xb; 30566 struct sd_lun *un; 30567 30568 ASSERT(pktp != NULL); 30569 30570 /* pull bp xb and un from pktp */ 30571 bp = (struct buf *)pktp->pkt_private; 30572 xb = SD_GET_XBUF(bp); 30573 un = SD_GET_UN(bp); 30574 30575 ASSERT(un != NULL); 30576 30577 mutex_enter(SD_MUTEX(un)); 30578 30579 SD_TRACE(SD_LOG_SDTEST, un, 30580 "sd_faultinjection: entry Injection from sdintr\n"); 30581 30582 /* if injection is off return */ 30583 if (sd_fault_injection_on == 0 || 30584 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 30585 mutex_exit(SD_MUTEX(un)); 30586 return; 30587 } 30588 30589 SD_INFO(SD_LOG_SDTEST, un, 30590 "sd_faultinjection: is working for copying\n"); 30591 30592 /* take next set off fifo */ 30593 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 30594 30595 fi_pkt = un->sd_fi_fifo_pkt[i]; 30596 fi_xb = un->sd_fi_fifo_xb[i]; 30597 fi_un = un->sd_fi_fifo_un[i]; 30598 fi_arq = un->sd_fi_fifo_arq[i]; 30599 30600 30601 /* set variables accordingly */ 30602 /* set pkt if it was on fifo */ 30603 if (fi_pkt != NULL) { 30604 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 30605 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 30606 if (fi_pkt->pkt_cdbp != 0xff) 30607 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 30608 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 30609 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 30610 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 30611 30612 } 30613 /* set xb if it was on fifo */ 30614 if (fi_xb != NULL) { 30615 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 30616 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 30617 if (fi_xb->xb_retry_count != 0) 30618 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 30619 SD_CONDSET(xb, xb, xb_victim_retry_count, 30620 "xb_victim_retry_count"); 30621 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 30622 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 30623 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 30624 30625 /* copy in block data from sense */ 30626 /* 30627 * if (fi_xb->xb_sense_data[0] != -1) { 30628 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 30629 * SENSE_LENGTH); 30630 * } 30631 */ 30632 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH); 30633 30634 /* copy in extended sense codes */ 30635 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30636 xb, es_code, "es_code"); 30637 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30638 xb, es_key, "es_key"); 30639 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30640 xb, es_add_code, "es_add_code"); 30641 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 30642 xb, es_qual_code, "es_qual_code"); 30643 struct scsi_extended_sense *esp; 30644 esp = (struct scsi_extended_sense *)xb->xb_sense_data; 30645 esp->es_class = CLASS_EXTENDED_SENSE; 30646 } 30647 30648 /* set un if it was on fifo */ 30649 if (fi_un != NULL) { 30650 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 30651 SD_CONDSET(un, un, un_ctype, "un_ctype"); 30652 SD_CONDSET(un, un, un_reset_retry_count, 30653 "un_reset_retry_count"); 30654 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 30655 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 30656 SD_CONDSET(un, un, 
un_f_arq_enabled, "un_f_arq_enabled");
30657         SD_CONDSET(un, un, un_f_allow_bus_device_reset,
30658             "un_f_allow_bus_device_reset");
30659         SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");
30660
30661     }
30662
30663     /* copy in auto request sense if it was on fifo */
30664     if (fi_arq != NULL) {
30665         bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
30666     }
30667
30668     /* free structs */
30669     if (un->sd_fi_fifo_pkt[i] != NULL) {
30670         kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
30671     }
30672     if (un->sd_fi_fifo_xb[i] != NULL) {
30673         kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
30674     }
30675     if (un->sd_fi_fifo_un[i] != NULL) {
30676         kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
30677     }
30678     if (un->sd_fi_fifo_arq[i] != NULL) {
30679         kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
30680     }
30681
30682     /*
30683      * kmem_free() does not guarantee to set the pointer to NULL;
30684      * since we use these pointers to determine whether we set
30685      * values or not, make sure they are always
30686      * NULL after the free.
30687      */
30688     un->sd_fi_fifo_pkt[i] = NULL;
30689     un->sd_fi_fifo_un[i] = NULL;
30690     un->sd_fi_fifo_xb[i] = NULL;
30691     un->sd_fi_fifo_arq[i] = NULL;
30692
30693     un->sd_fi_fifo_start++;
30694
30695     mutex_exit(SD_MUTEX(un));
30696
30697     SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
30698 }
30699
30700 #endif /* SD_FAULT_INJECTION */
30701
30702 /*
30703  * This routine is invoked from sd_unit_attach(). Before it is called, the
30704  * properties in the conf file must already have been processed, including
30705  * the "hotpluggable" property.
30706  *
30707  * The sd driver distinguishes three different types of devices: removable
30708  * media, non-removable media, and hotpluggable. The differences are below:
30709  *
30710  * 1. Device ID
30711  *
30712  *     The device ID of a device is used to uniquely identify it. Refer to
30713  *     ddi_devid_register(9F).
30714  *
30715  *     For a non-removable media disk device that can provide the 0x80 or
30716  *     0x83 VPD page (refer to the INQUIRY command in the SCSI SPC
30717  *     specification), a unique device ID is created to identify the device.
30718  *     For other non-removable media devices, a default device ID is created
30719  *     only if the device has at least 2 alternate cylinders; otherwise none.
30720  *
30721  *     -------------------------------------------------------
30722  *     removable media   hotpluggable  |  Can Have Device ID
30723  *     -------------------------------------------------------
30724  *         false            false      |         Yes
30725  *         false            true       |         Yes
30726  *         true             x          |         No
30727  *     -------------------------------------------------------
30728  *
30729  *
30730  * 2. SCSI group 4 commands
30731  *
30732  *     In the SCSI specs, only some commands in the group 4 command set can
30733  *     use 8-byte addresses, which are needed to access >2TB storage spaces.
30734  *     Other commands have no such capability. Without group 4 support,
30735  *     it is impossible to make full use of the storage space of a disk
30736  *     with a capacity larger than 2TB.
30737  *
30738  *     -----------------------------------------------
30739  *     removable media   hotpluggable   LP64  |  Group
30740  *     -----------------------------------------------
30741  *         false            false      false  |   1
30742  *         false            false      true   |   4
30743  *         false            true       false  |   1
30744  *         false            true       true   |   4
30745  *         true             x          x      |   5
30746  *     -----------------------------------------------
30747  *
30748  *
30749  * 3. Check for VTOC Label
30750  *
30751  *     If a direct-access disk has no EFI label, sd will check whether it
30752  *     has a valid VTOC label. Now, sd also does that check for removable
30753  *     media and hotpluggable devices.
30754  *
30755  *     --------------------------------------------------------------
30756  *     Direct-Access   removable media   hotpluggable  |  Check Label
30757  *     --------------------------------------------------------------
30758  *         false           false            false      |     No
30759  *         false           false            true       |     No
30760  *         false           true             false      |     Yes
30761  *         false           true             true       |     Yes
30762  *         true            x                x          |     Yes
30763  *     --------------------------------------------------------------
30764  *
30765  *
30766  * 4. Building default VTOC label
30767  *
30768  *     As section 3 says, sd checks whether certain kinds of devices have a
30769  *     VTOC label. If such a device has no valid VTOC label, sd(4D) will
30770  *     attempt to create a default VTOC label for it. Currently sd creates
30771  *     a default VTOC label for all devices on the x86 platform (VTOC_16),
30772  *     but only for removable media devices on SPARC (VTOC_8).
30773  *
30774  *     -----------------------------------------------------------
30775  *     removable media   hotpluggable   platform  |  Default Label
30776  *     -----------------------------------------------------------
30777  *         false            false       sparc     |      No
30778  *         false            true        x86       |      Yes
30779  *         false            true        sparc     |      Yes
30780  *         true             x           x         |      Yes
30781  *     -----------------------------------------------------------
30782  *
30783  *
30784  * 5. Supported blocksizes of target devices
30785  *
30786  *     sd supports a non-512-byte blocksize for removable media devices
30787  *     only. For other devices, only a 512-byte blocksize is supported.
30788  *     This may change in the near future because some RAID devices
30789  *     require a non-512-byte blocksize.
30790  *
30791  *     -----------------------------------------------------------
30792  *     removable media   hotpluggable  |  non-512-byte blocksize
30793  *     -----------------------------------------------------------
30794  *         false            false      |     No
30795  *         false            true       |     No
30796  *         true             x          |     Yes
30797  *     -----------------------------------------------------------
30798  *
30799  *
30800  * 6. Automatic mount & unmount
30801  *
30802  *     The sd(4D) driver provides the DKIOCREMOVABLE ioctl, which is used
30803  *     to query whether a device is a removable media device. It returns 1
30804  *     for removable media devices, and 0 for others.
30805  *
30806  *     The automatic mounting subsystem should distinguish between the types
30807  *     of devices and apply automounting policies to each.
30808  *
30809  *
30810  * 7. fdisk partition management
30811  *
30812  *     fdisk is the traditional partitioning method on the x86 platform.
30813  *     The sd(4D) driver supports fdisk partitions only on x86. On SPARC,
30814  *     sd doesn't support fdisk partitions at all. Note: pcfs(4FS) can
30815  *     recognize fdisk partitions on both the x86 and SPARC platforms.
30816  *
30817  *     -----------------------------------------------------------
30818  *     platform   removable media   USB/1394  |  fdisk supported
30819  *     -----------------------------------------------------------
30820  *     x86            X                X      |       true
30821  *     -----------------------------------------------------------
30822  *     sparc          X                X      |       false
30823  *     -----------------------------------------------------------
30824  *
30825  *
30826  * 8. MBOOT/MBR
30827  *
30828  *     Although sd(4D) doesn't support fdisk on SPARC, it does support
30829  *     read/write of the mboot for removable media devices on SPARC.
30830 * 30831 * ----------------------------------------------------------- 30832 * platform removable media USB/1394 | mboot supported 30833 * ----------------------------------------------------------- 30834 * x86 X X | true 30835 * ------------------------------------------------------------ 30836 * sparc false false | false 30837 * sparc false true | true 30838 * sparc true false | true 30839 * sparc true true | true 30840 * ------------------------------------------------------------ 30841 * 30842 * 30843 * 9. error handling during opening device 30844 * 30845 * If failed to open a disk device, an errno is returned. For some kinds 30846 * of errors, different errno is returned depending on if this device is 30847 * a removable media device. This brings USB/1394 hard disks in line with 30848 * expected hard disk behavior. It is not expected that this breaks any 30849 * application. 30850 * 30851 * ------------------------------------------------------ 30852 * removable media hotpluggable | errno 30853 * ------------------------------------------------------ 30854 * false false | EIO 30855 * false true | EIO 30856 * true x | ENXIO 30857 * ------------------------------------------------------ 30858 * 30859 * 30860 * 11. ioctls: DKIOCEJECT, CDROMEJECT 30861 * 30862 * These IOCTLs are applicable only to removable media devices. 30863 * 30864 * ----------------------------------------------------------- 30865 * removable media hotpluggable |DKIOCEJECT, CDROMEJECT 30866 * ----------------------------------------------------------- 30867 * false false | No 30868 * false true | No 30869 * true x | Yes 30870 * ----------------------------------------------------------- 30871 * 30872 * 30873 * 12. Kstats for partitions 30874 * 30875 * sd creates partition kstat for non-removable media devices. USB and 30876 * Firewire hard disks now have partition kstats 30877 * 30878 * ------------------------------------------------------ 30879 * removable media hotpluggable | kstat 30880 * ------------------------------------------------------ 30881 * false false | Yes 30882 * false true | Yes 30883 * true x | No 30884 * ------------------------------------------------------ 30885 * 30886 * 30887 * 13. Removable media & hotpluggable properties 30888 * 30889 * Sd driver creates a "removable-media" property for removable media 30890 * devices. Parent nexus drivers create a "hotpluggable" property if 30891 * it supports hotplugging. 30892 * 30893 * --------------------------------------------------------------------- 30894 * removable media hotpluggable | "removable-media" " hotpluggable" 30895 * --------------------------------------------------------------------- 30896 * false false | No No 30897 * false true | No Yes 30898 * true false | Yes No 30899 * true true | Yes Yes 30900 * --------------------------------------------------------------------- 30901 * 30902 * 30903 * 14. Power Management 30904 * 30905 * sd only power manages removable media devices or devices that support 30906 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250) 30907 * 30908 * A parent nexus that supports hotplugging can also set "pm-capable" 30909 * if the disk can be power managed. 
30910 * 30911 * ------------------------------------------------------------ 30912 * removable media hotpluggable pm-capable | power manage 30913 * ------------------------------------------------------------ 30914 * false false false | No 30915 * false false true | Yes 30916 * false true false | No 30917 * false true true | Yes 30918 * true x x | Yes 30919 * ------------------------------------------------------------ 30920 * 30921 * USB and firewire hard disks can now be power managed independently 30922 * of the framebuffer 30923 * 30924 * 30925 * 15. Support for USB disks with capacity larger than 1TB 30926 * 30927 * Currently, sd doesn't permit a fixed disk device with capacity 30928 * larger than 1TB to be used in a 32-bit operating system environment. 30929 * However, sd doesn't do that for removable media devices. Instead, it 30930 * assumes that removable media devices cannot have a capacity larger 30931 * than 1TB. Therefore, using those devices on 32-bit system is partially 30932 * supported, which can cause some unexpected results. 30933 * 30934 * --------------------------------------------------------------------- 30935 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env 30936 * --------------------------------------------------------------------- 30937 * false false | true | no 30938 * false true | true | no 30939 * true false | true | Yes 30940 * true true | true | Yes 30941 * --------------------------------------------------------------------- 30942 * 30943 * 30944 * 16. Check write-protection at open time 30945 * 30946 * When a removable media device is being opened for writing without NDELAY 30947 * flag, sd will check if this device is writable. If attempting to open 30948 * without NDELAY flag a write-protected device, this operation will abort. 30949 * 30950 * ------------------------------------------------------------ 30951 * removable media USB/1394 | WP Check 30952 * ------------------------------------------------------------ 30953 * false false | No 30954 * false true | No 30955 * true false | Yes 30956 * true true | Yes 30957 * ------------------------------------------------------------ 30958 * 30959 * 30960 * 17. syslog when corrupted VTOC is encountered 30961 * 30962 * Currently, if an invalid VTOC is encountered, sd only print syslog 30963 * for fixed SCSI disks. 30964 * ------------------------------------------------------------ 30965 * removable media USB/1394 | print syslog 30966 * ------------------------------------------------------------ 30967 * false false | Yes 30968 * false true | No 30969 * true false | No 30970 * true true | No 30971 * ------------------------------------------------------------ 30972 */ 30973 static void 30974 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi) 30975 { 30976 int pm_cap; 30977 30978 ASSERT(un->un_sd); 30979 ASSERT(un->un_sd->sd_inq); 30980 30981 /* 30982 * Enable SYNC CACHE support for all devices. 30983 */ 30984 un->un_f_sync_cache_supported = TRUE; 30985 30986 /* 30987 * Set the sync cache required flag to false. 30988 * This would ensure that there is no SYNC CACHE 30989 * sent when there are no writes 30990 */ 30991 un->un_f_sync_cache_required = FALSE; 30992 30993 if (un->un_sd->sd_inq->inq_rmb) { 30994 /* 30995 * The media of this device is removable. And for this kind 30996 * of devices, it is possible to change medium after opening 30997 * devices. Thus we should support this operation. 
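         * (Medium changes are detected by the SCSI watch thread
         * that is enabled via un_f_monitor_media_state below.)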
30998 */ 30999 un->un_f_has_removable_media = TRUE; 31000 31001 /* 31002 * support non-512-byte blocksize of removable media devices 31003 */ 31004 un->un_f_non_devbsize_supported = TRUE; 31005 31006 /* 31007 * Assume that all removable media devices support DOOR_LOCK 31008 */ 31009 un->un_f_doorlock_supported = TRUE; 31010 31011 /* 31012 * For a removable media device, it is possible to be opened 31013 * with NDELAY flag when there is no media in drive, in this 31014 * case we don't care if device is writable. But if without 31015 * NDELAY flag, we need to check if media is write-protected. 31016 */ 31017 un->un_f_chk_wp_open = TRUE; 31018 31019 /* 31020 * need to start a SCSI watch thread to monitor media state, 31021 * when media is being inserted or ejected, notify syseventd. 31022 */ 31023 un->un_f_monitor_media_state = TRUE; 31024 31025 /* 31026 * Some devices don't support START_STOP_UNIT command. 31027 * Therefore, we'd better check if a device supports it 31028 * before sending it. 31029 */ 31030 un->un_f_check_start_stop = TRUE; 31031 31032 /* 31033 * support eject media ioctl: 31034 * FDEJECT, DKIOCEJECT, CDROMEJECT 31035 */ 31036 un->un_f_eject_media_supported = TRUE; 31037 31038 /* 31039 * Because many removable-media devices don't support 31040 * LOG_SENSE, we couldn't use this command to check if 31041 * a removable media device support power-management. 31042 * We assume that they support power-management via 31043 * START_STOP_UNIT command and can be spun up and down 31044 * without limitations. 31045 */ 31046 un->un_f_pm_supported = TRUE; 31047 31048 /* 31049 * Need to create a zero length (Boolean) property 31050 * removable-media for the removable media devices. 31051 * Note that the return value of the property is not being 31052 * checked, since if unable to create the property 31053 * then do not want the attach to fail altogether. Consistent 31054 * with other property creation in attach. 31055 */ 31056 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 31057 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 31058 31059 } else { 31060 /* 31061 * create device ID for device 31062 */ 31063 un->un_f_devid_supported = TRUE; 31064 31065 /* 31066 * Spin up non-removable-media devices once it is attached 31067 */ 31068 un->un_f_attach_spinup = TRUE; 31069 31070 /* 31071 * According to SCSI specification, Sense data has two kinds of 31072 * format: fixed format, and descriptor format. At present, we 31073 * don't support descriptor format sense data for removable 31074 * media. 31075 */ 31076 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 31077 un->un_f_descr_format_supported = TRUE; 31078 } 31079 31080 /* 31081 * kstats are created only for non-removable media devices. 31082 * 31083 * Set this in sd.conf to 0 in order to disable kstats. The 31084 * default is 1, so they are enabled by default. 31085 */ 31086 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 31087 SD_DEVINFO(un), DDI_PROP_DONTPASS, 31088 "enable-partition-kstats", 1)); 31089 31090 /* 31091 * Check if HBA has set the "pm-capable" property. 31092 * If "pm-capable" exists and is non-zero then we can 31093 * power manage the device without checking the start/stop 31094 * cycle count log sense page. 31095 * 31096 * If "pm-capable" exists and is set to be false (0), 31097 * then we should not power manage the device. 31098 * 31099 * If "pm-capable" doesn't exist then pm_cap will 31100 * be set to SD_PM_CAPABLE_UNDEFINED (-1). 
        /*
         * Check whether the HBA has set the "pm-capable" property.
         * If "pm-capable" exists and is non-zero then we can power
         * manage the device without checking the start/stop cycle
         * count log sense page.
         *
         * If "pm-capable" exists and is set to false (0), then we
         * should not power manage the device.
         *
         * If "pm-capable" does not exist then pm_cap will be set to
         * SD_PM_CAPABLE_UNDEFINED (-1). In this case, sd will check
         * the start/stop cycle count log sense page and power manage
         * the device if the cycle count limit has not been exceeded.
         */
        pm_cap = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
            DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
        if (SD_PM_CAPABLE_IS_UNDEFINED(pm_cap)) {
            un->un_f_log_sense_supported = TRUE;
            if (!un->un_f_power_condition_disabled &&
                SD_INQUIRY(un)->inq_ansi == 6) {
                un->un_f_power_condition_supported = TRUE;
            }
        } else {
            /*
             * The pm-capable property exists.
             *
             * Convert "TRUE" values of pm_cap to
             * SD_PM_CAPABLE_IS_TRUE to make it easier to check
             * later. "TRUE" values are any values defined in
             * inquiry.h.
             */
            if (SD_PM_CAPABLE_IS_FALSE(pm_cap)) {
                un->un_f_log_sense_supported = FALSE;
            } else {
                /* SD_PM_CAPABLE_IS_TRUE case */
                un->un_f_pm_supported = TRUE;
                if (!un->un_f_power_condition_disabled &&
                    SD_PM_CAPABLE_IS_SPC_4(pm_cap)) {
                    un->un_f_power_condition_supported = TRUE;
                }
                if (SD_PM_CAP_LOG_SUPPORTED(pm_cap)) {
                    un->un_f_log_sense_supported = TRUE;
                    un->un_f_pm_log_sense_smart =
                        SD_PM_CAP_SMART_LOG(pm_cap);
                }
            }

            SD_INFO(SD_LOG_ATTACH_DETACH, un,
                "sd_unit_attach: un:0x%p pm-capable "
                "property set to %d.\n", un, un->un_f_pm_supported);
        }
    }

    if (un->un_f_is_hotpluggable) {

        /*
         * We must watch hotpluggable devices as well, since that is
         * the only way for userland applications to detect hot
         * removal while the device is busy/mounted.
         */
        un->un_f_monitor_media_state = TRUE;

        un->un_f_check_start_stop = TRUE;

    }
}

/*
 * sd_tg_rdwr:
 *	Provides rdwr access for cmlb via sd_tgops. start_block is in
 *	system block size, reqlength is in bytes.
 */
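/*
 * A worked example of the block-size conversion performed below (the
 * numbers are illustrative assumptions, with a 512-byte system block
 * and un_tgt_blocksize of 4096): a request for start_block = 3 and
 * reqlength = 1024 becomes:
 *
 *	first_byte  = 3 * 512                     = 1536
 *	real_addr   = 1536 / 4096                 = 0	(target LBA)
 *	end_block   = (1536 + 1024 + 4095) / 4096 = 1
 *	buffer_size = (1 - 0) * 4096              = 4096
 *
 * Since neither first_byte nor reqlength is target-block aligned, a
 * bounce buffer is allocated, the full target block is transferred,
 * and the requested bytes are copied in or out at the proper offset.
 */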
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
    struct sd_lun *un;
    int path_flag = (int)(uintptr_t)tg_cookie;
    char *dkl = NULL;
    diskaddr_t real_addr = start_block;
    diskaddr_t first_byte, end_block;

    size_t buffer_size = reqlength;
    int rval = 0;
    diskaddr_t cap;
    uint32_t lbasize;
    sd_ssc_t *ssc;

    un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
    if (un == NULL)
        return (ENXIO);

    if (cmd != TG_READ && cmd != TG_WRITE)
        return (EINVAL);

    ssc = sd_ssc_init(un);
    mutex_enter(SD_MUTEX(un));
    if (un->un_f_tgt_blocksize_is_valid == FALSE) {
        mutex_exit(SD_MUTEX(un));
        rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
            &lbasize, path_flag);
        if (rval != 0)
            goto done1;
        mutex_enter(SD_MUTEX(un));
        sd_update_block_info(un, lbasize, cap);
        if (un->un_f_tgt_blocksize_is_valid == FALSE) {
            mutex_exit(SD_MUTEX(un));
            rval = EIO;
            goto done;
        }
    }

    if (NOT_DEVBSIZE(un)) {
        /*
         * sys_blocksize != tgt_blocksize: re-adjust the block number
         * and save the offset to the beginning of the dk_label.
         */
        first_byte = SD_SYSBLOCKS2BYTES(start_block);
        real_addr = first_byte / un->un_tgt_blocksize;

        end_block = (first_byte + reqlength +
            un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

        /* round up buffer size to multiple of target block size */
        buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

        SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
            "label_addr: 0x%x allocation size: 0x%x\n",
            real_addr, buffer_size);

        if (((first_byte % un->un_tgt_blocksize) != 0) ||
            (reqlength % un->un_tgt_blocksize) != 0)
            /* the request is not aligned */
            dkl = kmem_zalloc(buffer_size, KM_SLEEP);
    }

    /*
     * The MMC standard allows READ CAPACITY to be inaccurate by a
     * bounded amount (in the interest of response latency). As a
     * result, failed READs are commonplace (due to the reading of
     * metadata and not data). Depending on the per-vendor/drive
     * sense data, the failed READ can cause many (unnecessary)
     * retries.
     */
    if (ISCD(un) && (cmd == TG_READ) &&
        (un->un_f_blockcount_is_valid == TRUE) &&
        ((start_block == (un->un_blockcount - 1)) ||
        (start_block == (un->un_blockcount - 2)))) {
        path_flag = SD_PATH_DIRECT_PRIORITY;
    }

    mutex_exit(SD_MUTEX(un));
    if (cmd == TG_READ) {
        rval = sd_send_scsi_READ(ssc, (dkl != NULL) ? dkl : bufaddr,
            buffer_size, real_addr, path_flag);
        if (dkl != NULL)
            bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
                real_addr), bufaddr, reqlength);
    } else {
        if (dkl) {
            rval = sd_send_scsi_READ(ssc, dkl, buffer_size,
                real_addr, path_flag);
            if (rval) {
                goto done1;
            }
            bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
                real_addr), reqlength);
        }
        rval = sd_send_scsi_WRITE(ssc, (dkl != NULL) ? dkl : bufaddr,
            buffer_size, real_addr, path_flag);
    }
done1:
    if (dkl != NULL)
        kmem_free(dkl, buffer_size);

    if (rval != 0) {
        if (rval == EIO)
            sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
        else
            sd_ssc_assessment(ssc, SD_FMT_IGNORE);
    }
done:
    sd_ssc_fini(ssc);
    return (rval);
}

static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
    struct sd_lun *un;
    diskaddr_t cap;
    uint32_t lbasize;
    int path_flag = (int)(uintptr_t)tg_cookie;
    int ret = 0;

    un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
    if (un == NULL)
        return (ENXIO);

    switch (cmd) {
    case TG_GETPHYGEOM:
    case TG_GETVIRTGEOM:
    case TG_GETCAPACITY:
    case TG_GETBLOCKSIZE:
        mutex_enter(SD_MUTEX(un));

        if ((un->un_f_blockcount_is_valid == TRUE) &&
            (un->un_f_tgt_blocksize_is_valid == TRUE)) {
            cap = un->un_blockcount;
            lbasize = un->un_tgt_blocksize;
            mutex_exit(SD_MUTEX(un));
        } else {
            sd_ssc_t *ssc;
            mutex_exit(SD_MUTEX(un));
            ssc = sd_ssc_init(un);
            ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
                &lbasize, path_flag);
            if (ret != 0) {
                if (ret == EIO)
                    sd_ssc_assessment(ssc,
                        SD_FMT_STATUS_CHECK);
                else
                    sd_ssc_assessment(ssc,
                        SD_FMT_IGNORE);
                sd_ssc_fini(ssc);
                return (ret);
            }
            sd_ssc_fini(ssc);
            mutex_enter(SD_MUTEX(un));
            sd_update_block_info(un, lbasize, cap);
            if ((un->un_f_blockcount_is_valid == FALSE) ||
                (un->un_f_tgt_blocksize_is_valid == FALSE)) {
                mutex_exit(SD_MUTEX(un));
                return (EIO);
            }
            mutex_exit(SD_MUTEX(un));
        }

        if (cmd == TG_GETCAPACITY) {
            *(diskaddr_t *)arg = cap;
            return (0);
        }

        if (cmd == TG_GETBLOCKSIZE) {
            *(uint32_t *)arg = lbasize;
            return (0);
        }

        if (cmd == TG_GETPHYGEOM)
            ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
                cap, lbasize, path_flag);
        else
            /* TG_GETVIRTGEOM */
            ret = sd_get_virtual_geometry(un,
                (cmlb_geom_t *)arg, cap, lbasize);

        return (ret);

    case TG_GETATTR:
        mutex_enter(SD_MUTEX(un));
        ((tg_attribute_t *)arg)->media_is_writable =
            un->un_f_mmc_writable_media;
        ((tg_attribute_t *)arg)->media_is_solid_state =
            un->un_f_is_solid_state;
        ((tg_attribute_t *)arg)->media_is_rotational =
            un->un_f_is_rotational;
        mutex_exit(SD_MUTEX(un));
        return (0);
    default:
        return (ENOTTY);
    }
}

/*
 * Function: sd_ssc_ereport_post
 *
 * Description: Called when the sd driver needs to post an ereport.
 *
 * Context: Kernel thread or interrupt context.
 */

#define	DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? (d) : "unknown"
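/*
 * For reference, the class suffixes posted below combine with the
 * "ereport.io.scsi." prefix, so "cmd.disk.recovered" appears to FMA as
 * ereport.io.scsi.cmd.disk.recovered. A hedged example of inspecting
 * these events from userland (the exact output format is an
 * assumption):
 *
 *	# fmdump -eV | grep scsi.cmd.disk
 *
 * The classes posted by sd_ssc_ereport_post() are cmd.disk.recovered,
 * cmd.disk.dev.uderr, cmd.disk.tran, cmd.disk.dev.rqs.merr,
 * cmd.disk.dev.rqs.derr and cmd.disk.dev.serr.
 */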
(d) : "unknown" 31380 31381 static void 31382 sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess) 31383 { 31384 int uscsi_path_instance = 0; 31385 uchar_t uscsi_pkt_reason; 31386 uint32_t uscsi_pkt_state; 31387 uint32_t uscsi_pkt_statistics; 31388 uint64_t uscsi_ena; 31389 uchar_t op_code; 31390 uint8_t *sensep; 31391 union scsi_cdb *cdbp; 31392 uint_t cdblen = 0; 31393 uint_t senlen = 0; 31394 struct sd_lun *un; 31395 dev_info_t *dip; 31396 char *devid; 31397 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON | 31398 SSC_FLAGS_INVALID_STATUS | 31399 SSC_FLAGS_INVALID_SENSE | 31400 SSC_FLAGS_INVALID_DATA; 31401 char assessment[16]; 31402 31403 ASSERT(ssc != NULL); 31404 ASSERT(ssc->ssc_uscsi_cmd != NULL); 31405 ASSERT(ssc->ssc_uscsi_info != NULL); 31406 31407 un = ssc->ssc_un; 31408 ASSERT(un != NULL); 31409 31410 dip = un->un_sd->sd_dev; 31411 31412 /* 31413 * Get the devid: 31414 * devid will only be passed to non-transport error reports. 31415 */ 31416 devid = DEVI(dip)->devi_devid_str; 31417 31418 /* 31419 * If we are syncing or dumping, the command will not be executed 31420 * so we bypass this situation. 31421 */ 31422 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 31423 (un->un_state == SD_STATE_DUMPING)) 31424 return; 31425 31426 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason; 31427 uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance; 31428 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state; 31429 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics; 31430 uscsi_ena = ssc->ssc_uscsi_info->ui_ena; 31431 31432 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 31433 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb; 31434 31435 /* In rare cases, EG:DOORLOCK, the cdb could be NULL */ 31436 if (cdbp == NULL) { 31437 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 31438 "sd_ssc_ereport_post meet empty cdb\n"); 31439 return; 31440 } 31441 31442 op_code = cdbp->scc_cmd; 31443 31444 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen; 31445 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 31446 ssc->ssc_uscsi_cmd->uscsi_rqresid); 31447 31448 if (senlen > 0) 31449 ASSERT(sensep != NULL); 31450 31451 /* 31452 * Initialize drv_assess to corresponding values. 31453 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending 31454 * on the sense-key returned back. 31455 */ 31456 switch (drv_assess) { 31457 case SD_FM_DRV_RECOVERY: 31458 (void) sprintf(assessment, "%s", "recovered"); 31459 break; 31460 case SD_FM_DRV_RETRY: 31461 (void) sprintf(assessment, "%s", "retry"); 31462 break; 31463 case SD_FM_DRV_NOTICE: 31464 (void) sprintf(assessment, "%s", "info"); 31465 break; 31466 case SD_FM_DRV_FATAL: 31467 default: 31468 (void) sprintf(assessment, "%s", "unknown"); 31469 } 31470 /* 31471 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered 31472 * command, we will post ereport.io.scsi.cmd.disk.recovered. 31473 * driver-assessment will always be "recovered" here. 
    if (drv_assess == SD_FM_DRV_RECOVERY) {
        scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
            "cmd.disk.recovered", uscsi_ena, devid, NULL,
            DDI_NOSLEEP, NULL,
            FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
            DEVID_IF_KNOWN(devid),
            "driver-assessment", DATA_TYPE_STRING, assessment,
            "op-code", DATA_TYPE_UINT8, op_code,
            "cdb", DATA_TYPE_UINT8_ARRAY,
            cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
            "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
            "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
            "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
            NULL);
        return;
    }

    /*
     * If there is unexpected/undecodable data, we post
     * ereport.io.scsi.cmd.disk.dev.uderr.
     * driver-assessment will be set based on the parameter drv_assess.
     * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
     * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
     * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
     * SSC_FLAGS_INVALID_DATA - invalid data sent back.
     */
    if (ssc->ssc_flags & ssc_invalid_flags) {
        if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
            scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
                NULL, "cmd.disk.dev.uderr", uscsi_ena, devid,
                NULL, DDI_NOSLEEP, NULL,
                FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
                DEVID_IF_KNOWN(devid),
                "driver-assessment", DATA_TYPE_STRING,
                drv_assess == SD_FM_DRV_FATAL ?
                "fail" : assessment,
                "op-code", DATA_TYPE_UINT8, op_code,
                "cdb", DATA_TYPE_UINT8_ARRAY,
                cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
                "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
                "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
                "pkt-stats", DATA_TYPE_UINT32,
                uscsi_pkt_statistics,
                "stat-code", DATA_TYPE_UINT8,
                ssc->ssc_uscsi_cmd->uscsi_status,
                "un-decode-info", DATA_TYPE_STRING,
                ssc->ssc_info,
                "un-decode-value", DATA_TYPE_UINT8_ARRAY,
                senlen, sensep,
                NULL);
        } else {
            /*
             * For other types of invalid data, the
             * un-decode-value field is left empty because the
             * undecodable content can be seen from the upper
             * level payload or inside un-decode-info.
             */
            scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
                NULL,
                "cmd.disk.dev.uderr", uscsi_ena, devid,
                NULL, DDI_NOSLEEP, NULL,
                FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
                DEVID_IF_KNOWN(devid),
                "driver-assessment", DATA_TYPE_STRING,
                drv_assess == SD_FM_DRV_FATAL ?
31540 "fail" : assessment, 31541 "op-code", DATA_TYPE_UINT8, op_code, 31542 "cdb", DATA_TYPE_UINT8_ARRAY, 31543 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31544 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 31545 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 31546 "pkt-stats", DATA_TYPE_UINT32, 31547 uscsi_pkt_statistics, 31548 "stat-code", DATA_TYPE_UINT8, 31549 ssc->ssc_uscsi_cmd->uscsi_status, 31550 "un-decode-info", DATA_TYPE_STRING, 31551 ssc->ssc_info, 31552 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 31553 0, NULL, 31554 NULL); 31555 } 31556 ssc->ssc_flags &= ~ssc_invalid_flags; 31557 return; 31558 } 31559 31560 if (uscsi_pkt_reason != CMD_CMPLT || 31561 (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) { 31562 /* 31563 * pkt-reason != CMD_CMPLT or SSC_FLAGS_TRAN_ABORT was 31564 * set inside sd_start_cmds due to errors(bad packet or 31565 * fatal transport error), we should take it as a 31566 * transport error, so we post ereport.io.scsi.cmd.disk.tran. 31567 * driver-assessment will be set based on drv_assess. 31568 * We will set devid to NULL because it is a transport 31569 * error. 31570 */ 31571 if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT) 31572 ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT; 31573 31574 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL, 31575 "cmd.disk.tran", uscsi_ena, NULL, NULL, DDI_NOSLEEP, NULL, 31576 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 31577 DEVID_IF_KNOWN(devid), 31578 "driver-assessment", DATA_TYPE_STRING, 31579 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 31580 "op-code", DATA_TYPE_UINT8, op_code, 31581 "cdb", DATA_TYPE_UINT8_ARRAY, 31582 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31583 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 31584 "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state, 31585 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 31586 NULL); 31587 } else { 31588 /* 31589 * If we got here, we have a completed command, and we need 31590 * to further investigate the sense data to see what kind 31591 * of ereport we should post. 31592 * No ereport is needed if sense-key is KEY_RECOVERABLE_ERROR 31593 * and asc/ascq is "ATA PASS-THROUGH INFORMATION AVAILABLE". 31594 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr if sense-key is 31595 * KEY_MEDIUM_ERROR. 31596 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise. 31597 * driver-assessment will be set based on the parameter 31598 * drv_assess. 31599 */ 31600 if (senlen > 0) { 31601 /* 31602 * Here we have sense data available. 31603 */ 31604 uint8_t sense_key = scsi_sense_key(sensep); 31605 uint8_t sense_asc = scsi_sense_asc(sensep); 31606 uint8_t sense_ascq = scsi_sense_ascq(sensep); 31607 31608 if (sense_key == KEY_RECOVERABLE_ERROR && 31609 sense_asc == 0x00 && sense_ascq == 0x1d) 31610 return; 31611 31612 if (sense_key == KEY_MEDIUM_ERROR) { 31613 /* 31614 * driver-assessment should be "fatal" if 31615 * drv_assess is SD_FM_DRV_FATAL. 31616 */ 31617 scsi_fm_ereport_post(un->un_sd, 31618 uscsi_path_instance, NULL, 31619 "cmd.disk.dev.rqs.merr", 31620 uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL, 31621 FM_VERSION, DATA_TYPE_UINT8, 31622 FM_EREPORT_VERS0, 31623 DEVID_IF_KNOWN(devid), 31624 "driver-assessment", 31625 DATA_TYPE_STRING, 31626 drv_assess == SD_FM_DRV_FATAL ? 
31627 "fatal" : assessment, 31628 "op-code", 31629 DATA_TYPE_UINT8, op_code, 31630 "cdb", 31631 DATA_TYPE_UINT8_ARRAY, cdblen, 31632 ssc->ssc_uscsi_cmd->uscsi_cdb, 31633 "pkt-reason", 31634 DATA_TYPE_UINT8, uscsi_pkt_reason, 31635 "pkt-state", 31636 DATA_TYPE_UINT8, uscsi_pkt_state, 31637 "pkt-stats", 31638 DATA_TYPE_UINT32, 31639 uscsi_pkt_statistics, 31640 "stat-code", 31641 DATA_TYPE_UINT8, 31642 ssc->ssc_uscsi_cmd->uscsi_status, 31643 "key", 31644 DATA_TYPE_UINT8, 31645 scsi_sense_key(sensep), 31646 "asc", 31647 DATA_TYPE_UINT8, 31648 scsi_sense_asc(sensep), 31649 "ascq", 31650 DATA_TYPE_UINT8, 31651 scsi_sense_ascq(sensep), 31652 "sense-data", 31653 DATA_TYPE_UINT8_ARRAY, 31654 senlen, sensep, 31655 "lba", 31656 DATA_TYPE_UINT64, 31657 ssc->ssc_uscsi_info->ui_lba, 31658 NULL); 31659 } else { 31660 /* 31661 * if sense-key == 0x4(hardware 31662 * error), driver-assessment should 31663 * be "fatal" if drv_assess is 31664 * SD_FM_DRV_FATAL. 31665 */ 31666 scsi_fm_ereport_post(un->un_sd, 31667 uscsi_path_instance, NULL, 31668 "cmd.disk.dev.rqs.derr", 31669 uscsi_ena, devid, 31670 NULL, DDI_NOSLEEP, NULL, 31671 FM_VERSION, 31672 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 31673 DEVID_IF_KNOWN(devid), 31674 "driver-assessment", 31675 DATA_TYPE_STRING, 31676 drv_assess == SD_FM_DRV_FATAL ? 31677 (sense_key == 0x4 ? 31678 "fatal" : "fail") : assessment, 31679 "op-code", 31680 DATA_TYPE_UINT8, op_code, 31681 "cdb", 31682 DATA_TYPE_UINT8_ARRAY, cdblen, 31683 ssc->ssc_uscsi_cmd->uscsi_cdb, 31684 "pkt-reason", 31685 DATA_TYPE_UINT8, uscsi_pkt_reason, 31686 "pkt-state", 31687 DATA_TYPE_UINT8, uscsi_pkt_state, 31688 "pkt-stats", 31689 DATA_TYPE_UINT32, 31690 uscsi_pkt_statistics, 31691 "stat-code", 31692 DATA_TYPE_UINT8, 31693 ssc->ssc_uscsi_cmd->uscsi_status, 31694 "key", 31695 DATA_TYPE_UINT8, 31696 scsi_sense_key(sensep), 31697 "asc", 31698 DATA_TYPE_UINT8, 31699 scsi_sense_asc(sensep), 31700 "ascq", 31701 DATA_TYPE_UINT8, 31702 scsi_sense_ascq(sensep), 31703 "sense-data", 31704 DATA_TYPE_UINT8_ARRAY, 31705 senlen, sensep, 31706 NULL); 31707 } 31708 } else { 31709 /* 31710 * For stat_code == STATUS_GOOD, this is not a 31711 * hardware error. 31712 */ 31713 if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) 31714 return; 31715 31716 /* 31717 * Post ereport.io.scsi.cmd.disk.dev.serr if we got the 31718 * stat-code but with sense data unavailable. 31719 * driver-assessment will be set based on parameter 31720 * drv_assess. 31721 */ 31722 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 31723 NULL, 31724 "cmd.disk.dev.serr", uscsi_ena, 31725 devid, NULL, DDI_NOSLEEP, NULL, 31726 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 31727 DEVID_IF_KNOWN(devid), 31728 "driver-assessment", DATA_TYPE_STRING, 31729 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 31730 "op-code", DATA_TYPE_UINT8, op_code, 31731 "cdb", 31732 DATA_TYPE_UINT8_ARRAY, 31733 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 31734 "pkt-reason", 31735 DATA_TYPE_UINT8, uscsi_pkt_reason, 31736 "pkt-state", 31737 DATA_TYPE_UINT8, uscsi_pkt_state, 31738 "pkt-stats", 31739 DATA_TYPE_UINT32, uscsi_pkt_statistics, 31740 "stat-code", 31741 DATA_TYPE_UINT8, 31742 ssc->ssc_uscsi_cmd->uscsi_status, 31743 NULL); 31744 } 31745 } 31746 } 31747 31748 /* 31749 * Function: sd_ssc_extract_info 31750 * 31751 * Description: Extract information available to help generate ereport. 31752 * 31753 * Context: Kernel thread or interrupt context. 
static void
sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, struct sd_xbuf *xp)
{
    size_t senlen = 0;
    union scsi_cdb *cdbp;
    int path_instance;
    /*
     * Need the scsi_cdb_size array to determine the cdb length.
     */
    extern uchar_t scsi_cdb_size[];

    ASSERT(un != NULL);
    ASSERT(pktp != NULL);
    ASSERT(bp != NULL);
    ASSERT(xp != NULL);
    ASSERT(ssc != NULL);
    ASSERT(mutex_owned(SD_MUTEX(un)));

    /*
     * Transfer the cdb buffer pointer here.
     */
    cdbp = (union scsi_cdb *)pktp->pkt_cdbp;

    ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
    ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;

    /*
     * Transfer the sense data buffer pointer if sense data is available;
     * calculate the sense data length first.
     */
    if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
        (xp->xb_sense_state & STATE_ARQ_DONE)) {
        /*
         * For the ARQ case, we enter here.
         */
        if (xp->xb_sense_state & STATE_XARQ_DONE) {
            senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
        } else {
            senlen = SENSE_LENGTH;
        }
    } else {
        /*
         * For the non-ARQ case, we enter this branch.
         */
        if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
            (xp->xb_sense_state & STATE_XFERRED_DATA)) {
            senlen = SENSE_LENGTH - xp->xb_sense_resid;
        }
    }

    ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
    ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
    ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;

    ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);

    /*
     * Only transfer path_instance when the scsi_pkt was properly
     * allocated.
     */
    path_instance = pktp->pkt_path_instance;
    if (scsi_pkt_allocated_correctly(pktp) && path_instance)
        ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
    else
        ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;

    /*
     * Copy in the other fields we may need when posting an ereport.
     */
    ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
    ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
    ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
    ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

    /*
     * Do not generate an ENA for a command that completed successfully
     * with no sense data, lest a successful partial read/write command
     * be recognized as recovered.
     */
    if ((pktp->pkt_reason == CMD_CMPLT) &&
        (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
        (senlen == 0)) {
        return;
    }

    /*
     * To associate the ereports of a single command execution flow, we
     * need a shared ENA for that specific command.
     */
    if (xp->xb_ena == 0)
        xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
    ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
}

/*
 * Function: sd_check_bdc_vpd
 *
 * Description: Query the optional INQUIRY VPD page 0xb1. If the device
 *              supports VPD page 0xb1, sd examines the MEDIUM ROTATION
 *              RATE.
 *
 *              Set the following based on the RPM value:
 *              = 0  device is not solid state, non-rotational
 *              = 1  device is solid state, non-rotational
 *              > 1  device is not solid state, rotational
 *
 * Context: Kernel thread or interrupt context.
 */
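/*
 * For illustration (values are assumptions drawn from the SBC MEDIUM
 * ROTATION RATE encoding, not from a specific device): bytes 4-5 of
 * the Block Device Characteristics page equal to 0x0001 identify a
 * non-rotating (solid state) medium, while a 7200 RPM disk would
 * report 0x1C20:
 *
 *	inqb1[4] == 0x00 && inqb1[5] == 0x01	-> solid state
 *	inqb1[4] == 0x1C && inqb1[5] == 0x20	-> rotational, 7200 RPM
 *
 * The code below only distinguishes 0 (not reported, treated as
 * non-rotational), 1 (solid state) and anything larger (rotational).
 */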
static void
sd_check_bdc_vpd(sd_ssc_t *ssc)
{
    int rval = 0;
    uchar_t *inqb1 = NULL;
    size_t inqb1_len = MAX_INQUIRY_SIZE;
    size_t inqb1_resid = 0;
    struct sd_lun *un;

    ASSERT(ssc != NULL);
    un = ssc->ssc_un;
    ASSERT(un != NULL);
    ASSERT(!mutex_owned(SD_MUTEX(un)));

    mutex_enter(SD_MUTEX(un));
    un->un_f_is_rotational = TRUE;
    un->un_f_is_solid_state = FALSE;

    if (ISCD(un)) {
        mutex_exit(SD_MUTEX(un));
        return;
    }

    if (sd_check_vpd_page_support(ssc) == 0 &&
        un->un_vpd_page_mask & SD_VPD_DEV_CHARACTER_PG) {
        mutex_exit(SD_MUTEX(un));
        /* collect page b1 data */
        inqb1 = kmem_zalloc(inqb1_len, KM_SLEEP);

        rval = sd_send_scsi_INQUIRY(ssc, inqb1, inqb1_len,
            0x01, 0xB1, &inqb1_resid);

        if (rval == 0 && (inqb1_len - inqb1_resid > 5)) {
            SD_TRACE(SD_LOG_COMMON, un,
                "sd_check_bdc_vpd: successfully got VPD page: %x "
                "PAGE LENGTH: %x BYTE 4: %x BYTE 5: %x",
                inqb1[1], inqb1[3], inqb1[4], inqb1[5]);

            mutex_enter(SD_MUTEX(un));
            /*
             * Check the MEDIUM ROTATION RATE.
             */
            if (inqb1[4] == 0) {
                if (inqb1[5] == 0) {
                    un->un_f_is_rotational = FALSE;
                } else if (inqb1[5] == 1) {
                    un->un_f_is_rotational = FALSE;
                    un->un_f_is_solid_state = TRUE;
                    /*
                     * Solid state drives don't need
                     * disksort.
                     */
                    un->un_f_disksort_disabled = TRUE;
                }
            }
            mutex_exit(SD_MUTEX(un));
        } else if (rval != 0) {
            sd_ssc_assessment(ssc, SD_FMT_IGNORE);
        }

        kmem_free(inqb1, inqb1_len);
    } else {
        mutex_exit(SD_MUTEX(un));
    }
}

/*
 * Function: sd_check_emulation_mode
 *
 * Description: Check whether the SSD is running in emulation mode by
 *              issuing READ_CAPACITY_16 to see whether we can get the
 *              physical block size of the drive.
 *
 * Context: Kernel thread or interrupt context.
 */

static void
sd_check_emulation_mode(sd_ssc_t *ssc)
{
    int rval = 0;
    uint64_t capacity;
    uint_t lbasize;
    uint_t pbsize;
    int i;
    int devid_len;
    struct sd_lun *un;

    ASSERT(ssc != NULL);
    un = ssc->ssc_un;
    ASSERT(un != NULL);
    ASSERT(!mutex_owned(SD_MUTEX(un)));

    mutex_enter(SD_MUTEX(un));
    if (ISCD(un)) {
        mutex_exit(SD_MUTEX(un));
        return;
    }

    if (un->un_f_descr_format_supported) {
        mutex_exit(SD_MUTEX(un));
        rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
            &pbsize, SD_PATH_DIRECT);
        mutex_enter(SD_MUTEX(un));

        if (rval != 0) {
            un->un_phy_blocksize = DEV_BSIZE;
        } else {
            if (!ISP2(pbsize % DEV_BSIZE) || pbsize == 0) {
                un->un_phy_blocksize = DEV_BSIZE;
            } else if (pbsize > un->un_phy_blocksize) {
                /*
                 * Don't reset the physical blocksize
                 * unless we've detected a larger value.
                 */
                un->un_phy_blocksize = pbsize;
            }
        }
    }

    for (i = 0; i < sd_flash_dev_table_size; i++) {
        devid_len = (int)strlen(sd_flash_dev_table[i]);
        if (sd_sdconf_id_match(un, sd_flash_dev_table[i], devid_len)
            == SD_SUCCESS) {
            un->un_phy_blocksize = SSD_SECSIZE;
            if (un->un_f_is_solid_state &&
                un->un_phy_blocksize != un->un_tgt_blocksize)
                un->un_f_enable_rmw = TRUE;
        }
    }

    mutex_exit(SD_MUTEX(un));
}
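/*
 * Illustrative example (the geometry is an assumption, not a statement
 * about any particular drive): a 512e flash device reports a logical
 * block size of 512 bytes and a physical block size of 4096 bytes via
 * READ CAPACITY (16). After the checks above, un_phy_blocksize is 4096
 * while un_tgt_blocksize remains 512, and for devices listed in
 * sd_flash_dev_table the un_f_enable_rmw flag is set so that writes
 * smaller than the physical block can be handled with read-modify-write
 * on physical-block boundaries.
 */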