/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SCSI disk target driver.
 */
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>

#ifdef __lock_lint
#define	_LP64
#define	__amd64
#endif

#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include <sys/fm/protocol.h>

/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver"
char _depends_on[]	= "misc/scsi misc/cmlb drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver"
char _depends_on[]	= "misc/scsi misc/cmlb";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it.  Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 *
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
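/*
 * Illustrative sketch only (not compiled here): the check described above
 * amounts to querying the HBA and falling back to the compile-time default
 * when the property is absent, assuming the scsi_ifgetcap(9F) capability
 * interface and the SD_ADDRESS() accessor from sddef.h:
 *
 *	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
 *	case INTERCONNECT_FIBRE:
 *	case INTERCONNECT_SSA:
 *	case INTERCONNECT_FABRIC:
 *		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
 *		break;
 *	default:	(includes -1, i.e. property not reported by the HBA)
 *		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
 *		break;
 *	}
 */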

/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes
#define	sd_tgops			ssd_tgops

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#define	sd_ssc_init			ssd_ssc_init
#define	sd_ssc_send			ssd_ssc_send
#define	sd_ssc_fini			ssd_ssc_fini
#define	sd_ssc_assessment		ssd_ssc_assessment
#define	sd_ssc_post			ssd_ssc_post
#define	sd_ssc_print			ssd_ssc_print
#define	sd_ssc_ereport_post		ssd_ssc_ereport_post
#define	sd_ssc_set_info			ssd_ssc_set_info
#define	sd_ssc_extract_info		ssd_ssc_extract_info

#endif

#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;

/*
 * Structs and globals for recording attached LUN information.
 * These maintain a chain in which each node represents a SCSI controller;
 * the structure records the number of LUNs attached to each target
 * connected to that controller.
 * For parallel SCSI devices only.
 */
struct sd_scsi_hba_tgt_lun {
	struct sd_scsi_hba_tgt_lun	*next;
	dev_info_t			*pdip;
	int				nlun[NTARGETS_WIDE];
};

/*
 * Flag to indicate the lun is attached or detached
 */
#define	SD_SCSI_LUN_ATTACH	0
#define	SD_SCSI_LUN_DETACH	1

static kmutex_t	sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun	*sd_scsi_target_lun_head = NULL;

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
	sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
	sd_scsi_target_lun_head))

/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel SCSI and non-self-identifying devices only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
	sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
	sd_scsi_probe_cache_head))
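/*
 * Rough usage model for the cache above (illustrative sketch only; the
 * actual policy lives in sd_scsi_probe_with_cache(), declared below):
 * look up the node for this HBA, consult the cached result for the
 * target, and fall back to a real scsi_probe() only on a miss:
 *
 *	mutex_enter(&sd_scsi_probe_cache_mutex);
 *	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next)
 *		if (cp->pdip == pdip)
 *			break;
 *	if (cp != NULL && cp->cache[tgt] == SCSIPROBE_EXISTS) {
 *		mutex_exit(&sd_scsi_probe_cache_mutex);
 *		return (SCSIPROBE_EXISTS);	(cache hit, skip the probe)
 *	}
 *	mutex_exit(&sd_scsi_probe_cache_mutex);
 *	rval = scsi_probe(devp, fn);	(cache miss: probe, then cache rval)
 */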


/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0,
	1
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))
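/*
 * For example, SD_TOUPPER('n') evaluates to 'N', while characters outside
 * 'a'..'z' (digits, spaces, already-uppercase letters) pass through
 * unchanged.  Unlike toupper(3C), the argument is evaluated more than
 * once, so it must not have side effects (no SD_TOUPPER(*p++)).
 */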

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of  "NEC       CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     1724-100",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-2xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-22x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-4xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-42x",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1726-3xx",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1814-200",  SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1818",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "DELL    MD3000i",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
	{ "Fujitsu SX300",     SD_CONF_BSET_THROTTLE,  &lsi_oem_properties },
	{ "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     StorageTek 6940", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
		| SD_CONF_BSET_READSUB_BCD
		| SD_CONF_BSET_READ_TOC_ADDR_BCD
		| SD_CONF_BSET_NO_READ_HEADER
		| SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
		| SD_CONF_BSET_READSUB_BCD
		| SD_CONF_BSET_READ_TOC_ADDR_BCD
		| SD_CONF_BSET_NO_READ_HEADER
		| SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
		| SD_CONF_BSET_CTYPE
		| SD_CONF_BSET_NRR_COUNT
		| SD_CONF_BSET_FAB_DEVID
		| SD_CONF_BSET_NOCACHE
		| SD_CONF_BSET_BSY_RETRY_COUNT
		| SD_CONF_BSET_PLAYMSF_BCD
		| SD_CONF_BSET_READSUB_BCD
		| SD_CONF_BSET_READ_TOC_TRK_BCD
		| SD_CONF_BSET_READ_TOC_ADDR_BCD
		| SD_CONF_BSET_NO_READ_HEADER
		| SD_CONF_BSET_READ_CD_XD4
		| SD_CONF_BSET_RST_RETRIES
		| SD_CONF_BSET_RSV_REL_TIME
		| SD_CONF_BSET_TUR_CHECK), &tst_properties },
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);
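/*
 * For example, a device whose inquiry data reports vid "SEAGATE" and pid
 * "ST39103FC" concatenates to a device_id of "SEAGATE ST39103FC", which
 * matches the corresponding entry above and applies &seagate_properties
 * with SD_CONF_BSET_THROTTLE, i.e. only the throttle value from that
 * tunables struct.  A hypothetical new entry would follow the same shape:
 *
 *	{ "ACME    SUPERDISK", SD_CONF_BSET_THROTTLE, &acme_properties },
 *
 * where "ACME" is padded out to the 8-byte vid width defined by the
 * scsi_inquiry structure ("acme_properties" is illustrative only).
 */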



#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_INTERCONNECT_SATA		4
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define	SD_IS_SERIAL(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_SATA)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
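/*
 * For example, a transient state transition can be bracketed as follows
 * (sketch only; callers must hold SD_MUTEX(un)):
 *
 *	New_state(un, SD_STATE_SUSPENDED);
 *	...
 *	Restore_state(un);
 *
 * Note that only one level of history is kept in un_last_state, so nested
 * New_state() calls overwrite it and cannot be unwound pairwise.
 */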

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};
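/*
 * Reading the table above: each row gives a CDB size, the opcode group
 * bits for that size, the largest addressable LBA, and the largest
 * transfer count the format can encode.  So a 6-byte (Group 0) command
 * is limited to a 21-bit LBA (0x1FFFFF) and 255 blocks; a request beyond
 * either limit must use the 10-byte (Group 1), 12-byte (Group 5), or
 * 16-byte (Group 4) form.  (Field naming is per struct sd_cdbinfo in
 * sddef.h; see sd_init_cdb_limits() and sd_setup_rw_pkt() below for how
 * the row is selected.)
 */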

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_scsi_target_lun_init		ssd_scsi_target_lun_init
#define	sd_scsi_target_lun_fini		ssd_scsi_target_lun_fini
#define	sd_scsi_get_target_lun_count	ssd_scsi_get_target_lun_count
#define	sd_scsi_update_lun_on_target	ssd_scsi_update_lun_on_target
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_reenable_dsense_task		ssd_reenable_dsense_task
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties

#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_set_unit_attributes		ssd_set_unit_attributes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_cache_control		ssd_cache_control
#define	sd_get_write_cache_enabled	ssd_get_write_cache_enabled
#define	sd_get_nv_sup			ssd_get_nv_sup
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_target_change_task		ssd_target_change_task
#define	sd_log_lun_expansion_event	ssd_log_lun_expansion_event
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_SYNCHRONIZE_CACHE_biodone	\
					ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_nvpair_str_decode		ssd_nvpair_str_decode
#define	sd_strtok_r			ssd_strtok_r
#define	sd_set_properties		ssd_set_properties
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi
#define	sd_tg_rdwr			ssd_tg_rdwr
#define	sd_tg_getinfo			ssd_tg_getinfo

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

/*
 * Attached luns on target for parallel scsi
 */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);

static int sd_spin_up_unit(sd_ssc_t *ssc);

/*
 * Use sd_ssc_init to establish the sd_ssc_t struct,
 * sd_ssc_send to send a uscsi internal command, and
 * sd_ssc_fini to free the sd_ssc_t struct.
 */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
    int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);

/*
 * Use sd_ssc_assessment to set the correct type of assessment, and
 * sd_ssc_post to post an ereport and system log.
 *	sd_ssc_post calls sd_ssc_print to print the system log and
 *	sd_ssc_ereport_post to post the ereport.
 */
static void sd_ssc_assessment(sd_ssc_t *ssc,
    enum sd_type_assessment tp_assess);

static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
    enum sd_driver_assessment drv_assess);

/*
 * Use sd_ssc_set_info to mark an un-decodable-data error, and
 * sd_ssc_extract_info to transfer information from internal
 * data structures to the sd_ssc_t.
 */
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
    const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);
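/*
 * Taken together, the sd_ssc_* routines above are used in this pattern
 * (sketch only; the uscsi_cmd setup and flag choices are abbreviated and
 * illustrative):
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *	rval = sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE,
 *	    SD_PATH_DIRECT);
 *	if (rval == 0)
 *		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
 *	else
 *		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
 *	sd_ssc_fini(ssc);
 *
 * i.e. every sd_ssc_send() is paired with an sd_ssc_assessment() so the
 * FMA ereport machinery sees a verdict for each internal command.
 */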

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

#ifdef _LP64
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
#endif /* _LP64 */

static void sd_set_mmc_caps(sd_ssc_t *ssc);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);

static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
    int reservation_flag);
static int sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int sd_write_deviceid(sd_ssc_t *ssc);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(sd_ssc_t *ssc);

static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif

/*
 * Defines for sd_cache_control
 */

#define	SD_CACHE_ENABLE		1
#define	SD_CACHE_DISABLE	0
#define	SD_CACHE_NOCHANGE	-1

static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
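/*
 * For example, to enable the write cache while leaving the read cache
 * setting untouched (sketch only):
 *
 *	rval = sd_cache_control(ssc, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);
 *
 * rcd_flag drives the "read cache disable" bit and wce_flag the "write
 * cache enable" bit of the device's caching mode page; SD_CACHE_NOCHANGE
 * leaves the current device setting as-is.
 */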
static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
static void sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(sd_ssc_t *ssc, int part);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
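/*
 * For a plain buf(9S) disk I/O the layering above composes roughly as
 * follows (sketch of the typical chain; the exact chain is selected
 * per-buf by its chain index):
 *
 *	iostart: sd_mapblockaddr_iostart -> sd_mapblocksize_iostart ->
 *	    sd_checksum_iostart -> sd_pm_iostart -> sd_core_iostart
 *	iodone (reverse order): sd_pm_iodone -> sd_checksum_iodone ->
 *	    sd_mapblocksize_iodone -> sd_mapblockaddr_iodone ->
 *	    sd_buf_iodone
 *
 * Each layer either passes the buf to the next index in the chain or
 * fails it back up the iodone side.
 */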

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
    struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,	\
	path_flag)

static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr,
    uint16_t buflen, uchar_t page_code, uchar_t page_control,
    uint16_t param_ptr, int path_flag);

static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
    uchar_t *data, int len, int fmt);
static void sd_panic_for_res_conflict(struct sd_lun *un);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);

/*
 * Multi-host Ioctl Prototypes
 */
static int sd_check_mhd(dev_t dev, int interval);
static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
static char *sd_sname(uchar_t status);
static void sd_mhd_resvd_recover(void *arg);
static void sd_resv_reclaim_thread();
static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
static int sd_reserve_release(dev_t dev, int cmd);
static void sd_rmv_resv_reclaim_req(dev_t dev);
static void sd_mhd_reset_notify_cb(caddr_t arg);
static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag);
static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag);
static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_release(dev_t dev);
static int sd_mhdioc_register_devid(dev_t dev);
static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag);

/*
 * SCSI removable prototypes
 */
static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag);
static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
static int sr_pause_resume(dev_t dev, int mode);
static int sr_play_msf(dev_t dev, caddr_t data, int flag);
static int sr_play_trkind(dev_t dev, caddr_t data, int flag);
static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag);
static int sr_read_subchannel(dev_t dev, caddr_t data, int flag);
static int sr_read_tocentry(dev_t dev, caddr_t data, int flag);
static int sr_read_tochdr(dev_t dev, caddr_t data, int flag);
static int sr_read_cdda(dev_t dev, caddr_t data, int flag);
static int sr_read_cdxa(dev_t dev, caddr_t data, int flag);
static int sr_read_mode1(dev_t dev, caddr_t data, int flag);
static int sr_read_mode2(dev_t dev, caddr_t data, int flag);
static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag);
static int sr_sector_mode(dev_t dev, uint32_t blksize);
static int sr_eject(dev_t dev);
static void sr_ejected(register struct sd_lun *un);
static int sr_check_wp(dev_t dev);
static int sd_check_media(dev_t dev, enum dkio_state state);
static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_delayed_cv_broadcast(void *arg);
static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag);
static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag);

static int sd_log_page_supported(sd_ssc_t *ssc, int log_page);

/*
 * Function prototypes for the non-512 byte block size support
 * (DVD-RAM, MO, etc.) functions.
 */
static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag);
static int sd_wm_cache_constructor(void *wm, void *un, int flags);
static void sd_wm_cache_destructor(void *wm, void *un);
static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb,
    daddr_t endb, ushort_t typ);
static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb,
    daddr_t endb);
static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp);
static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm);
static void sd_read_modify_write_task(void *arg);
static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
    struct buf **bpp);


/*
 * Function prototypes for failfast support.
1587 */ 1588 static void sd_failfast_flushq(struct sd_lun *un); 1589 static int sd_failfast_flushq_callback(struct buf *bp); 1590 1591 /* 1592 * Function prototypes to check for lsi devices 1593 */ 1594 static void sd_is_lsi(struct sd_lun *un); 1595 1596 /* 1597 * Function prototypes for partial DMA support 1598 */ 1599 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1600 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1601 1602 1603 /* Function prototypes for cmlb */ 1604 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 1605 diskaddr_t start_block, size_t reqlength, void *tg_cookie); 1606 1607 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie); 1608 1609 /* 1610 * Constants for failfast support: 1611 * 1612 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1613 * failfast processing being performed. 1614 * 1615 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1616 * failfast processing on all bufs with B_FAILFAST set. 1617 */ 1618 1619 #define SD_FAILFAST_INACTIVE 0 1620 #define SD_FAILFAST_ACTIVE 1 1621 1622 /* 1623 * Bitmask to control behavior of buf(9S) flushes when a transition to 1624 * the failfast state occurs. Optional bits include: 1625 * 1626 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1627 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1628 * be flushed. 1629 * 1630 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1631 * driver, in addition to the regular wait queue. This includes the xbuf 1632 * queues. When clear, only the driver's wait queue will be flushed. 1633 */ 1634 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1635 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1636 1637 /* 1638 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1639 * to flush all queues within the driver. 1640 */ 1641 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1642 1643 1644 /* 1645 * SD Testing Fault Injection 1646 */ 1647 #ifdef SD_FAULT_INJECTION 1648 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1649 static void sd_faultinjection(struct scsi_pkt *pktp); 1650 static void sd_injection_log(char *buf, struct sd_lun *un); 1651 #endif 1652 1653 /* 1654 * Device driver ops vector 1655 */ 1656 static struct cb_ops sd_cb_ops = { 1657 sdopen, /* open */ 1658 sdclose, /* close */ 1659 sdstrategy, /* strategy */ 1660 nodev, /* print */ 1661 sddump, /* dump */ 1662 sdread, /* read */ 1663 sdwrite, /* write */ 1664 sdioctl, /* ioctl */ 1665 nodev, /* devmap */ 1666 nodev, /* mmap */ 1667 nodev, /* segmap */ 1668 nochpoll, /* poll */ 1669 sd_prop_op, /* cb_prop_op */ 1670 0, /* streamtab */ 1671 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1672 CB_REV, /* cb_rev */ 1673 sdaread, /* async I/O read entry point */ 1674 sdawrite /* async I/O write entry point */ 1675 }; 1676 1677 static struct dev_ops sd_ops = { 1678 DEVO_REV, /* devo_rev, */ 1679 0, /* refcnt */ 1680 sdinfo, /* info */ 1681 nulldev, /* identify */ 1682 sdprobe, /* probe */ 1683 sdattach, /* attach */ 1684 sddetach, /* detach */ 1685 nodev, /* reset */ 1686 &sd_cb_ops, /* driver operations */ 1687 NULL, /* bus operations */ 1688 sdpower, /* power */ 1689 ddi_quiesce_not_needed, /* quiesce */ 1690 }; 1691 1692 1693 /* 1694 * This is the loadable module wrapper. 
1695 */
1696 #include <sys/modctl.h>
1697
1698 static struct modldrv modldrv = {
1699 &mod_driverops, /* Type of module. This one is a driver */
1700 SD_MODULE_NAME, /* Module name. */
1701 &sd_ops /* driver ops */
1702 };
1703
1704
1705 static struct modlinkage modlinkage = {
1706 MODREV_1,
1707 &modldrv,
1708 NULL
1709 };
1710
1711 static cmlb_tg_ops_t sd_tgops = {
1712 TG_DK_OPS_VERSION_1,
1713 sd_tg_rdwr,
1714 sd_tg_getinfo
1715 };
1716
1717 static struct scsi_asq_key_strings sd_additional_codes[] = {
1718 0x81, 0, "Logical Unit is Reserved",
1719 0x85, 0, "Audio Address Not Valid",
1720 0xb6, 0, "Media Load Mechanism Failed",
1721 0xB9, 0, "Audio Play Operation Aborted",
1722 0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1723 0x53, 2, "Medium removal prevented",
1724 0x6f, 0, "Authentication failed during key exchange",
1725 0x6f, 1, "Key not present",
1726 0x6f, 2, "Key not established",
1727 0x6f, 3, "Read without proper authentication",
1728 0x6f, 4, "Mismatched region to this logical unit",
1729 0x6f, 5, "Region reset count error",
1730 0xffff, 0x0, NULL
1731 };
1732
1733
1734 /*
1735 * Struct for passing printing information for sense data messages
1736 */
1737 struct sd_sense_info {
1738 int ssi_severity;
1739 int ssi_pfa_flag;
1740 };
1741
1742 /*
1743 * Table of function pointers for iostart-side routines. Separate "chains"
1744 * of layered function calls are formed by placing the function pointers
1745 * sequentially in the desired order. Functions are called according to an
1746 * incrementing table index ordering. The last function in each chain must
1747 * be sd_core_iostart(). The corresponding iodone-side routines are expected
1748 * in the sd_iodone_chain[] array.
1749 *
1750 * Note: It may seem more natural to organize both the iostart and iodone
1751 * functions together, into an array of structures (or some similar
1752 * organization) with a common index, rather than two separate arrays which
1753 * must be maintained in synchronization. The purpose of this division is
1754 * to achieve improved performance: individual arrays allow for more
1755 * effective cache line utilization on certain platforms.
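 *
 * For illustration only (an editorial sketch, not part of the driver):
 * a buf IO on a disk instance with PM enabled enters its chain at
 * index 0, and each layer hands the buf to the next via the
 * SD_NEXT_IOSTART() macro defined further below, so the effective
 * call sequence is:
 *
 *	SD_BEGIN_IOSTART(0, un, bp);	calls sd_mapblockaddr_iostart
 *	SD_NEXT_IOSTART(0, un, bp);	calls sd_pm_iostart (Index: 1)
 *	SD_NEXT_IOSTART(1, un, bp);	calls sd_core_iostart (Index: 2)
 *
 * The matching iodone chain then unwinds in the opposite (decrementing)
 * direction, starting from the highest index in the chain (see the
 * sd_iodone_chain[] array below).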
1756 */ 1757 1758 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1759 1760 1761 static sd_chain_t sd_iostart_chain[] = { 1762 1763 /* Chain for buf IO for disk drive targets (PM enabled) */ 1764 sd_mapblockaddr_iostart, /* Index: 0 */ 1765 sd_pm_iostart, /* Index: 1 */ 1766 sd_core_iostart, /* Index: 2 */ 1767 1768 /* Chain for buf IO for disk drive targets (PM disabled) */ 1769 sd_mapblockaddr_iostart, /* Index: 3 */ 1770 sd_core_iostart, /* Index: 4 */ 1771 1772 /* Chain for buf IO for removable-media targets (PM enabled) */ 1773 sd_mapblockaddr_iostart, /* Index: 5 */ 1774 sd_mapblocksize_iostart, /* Index: 6 */ 1775 sd_pm_iostart, /* Index: 7 */ 1776 sd_core_iostart, /* Index: 8 */ 1777 1778 /* Chain for buf IO for removable-media targets (PM disabled) */ 1779 sd_mapblockaddr_iostart, /* Index: 9 */ 1780 sd_mapblocksize_iostart, /* Index: 10 */ 1781 sd_core_iostart, /* Index: 11 */ 1782 1783 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1784 sd_mapblockaddr_iostart, /* Index: 12 */ 1785 sd_checksum_iostart, /* Index: 13 */ 1786 sd_pm_iostart, /* Index: 14 */ 1787 sd_core_iostart, /* Index: 15 */ 1788 1789 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1790 sd_mapblockaddr_iostart, /* Index: 16 */ 1791 sd_checksum_iostart, /* Index: 17 */ 1792 sd_core_iostart, /* Index: 18 */ 1793 1794 /* Chain for USCSI commands (all targets) */ 1795 sd_pm_iostart, /* Index: 19 */ 1796 sd_core_iostart, /* Index: 20 */ 1797 1798 /* Chain for checksumming USCSI commands (all targets) */ 1799 sd_checksum_uscsi_iostart, /* Index: 21 */ 1800 sd_pm_iostart, /* Index: 22 */ 1801 sd_core_iostart, /* Index: 23 */ 1802 1803 /* Chain for "direct" USCSI commands (all targets) */ 1804 sd_core_iostart, /* Index: 24 */ 1805 1806 /* Chain for "direct priority" USCSI commands (all targets) */ 1807 sd_core_iostart, /* Index: 25 */ 1808 }; 1809 1810 /* 1811 * Macros to locate the first function of each iostart chain in the 1812 * sd_iostart_chain[] array. These are located by the index in the array. 1813 */ 1814 #define SD_CHAIN_DISK_IOSTART 0 1815 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1816 #define SD_CHAIN_RMMEDIA_IOSTART 5 1817 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1818 #define SD_CHAIN_CHKSUM_IOSTART 12 1819 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1820 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1821 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1822 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1823 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1824 1825 1826 /* 1827 * Table of function pointers for the iodone-side routines for the driver- 1828 * internal layering mechanism. The calling sequence for iodone routines 1829 * uses a decrementing table index, so the last routine called in a chain 1830 * must be at the lowest array index location for that chain. The last 1831 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1832 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1833 * of the functions in an iodone side chain must correspond to the ordering 1834 * of the iostart routines for that chain. Note that there is no iodone 1835 * side routine that corresponds to sd_core_iostart(), so there is no 1836 * entry in the table for this. 
1837 */
1838
1839 static sd_chain_t sd_iodone_chain[] = {
1840
1841 /* Chain for buf IO for disk drive targets (PM enabled) */
1842 sd_buf_iodone, /* Index: 0 */
1843 sd_mapblockaddr_iodone, /* Index: 1 */
1844 sd_pm_iodone, /* Index: 2 */
1845
1846 /* Chain for buf IO for disk drive targets (PM disabled) */
1847 sd_buf_iodone, /* Index: 3 */
1848 sd_mapblockaddr_iodone, /* Index: 4 */
1849
1850 /* Chain for buf IO for removable-media targets (PM enabled) */
1851 sd_buf_iodone, /* Index: 5 */
1852 sd_mapblockaddr_iodone, /* Index: 6 */
1853 sd_mapblocksize_iodone, /* Index: 7 */
1854 sd_pm_iodone, /* Index: 8 */
1855
1856 /* Chain for buf IO for removable-media targets (PM disabled) */
1857 sd_buf_iodone, /* Index: 9 */
1858 sd_mapblockaddr_iodone, /* Index: 10 */
1859 sd_mapblocksize_iodone, /* Index: 11 */
1860
1861 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1862 sd_buf_iodone, /* Index: 12 */
1863 sd_mapblockaddr_iodone, /* Index: 13 */
1864 sd_checksum_iodone, /* Index: 14 */
1865 sd_pm_iodone, /* Index: 15 */
1866
1867 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1868 sd_buf_iodone, /* Index: 16 */
1869 sd_mapblockaddr_iodone, /* Index: 17 */
1870 sd_checksum_iodone, /* Index: 18 */
1871
1872 /* Chain for USCSI commands (non-checksum targets) */
1873 sd_uscsi_iodone, /* Index: 19 */
1874 sd_pm_iodone, /* Index: 20 */
1875
1876 /* Chain for USCSI commands (checksum targets) */
1877 sd_uscsi_iodone, /* Index: 21 */
1878 sd_checksum_uscsi_iodone, /* Index: 22 */
1879 sd_pm_iodone, /* Index: 23 */
1880
1881 /* Chain for "direct" USCSI commands (all targets) */
1882 sd_uscsi_iodone, /* Index: 24 */
1883
1884 /* Chain for "direct priority" USCSI commands (all targets) */
1885 sd_uscsi_iodone, /* Index: 25 */
1886 };
1887
1888
1889 /*
1890 * Macros to locate the "first" function in the sd_iodone_chain[] array for
1891 * each iodone-side chain. These are located by the array index, but as the
1892 * iodone side functions are called in a decrementing-index order, the
1893 * highest index number in each chain must be specified (as these correspond
1894 * to the first function in the iodone chain that will be called by the core
1895 * at IO completion time).
1896 */
1897
1898 #define SD_CHAIN_DISK_IODONE 2
1899 #define SD_CHAIN_DISK_IODONE_NO_PM 4
1900 #define SD_CHAIN_RMMEDIA_IODONE 8
1901 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
1902 #define SD_CHAIN_CHKSUM_IODONE 15
1903 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
1904 #define SD_CHAIN_USCSI_CMD_IODONE 20
1905 #define SD_CHAIN_USCSI_CHKSUM_IODONE 23
1906 #define SD_CHAIN_DIRECT_CMD_IODONE 24
1907 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
1908
1909
1910
1911
1912 /*
1913 * Array to map a layering chain index to the appropriate initpkt routine.
1914 * The redundant entries are present so that the index used for accessing
1915 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1916 * with this table as well.
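 *
 * For illustration (an editorial sketch, not part of the driver): the
 * packet setup routine for an IO is looked up with the same chain index
 * that drives the iostart table, so buf-based chains resolve to
 * sd_initpkt_for_buf and uscsi-based chains to sd_initpkt_for_uscsi,
 * roughly as:
 *
 *	sd_initpkt_t fp = sd_initpkt_map[xp->xb_chain_iostart];
 *	rval = (*fp)(bp, &pktp);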
1917 */
1918 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1919
1920 static sd_initpkt_t sd_initpkt_map[] = {
1921
1922 /* Chain for buf IO for disk drive targets (PM enabled) */
1923 sd_initpkt_for_buf, /* Index: 0 */
1924 sd_initpkt_for_buf, /* Index: 1 */
1925 sd_initpkt_for_buf, /* Index: 2 */
1926
1927 /* Chain for buf IO for disk drive targets (PM disabled) */
1928 sd_initpkt_for_buf, /* Index: 3 */
1929 sd_initpkt_for_buf, /* Index: 4 */
1930
1931 /* Chain for buf IO for removable-media targets (PM enabled) */
1932 sd_initpkt_for_buf, /* Index: 5 */
1933 sd_initpkt_for_buf, /* Index: 6 */
1934 sd_initpkt_for_buf, /* Index: 7 */
1935 sd_initpkt_for_buf, /* Index: 8 */
1936
1937 /* Chain for buf IO for removable-media targets (PM disabled) */
1938 sd_initpkt_for_buf, /* Index: 9 */
1939 sd_initpkt_for_buf, /* Index: 10 */
1940 sd_initpkt_for_buf, /* Index: 11 */
1941
1942 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1943 sd_initpkt_for_buf, /* Index: 12 */
1944 sd_initpkt_for_buf, /* Index: 13 */
1945 sd_initpkt_for_buf, /* Index: 14 */
1946 sd_initpkt_for_buf, /* Index: 15 */
1947
1948 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1949 sd_initpkt_for_buf, /* Index: 16 */
1950 sd_initpkt_for_buf, /* Index: 17 */
1951 sd_initpkt_for_buf, /* Index: 18 */
1952
1953 /* Chain for USCSI commands (non-checksum targets) */
1954 sd_initpkt_for_uscsi, /* Index: 19 */
1955 sd_initpkt_for_uscsi, /* Index: 20 */
1956
1957 /* Chain for USCSI commands (checksum targets) */
1958 sd_initpkt_for_uscsi, /* Index: 21 */
1959 sd_initpkt_for_uscsi, /* Index: 22 */
1960 sd_initpkt_for_uscsi, /* Index: 23 */
1961
1962 /* Chain for "direct" USCSI commands (all targets) */
1963 sd_initpkt_for_uscsi, /* Index: 24 */
1964
1965 /* Chain for "direct priority" USCSI commands (all targets) */
1966 sd_initpkt_for_uscsi, /* Index: 25 */
1967
1968 };
1969
1970
1971 /*
1972 * Array to map a layering chain index to the appropriate destroypkt routine.
1973 * The redundant entries are present so that the index used for accessing
1974 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1975 * with this table as well.
1976 */
1977 typedef void (*sd_destroypkt_t)(struct buf *);
1978
1979 static sd_destroypkt_t sd_destroypkt_map[] = {
1980
1981 /* Chain for buf IO for disk drive targets (PM enabled) */
1982 sd_destroypkt_for_buf, /* Index: 0 */
1983 sd_destroypkt_for_buf, /* Index: 1 */
1984 sd_destroypkt_for_buf, /* Index: 2 */
1985
1986 /* Chain for buf IO for disk drive targets (PM disabled) */
1987 sd_destroypkt_for_buf, /* Index: 3 */
1988 sd_destroypkt_for_buf, /* Index: 4 */
1989
1990 /* Chain for buf IO for removable-media targets (PM enabled) */
1991 sd_destroypkt_for_buf, /* Index: 5 */
1992 sd_destroypkt_for_buf, /* Index: 6 */
1993 sd_destroypkt_for_buf, /* Index: 7 */
1994 sd_destroypkt_for_buf, /* Index: 8 */
1995
1996 /* Chain for buf IO for removable-media targets (PM disabled) */
1997 sd_destroypkt_for_buf, /* Index: 9 */
1998 sd_destroypkt_for_buf, /* Index: 10 */
1999 sd_destroypkt_for_buf, /* Index: 11 */
2000
2001 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2002 sd_destroypkt_for_buf, /* Index: 12 */
2003 sd_destroypkt_for_buf, /* Index: 13 */
2004 sd_destroypkt_for_buf, /* Index: 14 */
2005 sd_destroypkt_for_buf, /* Index: 15 */
2006
2007 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2008 sd_destroypkt_for_buf, /* Index: 16 */
2009 sd_destroypkt_for_buf, /* Index: 17 */
2010 sd_destroypkt_for_buf, /* Index: 18 */
2011
2012 /* Chain for USCSI commands (non-checksum targets) */
2013 sd_destroypkt_for_uscsi, /* Index: 19 */
2014 sd_destroypkt_for_uscsi, /* Index: 20 */
2015
2016 /* Chain for USCSI commands (checksum targets) */
2017 sd_destroypkt_for_uscsi, /* Index: 21 */
2018 sd_destroypkt_for_uscsi, /* Index: 22 */
2019 sd_destroypkt_for_uscsi, /* Index: 23 */
2020
2021 /* Chain for "direct" USCSI commands (all targets) */
2022 sd_destroypkt_for_uscsi, /* Index: 24 */
2023
2024 /* Chain for "direct priority" USCSI commands (all targets) */
2025 sd_destroypkt_for_uscsi, /* Index: 25 */
2026
2027 };
2028
2029
2030
2031 /*
2032 * Array to map a layering chain index to the appropriate chain "type".
2033 * The chain type indicates a specific property/usage of the chain.
2034 * The redundant entries are present so that the index used for accessing
2035 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2036 * with this table as well.
2037 */
2038
2039 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
2040 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
2041 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
2042 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
2043 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
2044 /* (for error recovery) */
2045
2046 static int sd_chain_type_map[] = {
2047
2048 /* Chain for buf IO for disk drive targets (PM enabled) */
2049 SD_CHAIN_BUFIO, /* Index: 0 */
2050 SD_CHAIN_BUFIO, /* Index: 1 */
2051 SD_CHAIN_BUFIO, /* Index: 2 */
2052
2053 /* Chain for buf IO for disk drive targets (PM disabled) */
2054 SD_CHAIN_BUFIO, /* Index: 3 */
2055 SD_CHAIN_BUFIO, /* Index: 4 */
2056
2057 /* Chain for buf IO for removable-media targets (PM enabled) */
2058 SD_CHAIN_BUFIO, /* Index: 5 */
2059 SD_CHAIN_BUFIO, /* Index: 6 */
2060 SD_CHAIN_BUFIO, /* Index: 7 */
2061 SD_CHAIN_BUFIO, /* Index: 8 */
2062
2063 /* Chain for buf IO for removable-media targets (PM disabled) */
2064 SD_CHAIN_BUFIO, /* Index: 9 */
2065 SD_CHAIN_BUFIO, /* Index: 10 */
2066 SD_CHAIN_BUFIO, /* Index: 11 */
2067
2068 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2069 SD_CHAIN_BUFIO, /* Index: 12 */
2070 SD_CHAIN_BUFIO, /* Index: 13 */
2071 SD_CHAIN_BUFIO, /* Index: 14 */
2072 SD_CHAIN_BUFIO, /* Index: 15 */
2073
2074 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2075 SD_CHAIN_BUFIO, /* Index: 16 */
2076 SD_CHAIN_BUFIO, /* Index: 17 */
2077 SD_CHAIN_BUFIO, /* Index: 18 */
2078
2079 /* Chain for USCSI commands (non-checksum targets) */
2080 SD_CHAIN_USCSI, /* Index: 19 */
2081 SD_CHAIN_USCSI, /* Index: 20 */
2082
2083 /* Chain for USCSI commands (checksum targets) */
2084 SD_CHAIN_USCSI, /* Index: 21 */
2085 SD_CHAIN_USCSI, /* Index: 22 */
2086 SD_CHAIN_USCSI, /* Index: 23 */
2087
2088 /* Chain for "direct" USCSI commands (all targets) */
2089 SD_CHAIN_DIRECT, /* Index: 24 */
2090
2091 /* Chain for "direct priority" USCSI commands (all targets) */
2092 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */
2093 };
2094
2095
2096 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2097 #define SD_IS_BUFIO(xp) \
2098 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2099
2100 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2101 #define SD_IS_DIRECT_PRIORITY(xp) \
2102 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2103
2104
2105
2106 /*
2107 * Struct, array, and macros to map a specific chain to the appropriate
2108 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2109 *
2110 * The sd_chain_index_map[] array is used at attach time to set the various
2111 * un_xxx_chain type members of the sd_lun softstate to the specific layering
2112 * chain to be used with the instance. This allows different instances to use
2113 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2114 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2115 * values at sd_xbuf init time, this allows (1) the layering chains to be
2116 * changed dynamically and without locking, and (2) a layer to update the
2117 * xb_chain_io[start|done] member in a given xbuf with its current index value,
2118 * allowing deferred processing of an IO within the same chain from a
2119 * different execution context.
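 *
 * For illustration (an editorial sketch; the actual assignments are made
 * in the attach code later in this file): a plain disk instance would be
 * set up roughly as
 *
 *	un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
 *	un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
 *
 * and each sd_xbuf would then pick up its layering indexes from the map:
 *
 *	xp->xb_chain_iostart =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iostart_index;
 *	xp->xb_chain_iodone =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iodone_index;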
2120 */ 2121 2122 struct sd_chain_index { 2123 int sci_iostart_index; 2124 int sci_iodone_index; 2125 }; 2126 2127 static struct sd_chain_index sd_chain_index_map[] = { 2128 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2129 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2130 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2131 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2132 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2133 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2134 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2135 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2136 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2137 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2138 }; 2139 2140 2141 /* 2142 * The following are indexes into the sd_chain_index_map[] array. 2143 */ 2144 2145 /* un->un_buf_chain_type must be set to one of these */ 2146 #define SD_CHAIN_INFO_DISK 0 2147 #define SD_CHAIN_INFO_DISK_NO_PM 1 2148 #define SD_CHAIN_INFO_RMMEDIA 2 2149 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2150 #define SD_CHAIN_INFO_CHKSUM 4 2151 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2152 2153 /* un->un_uscsi_chain_type must be set to one of these */ 2154 #define SD_CHAIN_INFO_USCSI_CMD 6 2155 /* USCSI with PM disabled is the same as DIRECT */ 2156 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2157 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2158 2159 /* un->un_direct_chain_type must be set to one of these */ 2160 #define SD_CHAIN_INFO_DIRECT_CMD 8 2161 2162 /* un->un_priority_chain_type must be set to one of these */ 2163 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2164 2165 /* size for devid inquiries */ 2166 #define MAX_INQUIRY_SIZE 0xF0 2167 2168 /* 2169 * Macros used by functions to pass a given buf(9S) struct along to the 2170 * next function in the layering chain for further processing. 2171 * 2172 * In the following macros, passing more than three arguments to the called 2173 * routines causes the optimizer for the SPARC compiler to stop doing tail 2174 * call elimination which results in significant performance degradation. 2175 */ 2176 #define SD_BEGIN_IOSTART(index, un, bp) \ 2177 ((*(sd_iostart_chain[index]))(index, un, bp)) 2178 2179 #define SD_BEGIN_IODONE(index, un, bp) \ 2180 ((*(sd_iodone_chain[index]))(index, un, bp)) 2181 2182 #define SD_NEXT_IOSTART(index, un, bp) \ 2183 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2184 2185 #define SD_NEXT_IODONE(index, un, bp) \ 2186 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2187 2188 /* 2189 * Function: _init 2190 * 2191 * Description: This is the driver _init(9E) entry point. 2192 * 2193 * Return Code: Returns the value from mod_install(9F) or 2194 * ddi_soft_state_init(9F) as appropriate. 2195 * 2196 * Context: Called when driver module loaded. 
2197 */ 2198 2199 int 2200 _init(void) 2201 { 2202 int err; 2203 2204 /* establish driver name from module name */ 2205 sd_label = (char *)mod_modname(&modlinkage); 2206 2207 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2208 SD_MAXUNIT); 2209 2210 if (err != 0) { 2211 return (err); 2212 } 2213 2214 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2215 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2216 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2217 2218 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2219 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2220 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2221 2222 /* 2223 * it's ok to init here even for fibre device 2224 */ 2225 sd_scsi_probe_cache_init(); 2226 2227 sd_scsi_target_lun_init(); 2228 2229 /* 2230 * Creating taskq before mod_install ensures that all callers (threads) 2231 * that enter the module after a successful mod_install encounter 2232 * a valid taskq. 2233 */ 2234 sd_taskq_create(); 2235 2236 err = mod_install(&modlinkage); 2237 if (err != 0) { 2238 /* delete taskq if install fails */ 2239 sd_taskq_delete(); 2240 2241 mutex_destroy(&sd_detach_mutex); 2242 mutex_destroy(&sd_log_mutex); 2243 mutex_destroy(&sd_label_mutex); 2244 2245 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2246 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2247 cv_destroy(&sd_tr.srq_inprocess_cv); 2248 2249 sd_scsi_probe_cache_fini(); 2250 2251 sd_scsi_target_lun_fini(); 2252 2253 ddi_soft_state_fini(&sd_state); 2254 return (err); 2255 } 2256 2257 return (err); 2258 } 2259 2260 2261 /* 2262 * Function: _fini 2263 * 2264 * Description: This is the driver _fini(9E) entry point. 2265 * 2266 * Return Code: Returns the value from mod_remove(9F) 2267 * 2268 * Context: Called when driver module is unloaded. 2269 */ 2270 2271 int 2272 _fini(void) 2273 { 2274 int err; 2275 2276 if ((err = mod_remove(&modlinkage)) != 0) { 2277 return (err); 2278 } 2279 2280 sd_taskq_delete(); 2281 2282 mutex_destroy(&sd_detach_mutex); 2283 mutex_destroy(&sd_log_mutex); 2284 mutex_destroy(&sd_label_mutex); 2285 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2286 2287 sd_scsi_probe_cache_fini(); 2288 2289 sd_scsi_target_lun_fini(); 2290 2291 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2292 cv_destroy(&sd_tr.srq_inprocess_cv); 2293 2294 ddi_soft_state_fini(&sd_state); 2295 2296 return (err); 2297 } 2298 2299 2300 /* 2301 * Function: _info 2302 * 2303 * Description: This is the driver _info(9E) entry point. 2304 * 2305 * Arguments: modinfop - pointer to the driver modinfo structure 2306 * 2307 * Return Code: Returns the value from mod_info(9F). 2308 * 2309 * Context: Kernel thread context 2310 */ 2311 2312 int 2313 _info(struct modinfo *modinfop) 2314 { 2315 return (mod_info(&modlinkage, modinfop)); 2316 } 2317 2318 2319 /* 2320 * The following routines implement the driver message logging facility. 2321 * They provide component- and level- based debug output filtering. 2322 * Output may also be restricted to messages for a single instance by 2323 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2324 * to NULL, then messages for all instances are printed. 2325 * 2326 * These routines have been cloned from each other due to the language 2327 * constraints of macros and variable argument list processing. 2328 */ 2329 2330 2331 /* 2332 * Function: sd_log_err 2333 * 2334 * Description: This routine is called by the SD_ERROR macro for debug 2335 * logging of error conditions. 
2336 *
2337 * Arguments: comp - driver component being logged
2338 * un - pointer to driver soft state (unit) structure
2339 * fmt - error string and format to be logged
2340 */
2341
2342 static void
2343 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2344 {
2345 va_list ap;
2346 dev_info_t *dev;
2347
2348 ASSERT(un != NULL);
2349 dev = SD_DEVINFO(un);
2350 ASSERT(dev != NULL);
2351
2352 /*
2353 * Filter messages based on the global component and level masks.
2354 * Also print if un matches the value of sd_debug_un, or if
2355 * sd_debug_un is set to NULL.
2356 */
2357 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2358 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2359 mutex_enter(&sd_log_mutex);
2360 va_start(ap, fmt);
2361 (void) vsprintf(sd_log_buf, fmt, ap);
2362 va_end(ap);
2363 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2364 mutex_exit(&sd_log_mutex);
2365 }
2366 #ifdef SD_FAULT_INJECTION
2367 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2368 if (un->sd_injection_mask & comp) {
2369 mutex_enter(&sd_log_mutex);
2370 va_start(ap, fmt);
2371 (void) vsprintf(sd_log_buf, fmt, ap);
2372 va_end(ap);
2373 sd_injection_log(sd_log_buf, un);
2374 mutex_exit(&sd_log_mutex);
2375 }
2376 #endif
2377 }
2378
2379
2380 /*
2381 * Function: sd_log_info
2382 *
2383 * Description: This routine is called by the SD_INFO macro for debug
2384 * logging of general purpose informational conditions.
2385 *
2386 * Arguments: component - driver component being logged
2387 * un - pointer to driver soft state (unit) structure
2388 * fmt - info string and format to be logged
2389 */
2390
2391 static void
2392 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2393 {
2394 va_list ap;
2395 dev_info_t *dev;
2396
2397 ASSERT(un != NULL);
2398 dev = SD_DEVINFO(un);
2399 ASSERT(dev != NULL);
2400
2401 /*
2402 * Filter messages based on the global component and level masks.
2403 * Also print if un matches the value of sd_debug_un, or if
2404 * sd_debug_un is set to NULL.
2405 */
2406 if ((sd_component_mask & component) &&
2407 (sd_level_mask & SD_LOGMASK_INFO) &&
2408 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2409 mutex_enter(&sd_log_mutex);
2410 va_start(ap, fmt);
2411 (void) vsprintf(sd_log_buf, fmt, ap);
2412 va_end(ap);
2413 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2414 mutex_exit(&sd_log_mutex);
2415 }
2416 #ifdef SD_FAULT_INJECTION
2417 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2418 if (un->sd_injection_mask & component) {
2419 mutex_enter(&sd_log_mutex);
2420 va_start(ap, fmt);
2421 (void) vsprintf(sd_log_buf, fmt, ap);
2422 va_end(ap);
2423 sd_injection_log(sd_log_buf, un);
2424 mutex_exit(&sd_log_mutex);
2425 }
2426 #endif
2427 }
2428
2429
2430 /*
2431 * Function: sd_log_trace
2432 *
2433 * Description: This routine is called by the SD_TRACE macro for debug
2434 * logging of trace conditions (i.e. function entry/exit).
2435 *
2436 * Arguments: component - driver component being logged
2437 * un - pointer to driver soft state (unit) structure
2438 * fmt - trace string and format to be logged
2439 */
2440
2441 static void
2442 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2443 {
2444 va_list ap;
2445 dev_info_t *dev;
2446
2447 ASSERT(un != NULL);
2448 dev = SD_DEVINFO(un);
2449 ASSERT(dev != NULL);
2450
2451 /*
2452 * Filter messages based on the global component and level masks.
2453 * Also print if un matches the value of sd_debug_un, or if
2454 * sd_debug_un is set to NULL.
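 *
 * For illustration (an editorial note; the address and mask values
 * shown are hypothetical): since these knobs are plain globals, a
 * DEBUG kernel can scope the output to one instance from mdb -kw,
 * e.g.:
 *
 *	> sd_component_mask/W 0xffffffff
 *	> sd_level_mask/W 4			(e.g. a trace-level mask)
 *	> sd_debug_un/Z <soft-state-address>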
2455 */ 2456 if ((sd_component_mask & component) && 2457 (sd_level_mask & SD_LOGMASK_TRACE) && 2458 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2459 mutex_enter(&sd_log_mutex); 2460 va_start(ap, fmt); 2461 (void) vsprintf(sd_log_buf, fmt, ap); 2462 va_end(ap); 2463 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2464 mutex_exit(&sd_log_mutex); 2465 } 2466 #ifdef SD_FAULT_INJECTION 2467 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2468 if (un->sd_injection_mask & component) { 2469 mutex_enter(&sd_log_mutex); 2470 va_start(ap, fmt); 2471 (void) vsprintf(sd_log_buf, fmt, ap); 2472 va_end(ap); 2473 sd_injection_log(sd_log_buf, un); 2474 mutex_exit(&sd_log_mutex); 2475 } 2476 #endif 2477 } 2478 2479 2480 /* 2481 * Function: sdprobe 2482 * 2483 * Description: This is the driver probe(9e) entry point function. 2484 * 2485 * Arguments: devi - opaque device info handle 2486 * 2487 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2488 * DDI_PROBE_FAILURE: If the probe failed. 2489 * DDI_PROBE_PARTIAL: If the instance is not present now, 2490 * but may be present in the future. 2491 */ 2492 2493 static int 2494 sdprobe(dev_info_t *devi) 2495 { 2496 struct scsi_device *devp; 2497 int rval; 2498 int instance; 2499 2500 /* 2501 * if it wasn't for pln, sdprobe could actually be nulldev 2502 * in the "__fibre" case. 2503 */ 2504 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2505 return (DDI_PROBE_DONTCARE); 2506 } 2507 2508 devp = ddi_get_driver_private(devi); 2509 2510 if (devp == NULL) { 2511 /* Ooops... nexus driver is mis-configured... */ 2512 return (DDI_PROBE_FAILURE); 2513 } 2514 2515 instance = ddi_get_instance(devi); 2516 2517 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2518 return (DDI_PROBE_PARTIAL); 2519 } 2520 2521 /* 2522 * Call the SCSA utility probe routine to see if we actually 2523 * have a target at this SCSI nexus. 2524 */ 2525 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2526 case SCSIPROBE_EXISTS: 2527 switch (devp->sd_inq->inq_dtype) { 2528 case DTYPE_DIRECT: 2529 rval = DDI_PROBE_SUCCESS; 2530 break; 2531 case DTYPE_RODIRECT: 2532 /* CDs etc. Can be removable media */ 2533 rval = DDI_PROBE_SUCCESS; 2534 break; 2535 case DTYPE_OPTICAL: 2536 /* 2537 * Rewritable optical driver HP115AA 2538 * Can also be removable media 2539 */ 2540 2541 /* 2542 * Do not attempt to bind to DTYPE_OPTICAL if 2543 * pre solaris 9 sparc sd behavior is required 2544 * 2545 * If first time through and sd_dtype_optical_bind 2546 * has not been set in /etc/system check properties 2547 */ 2548 2549 if (sd_dtype_optical_bind < 0) { 2550 sd_dtype_optical_bind = ddi_prop_get_int 2551 (DDI_DEV_T_ANY, devi, 0, 2552 "optical-device-bind", 1); 2553 } 2554 2555 if (sd_dtype_optical_bind == 0) { 2556 rval = DDI_PROBE_FAILURE; 2557 } else { 2558 rval = DDI_PROBE_SUCCESS; 2559 } 2560 break; 2561 2562 case DTYPE_NOTPRESENT: 2563 default: 2564 rval = DDI_PROBE_FAILURE; 2565 break; 2566 } 2567 break; 2568 default: 2569 rval = DDI_PROBE_PARTIAL; 2570 break; 2571 } 2572 2573 /* 2574 * This routine checks for resource allocation prior to freeing, 2575 * so it will take care of the "smart probing" case where a 2576 * scsi_probe() may or may not have been issued and will *not* 2577 * free previously-freed resources. 2578 */ 2579 scsi_unprobe(devp); 2580 return (rval); 2581 } 2582 2583 2584 /* 2585 * Function: sdinfo 2586 * 2587 * Description: This is the driver getinfo(9e) entry point function. 
2588 * Given the device number, return the devinfo pointer from 2589 * the scsi_device structure or the instance number 2590 * associated with the dev_t. 2591 * 2592 * Arguments: dip - pointer to device info structure 2593 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2594 * DDI_INFO_DEVT2INSTANCE) 2595 * arg - driver dev_t 2596 * resultp - user buffer for request response 2597 * 2598 * Return Code: DDI_SUCCESS 2599 * DDI_FAILURE 2600 */ 2601 /* ARGSUSED */ 2602 static int 2603 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2604 { 2605 struct sd_lun *un; 2606 dev_t dev; 2607 int instance; 2608 int error; 2609 2610 switch (infocmd) { 2611 case DDI_INFO_DEVT2DEVINFO: 2612 dev = (dev_t)arg; 2613 instance = SDUNIT(dev); 2614 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2615 return (DDI_FAILURE); 2616 } 2617 *result = (void *) SD_DEVINFO(un); 2618 error = DDI_SUCCESS; 2619 break; 2620 case DDI_INFO_DEVT2INSTANCE: 2621 dev = (dev_t)arg; 2622 instance = SDUNIT(dev); 2623 *result = (void *)(uintptr_t)instance; 2624 error = DDI_SUCCESS; 2625 break; 2626 default: 2627 error = DDI_FAILURE; 2628 } 2629 return (error); 2630 } 2631 2632 /* 2633 * Function: sd_prop_op 2634 * 2635 * Description: This is the driver prop_op(9e) entry point function. 2636 * Return the number of blocks for the partition in question 2637 * or forward the request to the property facilities. 2638 * 2639 * Arguments: dev - device number 2640 * dip - pointer to device info structure 2641 * prop_op - property operator 2642 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2643 * name - pointer to property name 2644 * valuep - pointer or address of the user buffer 2645 * lengthp - property length 2646 * 2647 * Return Code: DDI_PROP_SUCCESS 2648 * DDI_PROP_NOT_FOUND 2649 * DDI_PROP_UNDEFINED 2650 * DDI_PROP_NO_MEMORY 2651 * DDI_PROP_BUF_TOO_SMALL 2652 */ 2653 2654 static int 2655 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2656 char *name, caddr_t valuep, int *lengthp) 2657 { 2658 struct sd_lun *un; 2659 2660 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL) 2661 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2662 name, valuep, lengthp)); 2663 2664 return (cmlb_prop_op(un->un_cmlbhandle, 2665 dev, dip, prop_op, mod_flags, name, valuep, lengthp, 2666 SDPART(dev), (void *)SD_PATH_DIRECT)); 2667 } 2668 2669 /* 2670 * The following functions are for smart probing: 2671 * sd_scsi_probe_cache_init() 2672 * sd_scsi_probe_cache_fini() 2673 * sd_scsi_clear_probe_cache() 2674 * sd_scsi_probe_with_cache() 2675 */ 2676 2677 /* 2678 * Function: sd_scsi_probe_cache_init 2679 * 2680 * Description: Initializes the probe response cache mutex and head pointer. 2681 * 2682 * Context: Kernel thread context 2683 */ 2684 2685 static void 2686 sd_scsi_probe_cache_init(void) 2687 { 2688 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2689 sd_scsi_probe_cache_head = NULL; 2690 } 2691 2692 2693 /* 2694 * Function: sd_scsi_probe_cache_fini 2695 * 2696 * Description: Frees all resources associated with the probe response cache. 
2697 * 2698 * Context: Kernel thread context 2699 */ 2700 2701 static void 2702 sd_scsi_probe_cache_fini(void) 2703 { 2704 struct sd_scsi_probe_cache *cp; 2705 struct sd_scsi_probe_cache *ncp; 2706 2707 /* Clean up our smart probing linked list */ 2708 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2709 ncp = cp->next; 2710 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2711 } 2712 sd_scsi_probe_cache_head = NULL; 2713 mutex_destroy(&sd_scsi_probe_cache_mutex); 2714 } 2715 2716 2717 /* 2718 * Function: sd_scsi_clear_probe_cache 2719 * 2720 * Description: This routine clears the probe response cache. This is 2721 * done when open() returns ENXIO so that when deferred 2722 * attach is attempted (possibly after a device has been 2723 * turned on) we will retry the probe. Since we don't know 2724 * which target we failed to open, we just clear the 2725 * entire cache. 2726 * 2727 * Context: Kernel thread context 2728 */ 2729 2730 static void 2731 sd_scsi_clear_probe_cache(void) 2732 { 2733 struct sd_scsi_probe_cache *cp; 2734 int i; 2735 2736 mutex_enter(&sd_scsi_probe_cache_mutex); 2737 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2738 /* 2739 * Reset all entries to SCSIPROBE_EXISTS. This will 2740 * force probing to be performed the next time 2741 * sd_scsi_probe_with_cache is called. 2742 */ 2743 for (i = 0; i < NTARGETS_WIDE; i++) { 2744 cp->cache[i] = SCSIPROBE_EXISTS; 2745 } 2746 } 2747 mutex_exit(&sd_scsi_probe_cache_mutex); 2748 } 2749 2750 2751 /* 2752 * Function: sd_scsi_probe_with_cache 2753 * 2754 * Description: This routine implements support for a scsi device probe 2755 * with cache. The driver maintains a cache of the target 2756 * responses to scsi probes. If we get no response from a 2757 * target during a probe inquiry, we remember that, and we 2758 * avoid additional calls to scsi_probe on non-zero LUNs 2759 * on the same target until the cache is cleared. By doing 2760 * so we avoid the 1/4 sec selection timeout for nonzero 2761 * LUNs. lun0 of a target is always probed. 2762 * 2763 * Arguments: devp - Pointer to a scsi_device(9S) structure 2764 * waitfunc - indicates what the allocator routines should 2765 * do when resources are not available. This value 2766 * is passed on to scsi_probe() when that routine 2767 * is called. 2768 * 2769 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2770 * otherwise the value returned by scsi_probe(9F). 
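 *
 * For illustration (an editorial note derived from the code below): if
 * scsi_probe() of LUN 0 on target 3 returns SCSIPROBE_NORESP, that
 * result is recorded in cp->cache[3]; subsequent probes of LUNs 1..N on
 * target 3 then return SCSIPROBE_NORESP straight from the cache rather
 * than each waiting out a selection timeout. Probing LUN 0 again resets
 * cp->cache[3] to SCSIPROBE_EXISTS and forces a fresh scsi_probe().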
2771 *
2772 * Context: Kernel thread context
2773 */
2774
2775 static int
2776 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
2777 {
2778 struct sd_scsi_probe_cache *cp;
2779 dev_info_t *pdip = ddi_get_parent(devp->sd_dev);
2780 int lun, tgt;
2781
2782 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2783 SCSI_ADDR_PROP_LUN, 0);
2784 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2785 SCSI_ADDR_PROP_TARGET, -1);
2786
2787 /* Make sure caching enabled and target in range */
2788 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
2789 /* do it the old way (no cache) */
2790 return (scsi_probe(devp, waitfn));
2791 }
2792
2793 mutex_enter(&sd_scsi_probe_cache_mutex);
2794
2795 /* Find the cache for this scsi bus instance */
2796 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2797 if (cp->pdip == pdip) {
2798 break;
2799 }
2800 }
2801
2802 /* If we can't find a cache for this pdip, create one */
2803 if (cp == NULL) {
2804 int i;
2805
2806 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
2807 KM_SLEEP);
2808 cp->pdip = pdip;
2809 cp->next = sd_scsi_probe_cache_head;
2810 sd_scsi_probe_cache_head = cp;
2811 for (i = 0; i < NTARGETS_WIDE; i++) {
2812 cp->cache[i] = SCSIPROBE_EXISTS;
2813 }
2814 }
2815
2816 mutex_exit(&sd_scsi_probe_cache_mutex);
2817
2818 /* Recompute the cache for this target if LUN zero */
2819 if (lun == 0) {
2820 cp->cache[tgt] = SCSIPROBE_EXISTS;
2821 }
2822
2823 /* Don't probe if cache remembers a NORESP from a previous LUN. */
2824 if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
2825 return (SCSIPROBE_NORESP);
2826 }
2827
2828 /* Do the actual probe; save & return the result */
2829 return (cp->cache[tgt] = scsi_probe(devp, waitfn));
2830 }
2831
2832
2833 /*
2834 * Function: sd_scsi_target_lun_init
2835 *
2836 * Description: Initializes the attached lun chain mutex and head pointer.
2837 *
2838 * Context: Kernel thread context
2839 */
2840
2841 static void
2842 sd_scsi_target_lun_init(void)
2843 {
2844 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
2845 sd_scsi_target_lun_head = NULL;
2846 }
2847
2848
2849 /*
2850 * Function: sd_scsi_target_lun_fini
2851 *
2852 * Description: Frees all resources associated with the attached lun
2853 * chain.
2854 *
2855 * Context: Kernel thread context
2856 */
2857
2858 static void
2859 sd_scsi_target_lun_fini(void)
2860 {
2861 struct sd_scsi_hba_tgt_lun *cp;
2862 struct sd_scsi_hba_tgt_lun *ncp;
2863
2864 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
2865 ncp = cp->next;
2866 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
2867 }
2868 sd_scsi_target_lun_head = NULL;
2869 mutex_destroy(&sd_scsi_target_lun_mutex);
2870 }
2871
2872
2873 /*
2874 * Function: sd_scsi_get_target_lun_count
2875 *
2876 * Description: This routine checks the attached lun chain to see how
2877 * many luns are attached on the given SCSI controller and
2878 * target. Currently, some capabilities, such as tagged
2879 * queueing, are supported per target by the HBA, so all luns
2880 * on a target share the same capabilities. Based on this
2881 * assumption, sd should only set these capabilities once per
2882 * target. This function is called when sd needs to decide how
2883 * many luns are already attached on a target.
2884 *
2885 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
2886 * controller device.
2887 * target - The target ID on the controller's SCSI bus.
2888 * 2889 * Return Code: The number of luns attached on the required target and 2890 * controller. 2891 * -1 if target ID is not in parallel SCSI scope or the given 2892 * dip is not in the chain. 2893 * 2894 * Context: Kernel thread context 2895 */ 2896 2897 static int 2898 sd_scsi_get_target_lun_count(dev_info_t *dip, int target) 2899 { 2900 struct sd_scsi_hba_tgt_lun *cp; 2901 2902 if ((target < 0) || (target >= NTARGETS_WIDE)) { 2903 return (-1); 2904 } 2905 2906 mutex_enter(&sd_scsi_target_lun_mutex); 2907 2908 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2909 if (cp->pdip == dip) { 2910 break; 2911 } 2912 } 2913 2914 mutex_exit(&sd_scsi_target_lun_mutex); 2915 2916 if (cp == NULL) { 2917 return (-1); 2918 } 2919 2920 return (cp->nlun[target]); 2921 } 2922 2923 2924 /* 2925 * Function: sd_scsi_update_lun_on_target 2926 * 2927 * Description: This routine is used to update the attached lun chain when a 2928 * lun is attached or detached on a target. 2929 * 2930 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI 2931 * controller device. 2932 * target - The target ID on the controller's SCSI bus. 2933 * flag - Indicate the lun is attached or detached. 2934 * 2935 * Context: Kernel thread context 2936 */ 2937 2938 static void 2939 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag) 2940 { 2941 struct sd_scsi_hba_tgt_lun *cp; 2942 2943 mutex_enter(&sd_scsi_target_lun_mutex); 2944 2945 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) { 2946 if (cp->pdip == dip) { 2947 break; 2948 } 2949 } 2950 2951 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) { 2952 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun), 2953 KM_SLEEP); 2954 cp->pdip = dip; 2955 cp->next = sd_scsi_target_lun_head; 2956 sd_scsi_target_lun_head = cp; 2957 } 2958 2959 mutex_exit(&sd_scsi_target_lun_mutex); 2960 2961 if (cp != NULL) { 2962 if (flag == SD_SCSI_LUN_ATTACH) { 2963 cp->nlun[target] ++; 2964 } else { 2965 cp->nlun[target] --; 2966 } 2967 } 2968 } 2969 2970 2971 /* 2972 * Function: sd_spin_up_unit 2973 * 2974 * Description: Issues the following commands to spin-up the device: 2975 * START STOP UNIT, and INQUIRY. 2976 * 2977 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 2978 * structure for this target. 2979 * 2980 * Return Code: 0 - success 2981 * EIO - failure 2982 * EACCES - reservation conflict 2983 * 2984 * Context: Kernel thread context 2985 */ 2986 2987 static int 2988 sd_spin_up_unit(sd_ssc_t *ssc) 2989 { 2990 size_t resid = 0; 2991 int has_conflict = FALSE; 2992 uchar_t *bufaddr; 2993 int status; 2994 struct sd_lun *un; 2995 2996 ASSERT(ssc != NULL); 2997 un = ssc->ssc_un; 2998 ASSERT(un != NULL); 2999 3000 /* 3001 * Send a throwaway START UNIT command. 3002 * 3003 * If we fail on this, we don't care presently what precisely 3004 * is wrong. EMC's arrays will also fail this with a check 3005 * condition (0x2/0x4/0x3) if the device is "inactive," but 3006 * we don't want to fail the attach because it may become 3007 * "active" later. 3008 */ 3009 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 3010 SD_PATH_DIRECT); 3011 3012 if (status != 0) { 3013 if (status == EACCES) 3014 has_conflict = TRUE; 3015 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3016 } 3017 3018 /* 3019 * Send another INQUIRY command to the target. This is necessary for 3020 * non-removable media direct access devices because their INQUIRY data 3021 * may not be fully qualified until they are spun up (perhaps via the 3022 * START command above). 
Note: This seems to be needed for some
3023 * legacy devices only. The INQUIRY command should succeed even if a
3024 * Reservation Conflict is present.
3025 */
3026 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
3027
3028 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid)
3029 != 0) {
3030 kmem_free(bufaddr, SUN_INQSIZE);
3031 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
3032 return (EIO);
3033 }
3034
3035 /*
3036 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
3037 * Note that this routine does not return a failure here even if the
3038 * INQUIRY command did not return any data. This is a legacy behavior.
3039 */
3040 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
3041 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
3042 }
3043
3044 kmem_free(bufaddr, SUN_INQSIZE);
3045
3046 /* If we hit a reservation conflict above, tell the caller. */
3047 if (has_conflict == TRUE) {
3048 return (EACCES);
3049 }
3050
3051 return (0);
3052 }
3053
3054 #ifdef _LP64
3055 /*
3056 * Function: sd_enable_descr_sense
3057 *
3058 * Description: This routine attempts to select descriptor sense format
3059 * using the Control mode page. Devices that support 64 bit
3060 * LBAs (for >2TB luns) should also implement descriptor
3061 * sense data, so we will call this function whenever we see
3062 * a lun larger than 2TB. If for some reason the device
3063 * supports 64 bit LBAs but doesn't support descriptor sense,
3064 * presumably the mode select will fail. Everything will
3065 * continue to work normally except that we will not get
3066 * complete sense data for commands that fail with an LBA
3067 * larger than 32 bits.
3068 *
3069 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3070 * structure for this target.
3071 *
3072 * Context: Kernel thread context only
3073 */
3074
3075 static void
3076 sd_enable_descr_sense(sd_ssc_t *ssc)
3077 {
3078 uchar_t *header;
3079 struct mode_control_scsi3 *ctrl_bufp;
3080 size_t buflen;
3081 size_t bd_len;
3082 int status;
3083 struct sd_lun *un;
3084
3085 ASSERT(ssc != NULL);
3086 un = ssc->ssc_un;
3087 ASSERT(un != NULL);
3088
3089 /*
3090 * Read MODE SENSE page 0xA, Control Mode Page
3091 */
3092 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
3093 sizeof (struct mode_control_scsi3);
3094 header = kmem_zalloc(buflen, KM_SLEEP);
3095
3096 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
3097 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT);
3098
3099 if (status != 0) {
3100 SD_ERROR(SD_LOG_COMMON, un,
3101 "sd_enable_descr_sense: mode sense ctrl page failed\n");
3102 goto eds_exit;
3103 }
3104
3105 /*
3106 * Determine size of Block Descriptors in order to locate
3107 * the mode page data. ATAPI devices return 0, SCSI devices
3108 * should return MODE_BLK_DESC_LENGTH.
3109 */
3110 bd_len = ((struct mode_header *)header)->bdesc_length;
3111
3112 /* Clear the mode data length field for MODE SELECT */
3113 ((struct mode_header *)header)->length = 0;
3114
3115 ctrl_bufp = (struct mode_control_scsi3 *)
3116 (header + MODE_HEADER_LENGTH + bd_len);
3117
3118 /*
3119 * If the page length is smaller than the expected value,
3120 * the target device doesn't support D_SENSE. Bail out here.
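 *
 * For reference (an editorial sketch; offsets follow the group 0 MODE
 * SENSE form used above): the returned buffer is laid out as
 *
 *	header			(MODE_HEADER_LENGTH bytes)
 *	block descriptor	(bd_len bytes: 0 for ATAPI devices,
 *				MODE_BLK_DESC_LENGTH for SCSI devices)
 *	Control mode page 0xA	(holds the D_SENSE bit)
 *
 * which is why ctrl_bufp is computed above as
 * header + MODE_HEADER_LENGTH + bd_len.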
3121 */ 3122 if (ctrl_bufp->mode_page.length < 3123 sizeof (struct mode_control_scsi3) - 2) { 3124 SD_ERROR(SD_LOG_COMMON, un, 3125 "sd_enable_descr_sense: enable D_SENSE failed\n"); 3126 goto eds_exit; 3127 } 3128 3129 /* 3130 * Clear PS bit for MODE SELECT 3131 */ 3132 ctrl_bufp->mode_page.ps = 0; 3133 3134 /* 3135 * Set D_SENSE to enable descriptor sense format. 3136 */ 3137 ctrl_bufp->d_sense = 1; 3138 3139 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3140 3141 /* 3142 * Use MODE SELECT to commit the change to the D_SENSE bit 3143 */ 3144 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 3145 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT); 3146 3147 if (status != 0) { 3148 SD_INFO(SD_LOG_COMMON, un, 3149 "sd_enable_descr_sense: mode select ctrl page failed\n"); 3150 } else { 3151 kmem_free(header, buflen); 3152 return; 3153 } 3154 3155 eds_exit: 3156 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3157 kmem_free(header, buflen); 3158 } 3159 3160 /* 3161 * Function: sd_reenable_dsense_task 3162 * 3163 * Description: Re-enable descriptor sense after device or bus reset 3164 * 3165 * Context: Executes in a taskq() thread context 3166 */ 3167 static void 3168 sd_reenable_dsense_task(void *arg) 3169 { 3170 struct sd_lun *un = arg; 3171 sd_ssc_t *ssc; 3172 3173 ASSERT(un != NULL); 3174 3175 ssc = sd_ssc_init(un); 3176 sd_enable_descr_sense(ssc); 3177 sd_ssc_fini(ssc); 3178 } 3179 #endif /* _LP64 */ 3180 3181 /* 3182 * Function: sd_set_mmc_caps 3183 * 3184 * Description: This routine determines if the device is MMC compliant and if 3185 * the device supports CDDA via a mode sense of the CDVD 3186 * capabilities mode page. Also checks if the device is a 3187 * dvdram writable device. 3188 * 3189 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 3190 * structure for this target. 3191 * 3192 * Context: Kernel thread context only 3193 */ 3194 3195 static void 3196 sd_set_mmc_caps(sd_ssc_t *ssc) 3197 { 3198 struct mode_header_grp2 *sense_mhp; 3199 uchar_t *sense_page; 3200 caddr_t buf; 3201 int bd_len; 3202 int status; 3203 struct uscsi_cmd com; 3204 int rtn; 3205 uchar_t *out_data_rw, *out_data_hd; 3206 uchar_t *rqbuf_rw, *rqbuf_hd; 3207 struct sd_lun *un; 3208 3209 ASSERT(ssc != NULL); 3210 un = ssc->ssc_un; 3211 ASSERT(un != NULL); 3212 3213 /* 3214 * The flags which will be set in this function are - mmc compliant, 3215 * dvdram writable device, cdda support. Initialize them to FALSE 3216 * and if a capability is detected - it will be set to TRUE. 3217 */ 3218 un->un_f_mmc_cap = FALSE; 3219 un->un_f_dvdram_writable_device = FALSE; 3220 un->un_f_cfg_cdda = FALSE; 3221 3222 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3223 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3224 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3225 3226 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3227 3228 if (status != 0) { 3229 /* command failed; just return */ 3230 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3231 return; 3232 } 3233 /* 3234 * If the mode sense request for the CDROM CAPABILITIES 3235 * page (0x2A) succeeds the device is assumed to be MMC. 3236 */ 3237 un->un_f_mmc_cap = TRUE; 3238 3239 /* Get to the page data */ 3240 sense_mhp = (struct mode_header_grp2 *)buf; 3241 bd_len = (sense_mhp->bdesc_length_hi << 8) | 3242 sense_mhp->bdesc_length_lo; 3243 if (bd_len > MODE_BLK_DESC_LENGTH) { 3244 /* 3245 * We did not get back the expected block descriptor 3246 * length so we cannot determine if the device supports 3247 * CDDA. 
However, we still indicate the device is MMC 3248 * according to the successful response to the page 3249 * 0x2A mode sense request. 3250 */ 3251 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3252 "sd_set_mmc_caps: Mode Sense returned " 3253 "invalid block descriptor length\n"); 3254 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3255 return; 3256 } 3257 3258 /* See if read CDDA is supported */ 3259 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 3260 bd_len); 3261 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 3262 3263 /* See if writing DVD RAM is supported. */ 3264 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 3265 if (un->un_f_dvdram_writable_device == TRUE) { 3266 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3267 return; 3268 } 3269 3270 /* 3271 * If the device presents DVD or CD capabilities in the mode 3272 * page, we can return here since a RRD will not have 3273 * these capabilities. 3274 */ 3275 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3276 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3277 return; 3278 } 3279 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3280 3281 /* 3282 * If un->un_f_dvdram_writable_device is still FALSE, 3283 * check for a Removable Rigid Disk (RRD). A RRD 3284 * device is identified by the features RANDOM_WRITABLE and 3285 * HARDWARE_DEFECT_MANAGEMENT. 3286 */ 3287 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3288 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3289 3290 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3291 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3292 RANDOM_WRITABLE, SD_PATH_STANDARD); 3293 3294 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3295 3296 if (rtn != 0) { 3297 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3298 kmem_free(rqbuf_rw, SENSE_LENGTH); 3299 return; 3300 } 3301 3302 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3303 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3304 3305 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3306 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3307 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD); 3308 3309 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3310 3311 if (rtn == 0) { 3312 /* 3313 * We have good information, check for random writable 3314 * and hardware defect features. 3315 */ 3316 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3317 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3318 un->un_f_dvdram_writable_device = TRUE; 3319 } 3320 } 3321 3322 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3323 kmem_free(rqbuf_rw, SENSE_LENGTH); 3324 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3325 kmem_free(rqbuf_hd, SENSE_LENGTH); 3326 } 3327 3328 /* 3329 * Function: sd_check_for_writable_cd 3330 * 3331 * Description: This routine determines if the media in the device is 3332 * writable or not. It uses the GET CONFIGURATION command (0x46) 3333 * to determine if the media is writable. 3334 * 3335 * Arguments: ssc - ssc contains pointer to driver soft state 3336 * (unit) structure for this target. 3337 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" 3338 * chain and the normal command waitq, or 3339 * SD_PATH_DIRECT_PRIORITY to use the USCSI "direct" 3340 * chain and bypass the normal command waitq. 3341 * 3342 * Context: Never called at interrupt context.
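 * * Note: the first check below compares the GET CONFIGURATION feature * header's current-profile bytes against 0x0012, the MMC profile number * for DVD-RAM, and treats a match as writable media.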
3343 */ 3344 3345 static void 3346 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag) 3347 { 3348 struct uscsi_cmd com; 3349 uchar_t *out_data; 3350 uchar_t *rqbuf; 3351 int rtn; 3352 uchar_t *out_data_rw, *out_data_hd; 3353 uchar_t *rqbuf_rw, *rqbuf_hd; 3354 struct mode_header_grp2 *sense_mhp; 3355 uchar_t *sense_page; 3356 caddr_t buf; 3357 int bd_len; 3358 int status; 3359 struct sd_lun *un; 3360 3361 ASSERT(ssc != NULL); 3362 un = ssc->ssc_un; 3363 ASSERT(un != NULL); 3364 ASSERT(mutex_owned(SD_MUTEX(un))); 3365 3366 /* 3367 * Initialize the writable media flag to FALSE; set it to TRUE only 3368 * if the configuration information tells us the media is writable. 3369 */ 3370 un->un_f_mmc_writable_media = FALSE; 3371 mutex_exit(SD_MUTEX(un)); 3372 3373 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3374 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3375 3376 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH, 3377 out_data, SD_PROFILE_HEADER_LEN, path_flag); 3378 3379 if (rtn != 0) 3380 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3381 3382 mutex_enter(SD_MUTEX(un)); 3383 if (rtn == 0) { 3384 /* 3385 * We have good information, check for writable DVD. 3386 */ 3387 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3388 un->un_f_mmc_writable_media = TRUE; 3389 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3390 kmem_free(rqbuf, SENSE_LENGTH); 3391 return; 3392 } 3393 } 3394 3395 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3396 kmem_free(rqbuf, SENSE_LENGTH); 3397 3398 /* 3399 * Determine if this is a RRD type device. 3400 */ 3401 mutex_exit(SD_MUTEX(un)); 3402 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3403 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf, 3404 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag); 3405 3406 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3407 3408 mutex_enter(SD_MUTEX(un)); 3409 if (status != 0) { 3410 /* command failed; just return */ 3411 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3412 return; 3413 } 3414 3415 /* Get to the page data */ 3416 sense_mhp = (struct mode_header_grp2 *)buf; 3417 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3418 if (bd_len > MODE_BLK_DESC_LENGTH) { 3419 /* 3420 * We did not get back the expected block descriptor length so 3421 * we cannot check the mode page. 3422 */ 3423 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3424 "sd_check_for_writable_cd: Mode Sense returned " 3425 "invalid block descriptor length\n"); 3426 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3427 return; 3428 } 3429 3430 /* 3431 * If the device presents DVD or CD capabilities in the mode 3432 * page, we can return here since a RRD device will not have 3433 * these capabilities. 3434 */ 3435 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3436 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3437 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3438 return; 3439 } 3440 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3441 3442 /* 3443 * If un->un_f_mmc_writable_media is still FALSE, 3444 * check for RRD type media. A RRD device is identified 3445 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
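 * Unlike the similar check in sd_set_mmc_caps(), the test below also * requires each feature's "current" bit (bit 0 of byte 10 of the * returned data) to be set, i.e. the feature must be active for the * media presently in the drive.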
3446 */ 3447 mutex_exit(SD_MUTEX(un)); 3448 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3449 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3450 3451 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw, 3452 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3453 RANDOM_WRITABLE, path_flag); 3454 3455 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3456 if (rtn != 0) { 3457 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3458 kmem_free(rqbuf_rw, SENSE_LENGTH); 3459 mutex_enter(SD_MUTEX(un)); 3460 return; 3461 } 3462 3463 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3464 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3465 3466 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd, 3467 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3468 HARDWARE_DEFECT_MANAGEMENT, path_flag); 3469 3470 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 3471 mutex_enter(SD_MUTEX(un)); 3472 if (rtn == 0) { 3473 /* 3474 * We have good information, check for random writable 3475 * and hardware defect features as current. 3476 */ 3477 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3478 (out_data_rw[10] & 0x1) && 3479 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3480 (out_data_hd[10] & 0x1)) { 3481 un->un_f_mmc_writable_media = TRUE; 3482 } 3483 } 3484 3485 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3486 kmem_free(rqbuf_rw, SENSE_LENGTH); 3487 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3488 kmem_free(rqbuf_hd, SENSE_LENGTH); 3489 } 3490 3491 /* 3492 * Function: sd_read_unit_properties 3493 * 3494 * Description: The following implements a property lookup mechanism. 3495 * Properties for particular disks (keyed on vendor, model 3496 * and rev numbers) are sought in the sd.conf file via 3497 * sd_process_sdconf_file(), and if not found there, are 3498 * looked for in a list hardcoded in this driver via 3499 * sd_process_sdconf_table(). Once located, the properties 3500 * are used to update the driver unit structure. 3501 * 3502 * Arguments: un - driver soft state (unit) structure 3503 */ 3504 3505 static void 3506 sd_read_unit_properties(struct sd_lun *un) 3507 { 3508 /* 3509 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3510 * the "sd-config-list" property (from the sd.conf file) or if 3511 * there was not a match for the inquiry vid/pid. If this event 3512 * occurs, the static driver configuration table is searched for 3513 * a match. 3514 */ 3515 ASSERT(un != NULL); 3516 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3517 sd_process_sdconf_table(un); 3518 } 3519 3520 /* check for LSI device */ 3521 sd_is_lsi(un); 3522 3523 3524 } 3525 3526 3527 /* 3528 * Function: sd_process_sdconf_file 3529 * 3530 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the 3531 * driver's config file (i.e., sd.conf) and update the driver 3532 * soft state structure accordingly. 3533 * 3534 * Arguments: un - driver soft state (unit) structure 3535 * 3536 * Return Code: SD_SUCCESS - The properties were successfully set according 3537 * to the driver configuration file. 3538 * SD_FAILURE - The driver config list was not obtained or 3539 * there was no vid/pid match. This indicates that 3540 * the static config table should be used. 3541 * 3542 * The config file has a property, "sd-config-list". Currently we support 3543 * two kinds of formats.
For both formats, the value of this property 3544 * is a list of duplets: 3545 * 3546 * sd-config-list= 3547 * <duplet>, 3548 * [,<duplet>]*; 3549 * 3550 * For the improved format, the <duplet> is defined as: 3551 * 3552 * <duplet>:= "<vid+pid>","<tunable-list>" 3553 * 3554 * and 3555 * 3556 * <tunable-list>:= <tunable> [, <tunable> ]*; 3557 * <tunable> = <name> : <value> 3558 * 3559 * The <vid+pid> is the string that is returned by the target device on a 3560 * SCSI inquiry command, the <tunable-list> contains one or more tunables 3561 * to apply to all target devices with the specified <vid+pid>. 3562 * 3563 * Each <tunable> is a "<name> : <value>" pair. 3564 * 3565 * For the old format, the structure of each duplet is as follows: 3566 * 3567 * <duplet>:= "<vid+pid>","<data-property-name_list>" 3568 * 3569 * The first entry of the duplet is the device ID string (the concatenated 3570 * vid & pid; not to be confused with a device_id). This is defined in 3571 * the same way as in the sd_disk_table. 3572 * 3573 * The second part of the duplet is a string that identifies a 3574 * data-property-name-list. The data-property-name-list is defined as 3575 * follows: 3576 * 3577 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3578 * 3579 * The syntax of <data-property-name> depends on the <version> field. 3580 * 3581 * If version = SD_CONF_VERSION_1 we have the following syntax: 3582 * 3583 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3584 * 3585 * where the prop0 value will be used to set prop0 if bit0 is set in the 3586 * flags, prop1 if bit1 is set, etc., and N = SD_CONF_MAX_ITEMS - 1 3587 * 3588 */ 3589 3590 static int 3591 sd_process_sdconf_file(struct sd_lun *un) 3592 { 3593 char **config_list = NULL; 3594 uint_t nelements; 3595 char *vidptr; 3596 int vidlen; 3597 char *dnlist_ptr; 3598 char *dataname_ptr; 3599 char *dataname_lasts; 3600 int *data_list = NULL; 3601 uint_t data_list_len; 3602 int rval = SD_FAILURE; 3603 int i; 3604 3605 ASSERT(un != NULL); 3606 3607 /* Obtain the configuration list associated with the .conf file */ 3608 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un), 3609 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list, 3610 &config_list, &nelements) != DDI_PROP_SUCCESS) { 3611 return (SD_FAILURE); 3612 } 3613 3614 /* 3615 * Compare vids in each duplet to the inquiry vid - if a match is 3616 * made, get the data value and update the soft state structure 3617 * accordingly. 3618 * 3619 * Each duplet must appear as a pair of strings; otherwise, return 3620 * SD_FAILURE. 3621 */ 3622 if (nelements & 1) { 3623 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3624 "sd-config-list must be specified as pairs of strings.\n"); 3625 if (config_list) 3626 ddi_prop_free(config_list); 3627 return (SD_FAILURE); 3628 } 3629 3630 for (i = 0; i < nelements; i += 2) { 3631 /* 3632 * Note: The assumption here is that each vid entry is on 3633 * a unique line from its associated duplet. 3634 */ 3635 vidptr = config_list[i]; 3636 vidlen = (int)strlen(vidptr); 3637 if ((vidlen == 0) || 3638 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3639 continue; 3640 } 3641 3642 /* 3643 * dnlist contains 1 or more blank-separated 3644 * data-property-name entries 3645 */ 3646 dnlist_ptr = config_list[i + 1]; 3647 3648 if (strchr(dnlist_ptr, ':') != NULL) { 3649 /* 3650 * Decode the improved format sd-config-list.
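 * * As a purely hypothetical illustration, an improved-format entry in * sd.conf might read: * * sd-config-list = "ACME    ExampleDisk     ", * "throttle-max:64, disksort:false, cache-nonvolatile:true"; * * (The vid/pid and tunable values above are invented; the tunable * names are those handled by sd_set_properties().)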
3651 */ 3652 sd_nvpair_str_decode(un, dnlist_ptr); 3653 } else { 3654 /* 3655 * For the old format sd-config-list, loop through 3656 * all data-property-name entries in the 3657 * data-property-name-list, 3658 * setting the properties for each. 3659 */ 3660 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t", 3661 &dataname_lasts); dataname_ptr != NULL; 3662 dataname_ptr = sd_strtok_r(NULL, " \t", 3663 &dataname_lasts)) { 3664 int version; 3665 3666 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3667 "sd_process_sdconf_file: disk:%s, " 3668 "data:%s\n", vidptr, dataname_ptr); 3669 3670 /* Get the data list */ 3671 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, 3672 SD_DEVINFO(un), 0, dataname_ptr, &data_list, 3673 &data_list_len) != DDI_PROP_SUCCESS) { 3674 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3675 "sd_process_sdconf_file: data " 3676 "property (%s) has no value\n", 3677 dataname_ptr); 3678 continue; 3679 } 3680 3681 version = data_list[0]; 3682 3683 if (version == SD_CONF_VERSION_1) { 3684 sd_tunables values; 3685 3686 /* Set the properties */ 3687 if (sd_chk_vers1_data(un, data_list[1], 3688 &data_list[2], data_list_len, 3689 dataname_ptr) == SD_SUCCESS) { 3690 sd_get_tunables_from_conf(un, 3691 data_list[1], &data_list[2], 3692 &values); 3693 sd_set_vers1_properties(un, 3694 data_list[1], &values); 3695 rval = SD_SUCCESS; 3696 } else { 3697 rval = SD_FAILURE; 3698 } 3699 } else { 3700 scsi_log(SD_DEVINFO(un), sd_label, 3701 CE_WARN, "data property %s version " 3702 "0x%x is invalid.", 3703 dataname_ptr, version); 3704 rval = SD_FAILURE; 3705 } 3706 if (data_list) 3707 ddi_prop_free(data_list); 3708 } 3709 } 3710 } 3711 3712 /* free up the memory allocated by ddi_prop_lookup_string_array(). */ 3713 if (config_list) { 3714 ddi_prop_free(config_list); 3715 } 3716 3717 return (rval); 3718 } 3719 3720 /* 3721 * Function: sd_nvpair_str_decode() 3722 * 3723 * Description: Parse the improved format sd-config-list to get 3724 * each tunable entry, which is a name-value pair. 3725 * Then call sd_set_properties() to set each property. 3726 * 3727 * Arguments: un - driver soft state (unit) structure 3728 * nvpair_str - the tunable list 3729 */ 3730 static void 3731 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str) 3732 { 3733 char *nv, *name, *value, *token; 3734 char *nv_lasts, *v_lasts, *x_lasts; 3735 3736 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL; 3737 nv = sd_strtok_r(NULL, ",", &nv_lasts)) { 3738 token = sd_strtok_r(nv, ":", &v_lasts); 3739 name = sd_strtok_r(token, " \t", &x_lasts); 3740 token = sd_strtok_r(NULL, ":", &v_lasts); 3741 value = sd_strtok_r(token, " \t", &x_lasts); 3742 if (name == NULL || value == NULL) { 3743 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3744 "sd_nvpair_str_decode: " 3745 "name or value is not valid!\n"); 3746 } else { 3747 sd_set_properties(un, name, value); 3748 } 3749 } 3750 } 3751 3752 /* 3753 * Function: sd_strtok_r() 3754 * 3755 * Description: This function uses strpbrk and strspn to break the 3756 * string into tokens across sequential calls. It returns 3757 * NULL when no non-separator characters remain. Pass NULL 3758 * as the first argument on subsequent calls.
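 * * A minimal usage sketch (hypothetical buffer and separator set): * * char *lasts, *tok; * * for (tok = sd_strtok_r(buf, ",", &lasts); tok != NULL; * tok = sd_strtok_r(NULL, ",", &lasts)) { * (process tok) * }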
3759 */ 3760 static char * 3761 sd_strtok_r(char *string, const char *sepset, char **lasts) 3762 { 3763 char *q, *r; 3764 3765 /* First or subsequent call */ 3766 if (string == NULL) 3767 string = *lasts; 3768 3769 if (string == NULL) 3770 return (NULL); 3771 3772 /* Skip leading separators */ 3773 q = string + strspn(string, sepset); 3774 3775 if (*q == '\0') 3776 return (NULL); 3777 3778 if ((r = strpbrk(q, sepset)) == NULL) 3779 *lasts = NULL; 3780 else { 3781 *r = '\0'; 3782 *lasts = r + 1; 3783 } 3784 return (q); 3785 } 3786 3787 /* 3788 * Function: sd_set_properties() 3789 * 3790 * Description: Set device properties based on the improved 3791 * format sd-config-list. 3792 * 3793 * Arguments: un - driver soft state (unit) structure 3794 * name - supported tunable name 3795 * value - tunable value 3796 */ 3797 static void 3798 sd_set_properties(struct sd_lun *un, char *name, char *value) 3799 { 3800 char *endptr = NULL; 3801 long val = 0; 3802 3803 if (strcasecmp(name, "cache-nonvolatile") == 0) { 3804 if (strcasecmp(value, "true") == 0) { 3805 un->un_f_suppress_cache_flush = TRUE; 3806 } else if (strcasecmp(value, "false") == 0) { 3807 un->un_f_suppress_cache_flush = FALSE; 3808 } else { 3809 goto value_invalid; 3810 } 3811 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3812 "suppress_cache_flush flag set to %d\n", 3813 un->un_f_suppress_cache_flush); 3814 return; 3815 } 3816 3817 if (strcasecmp(name, "controller-type") == 0) { 3818 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3819 un->un_ctype = val; 3820 } else { 3821 goto value_invalid; 3822 } 3823 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3824 "ctype set to %d\n", un->un_ctype); 3825 return; 3826 } 3827 3828 if (strcasecmp(name, "delay-busy") == 0) { 3829 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3830 un->un_busy_timeout = drv_usectohz(val / 1000); 3831 } else { 3832 goto value_invalid; 3833 } 3834 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3835 "busy_timeout set to %d\n", un->un_busy_timeout); 3836 return; 3837 } 3838 3839 if (strcasecmp(name, "disksort") == 0) { 3840 if (strcasecmp(value, "true") == 0) { 3841 un->un_f_disksort_disabled = FALSE; 3842 } else if (strcasecmp(value, "false") == 0) { 3843 un->un_f_disksort_disabled = TRUE; 3844 } else { 3845 goto value_invalid; 3846 } 3847 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3848 "disksort disabled flag set to %d\n", 3849 un->un_f_disksort_disabled); 3850 return; 3851 } 3852 3853 if (strcasecmp(name, "timeout-releasereservation") == 0) { 3854 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3855 un->un_reserve_release_time = val; 3856 } else { 3857 goto value_invalid; 3858 } 3859 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3860 "reservation release timeout set to %d\n", 3861 un->un_reserve_release_time); 3862 return; 3863 } 3864 3865 if (strcasecmp(name, "reset-lun") == 0) { 3866 if (strcasecmp(value, "true") == 0) { 3867 un->un_f_lun_reset_enabled = TRUE; 3868 } else if (strcasecmp(value, "false") == 0) { 3869 un->un_f_lun_reset_enabled = FALSE; 3870 } else { 3871 goto value_invalid; 3872 } 3873 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3874 "lun reset enabled flag set to %d\n", 3875 un->un_f_lun_reset_enabled); 3876 return; 3877 } 3878 3879 if (strcasecmp(name, "retries-busy") == 0) { 3880 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3881 un->un_busy_retry_count = val; 3882 } else { 3883 goto value_invalid; 3884 } 3885 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3886 "busy retry 
count set to %d\n", un->un_busy_retry_count); 3887 return; 3888 } 3889 3890 if (strcasecmp(name, "retries-timeout") == 0) { 3891 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3892 un->un_retry_count = val; 3893 } else { 3894 goto value_invalid; 3895 } 3896 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3897 "timeout retry count set to %d\n", un->un_retry_count); 3898 return; 3899 } 3900 3901 if (strcasecmp(name, "retries-notready") == 0) { 3902 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3903 un->un_notready_retry_count = val; 3904 } else { 3905 goto value_invalid; 3906 } 3907 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3908 "notready retry count set to %d\n", 3909 un->un_notready_retry_count); 3910 return; 3911 } 3912 3913 if (strcasecmp(name, "retries-reset") == 0) { 3914 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3915 un->un_reset_retry_count = val; 3916 } else { 3917 goto value_invalid; 3918 } 3919 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3920 "reset retry count set to %d\n", 3921 un->un_reset_retry_count); 3922 return; 3923 } 3924 3925 if (strcasecmp(name, "throttle-max") == 0) { 3926 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3927 un->un_saved_throttle = un->un_throttle = val; 3928 } else { 3929 goto value_invalid; 3930 } 3931 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3932 "throttle set to %d\n", un->un_throttle); 3933 } 3934 3935 if (strcasecmp(name, "throttle-min") == 0) { 3936 if (ddi_strtol(value, &endptr, 0, &val) == 0) { 3937 un->un_min_throttle = val; 3938 } else { 3939 goto value_invalid; 3940 } 3941 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3942 "min throttle set to %d\n", un->un_min_throttle); 3943 } 3944 3945 /* 3946 * Validate the throttle values. 3947 * If any of the numbers are invalid, set everything to defaults. 3948 */ 3949 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 3950 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 3951 (un->un_min_throttle > un->un_throttle)) { 3952 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 3953 un->un_min_throttle = sd_min_throttle; 3954 } 3955 return; 3956 3957 value_invalid: 3958 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: " 3959 "value of prop %s is invalid\n", name); 3960 } 3961 3962 /* 3963 * Function: sd_get_tunables_from_conf() 3964 * 3965 * Description: This function reads the data list from the sd.conf file and 3966 * pulls the values that can have numeric values as arguments 3967 * and places the values in the appropriate sd_tunables member. 3968 * Since the order of the data list members varies across 3969 * platforms, this function reads them from the data list in a 3970 * platform-specific order and places them into the correct 3971 * sd_tunables member that is 3972 * consistent across all platforms.
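 * * A hedged example (assuming SD_CONF_BSET_THROTTLE is bit 0, with * invented values): for a version 1 data list of 1,0x1,64,... the * caller passes flags = 0x1 and data_list = &list[2], so the loop * below stores 64 (prop0) in values->sdt_throttle and leaves every * other member zeroed.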
3973 */ 3974 static void 3975 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3976 sd_tunables *values) 3977 { 3978 int i; 3979 int mask; 3980 3981 bzero(values, sizeof (sd_tunables)); 3982 3983 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3984 3985 mask = 1 << i; 3986 if (mask > flags) { 3987 break; 3988 } 3989 3990 switch (mask & flags) { 3991 case 0: /* This mask bit not set in flags */ 3992 continue; 3993 case SD_CONF_BSET_THROTTLE: 3994 values->sdt_throttle = data_list[i]; 3995 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3996 "sd_get_tunables_from_conf: throttle = %d\n", 3997 values->sdt_throttle); 3998 break; 3999 case SD_CONF_BSET_CTYPE: 4000 values->sdt_ctype = data_list[i]; 4001 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4002 "sd_get_tunables_from_conf: ctype = %d\n", 4003 values->sdt_ctype); 4004 break; 4005 case SD_CONF_BSET_NRR_COUNT: 4006 values->sdt_not_rdy_retries = data_list[i]; 4007 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4008 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 4009 values->sdt_not_rdy_retries); 4010 break; 4011 case SD_CONF_BSET_BSY_RETRY_COUNT: 4012 values->sdt_busy_retries = data_list[i]; 4013 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4014 "sd_get_tunables_from_conf: busy_retries = %d\n", 4015 values->sdt_busy_retries); 4016 break; 4017 case SD_CONF_BSET_RST_RETRIES: 4018 values->sdt_reset_retries = data_list[i]; 4019 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4020 "sd_get_tunables_from_conf: reset_retries = %d\n", 4021 values->sdt_reset_retries); 4022 break; 4023 case SD_CONF_BSET_RSV_REL_TIME: 4024 values->sdt_reserv_rel_time = data_list[i]; 4025 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4026 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 4027 values->sdt_reserv_rel_time); 4028 break; 4029 case SD_CONF_BSET_MIN_THROTTLE: 4030 values->sdt_min_throttle = data_list[i]; 4031 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4032 "sd_get_tunables_from_conf: min_throttle = %d\n", 4033 values->sdt_min_throttle); 4034 break; 4035 case SD_CONF_BSET_DISKSORT_DISABLED: 4036 values->sdt_disk_sort_dis = data_list[i]; 4037 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4038 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 4039 values->sdt_disk_sort_dis); 4040 break; 4041 case SD_CONF_BSET_LUN_RESET_ENABLED: 4042 values->sdt_lun_reset_enable = data_list[i]; 4043 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4044 "sd_get_tunables_from_conf: lun_reset_enable = %d\n", 4045 values->sdt_lun_reset_enable); 4046 break; 4047 case SD_CONF_BSET_CACHE_IS_NV: 4048 values->sdt_suppress_cache_flush = data_list[i]; 4049 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4050 "sd_get_tunables_from_conf: " 4051 "suppress_cache_flush = %d\n", 4052 values->sdt_suppress_cache_flush); 4053 break; 4054 } 4055 } 4056 } 4057 4058 /* 4059 * Function: sd_process_sdconf_table 4060 * 4061 * Description: Search the static configuration table for a match on the 4062 * inquiry vid/pid and update the driver soft state structure 4063 * according to the table property values for the device.
4064 * 4065 * The form of a configuration table entry is: 4066 * <vid+pid>,<flags>,<property-data> 4067 * "SEAGATE ST42400N",1,0x40000, 4068 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1; 4069 * 4070 * Arguments: un - driver soft state (unit) structure 4071 */ 4072 4073 static void 4074 sd_process_sdconf_table(struct sd_lun *un) 4075 { 4076 char *id = NULL; 4077 int table_index; 4078 int idlen; 4079 4080 ASSERT(un != NULL); 4081 for (table_index = 0; table_index < sd_disk_table_size; 4082 table_index++) { 4083 id = sd_disk_table[table_index].device_id; 4084 idlen = strlen(id); 4085 if (idlen == 0) { 4086 continue; 4087 } 4088 4089 /* 4090 * The static configuration table currently does not 4091 * implement version 10 properties. Additionally, 4092 * multiple data-property-name entries are not 4093 * implemented in the static configuration table. 4094 */ 4095 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4096 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4097 "sd_process_sdconf_table: disk %s\n", id); 4098 sd_set_vers1_properties(un, 4099 sd_disk_table[table_index].flags, 4100 sd_disk_table[table_index].properties); 4101 break; 4102 } 4103 } 4104 } 4105 4106 4107 /* 4108 * Function: sd_sdconf_id_match 4109 * 4110 * Description: This local function implements a case-insensitive vid/pid 4111 * comparison as well as the boundary cases of wildcards and 4112 * multiple blanks. 4113 * 4114 * Note: An implicit assumption made here is that the scsi 4115 * inquiry structure will always keep the vid, pid and 4116 * revision strings in consecutive sequence, so they can be 4117 * read as a single string. If this assumption is not the 4118 * case, a separate string, to be used for the check, needs 4119 * to be built with these strings concatenated. 4120 * 4121 * Arguments: un - driver soft state (unit) structure 4122 * id - table or config file vid/pid 4123 * idlen - length of the vid/pid (bytes) 4124 * 4125 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4126 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4127 */ 4128 4129 static int 4130 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 4131 { 4132 struct scsi_inquiry *sd_inq; 4133 int rval = SD_SUCCESS; 4134 4135 ASSERT(un != NULL); 4136 sd_inq = un->un_sd->sd_inq; 4137 ASSERT(id != NULL); 4138 4139 /* 4140 * We use the inq_vid as a pointer to a buffer containing the 4141 * vid and pid and use the entire vid/pid length of the table 4142 * entry for the comparison. This works because the inq_pid 4143 * data member follows inq_vid in the scsi_inquiry structure. 4144 */ 4145 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 4146 /* 4147 * The user id string is compared to the inquiry vid/pid 4148 * using a case insensitive comparison and ignoring 4149 * multiple spaces. 4150 */ 4151 rval = sd_blank_cmp(un, id, idlen); 4152 if (rval != SD_SUCCESS) { 4153 /* 4154 * User id strings that start and end with a "*" 4155 * are a special case. These do not have a 4156 * specific vendor, and the product string can 4157 * appear anywhere in the 16 byte PID portion of 4158 * the inquiry data. This is a simple strstr() 4159 * type search for the user id in the inquiry data.
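 * For example, a hypothetical table entry of "*ST32550*" would match * any device whose 16-byte inquiry PID contains the substring * "ST32550", regardless of the vendor field.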
4160 */ 4161 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 4162 char *pidptr = &id[1]; 4163 int i; 4164 int j; 4165 int pidstrlen = idlen - 2; 4166 j = sizeof (SD_INQUIRY(un)->inq_pid) - 4167 pidstrlen; 4168 4169 if (j < 0) { 4170 return (SD_FAILURE); 4171 } 4172 for (i = 0; i < j; i++) { 4173 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 4174 pidptr, pidstrlen) == 0) { 4175 rval = SD_SUCCESS; 4176 break; 4177 } 4178 } 4179 } 4180 } 4181 } 4182 return (rval); 4183 } 4184 4185 4186 /* 4187 * Function: sd_blank_cmp 4188 * 4189 * Description: If the id string starts and ends with a space, treat 4190 * multiple consecutive spaces as equivalent to a single 4191 * space. For example, this causes a sd_disk_table entry 4192 * of " NEC CDROM " to match a device's id string of 4193 * "NEC CDROM". 4194 * 4195 * Note: The success exit condition for this routine is that 4196 * the character at the table-entry pointer is '\0' and the 4197 * remaining inquiry-length count is zero. This will happen if the inquiry 4198 * string returned by the device is padded with spaces to be 4199 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 4200 * SCSI spec states that the inquiry string is to be padded with 4201 * spaces. 4202 * 4203 * Arguments: un - driver soft state (unit) structure 4204 * id - table or config file vid/pid 4205 * idlen - length of the vid/pid (bytes) 4206 * 4207 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 4208 * SD_FAILURE - Indicates no match with the inquiry vid/pid 4209 */ 4210 4211 static int 4212 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 4213 { 4214 char *p1; 4215 char *p2; 4216 int cnt; 4217 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 4218 sizeof (SD_INQUIRY(un)->inq_pid); 4219 4220 ASSERT(un != NULL); 4221 p2 = un->un_sd->sd_inq->inq_vid; 4222 ASSERT(id != NULL); 4223 p1 = id; 4224 4225 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 4226 /* 4227 * Note: string p1 is terminated by a NUL but string p2 4228 * isn't. The end of p2 is determined by cnt. 4229 */ 4230 for (;;) { 4231 /* skip over any extra blanks in both strings */ 4232 while ((*p1 != '\0') && (*p1 == ' ')) { 4233 p1++; 4234 } 4235 while ((cnt != 0) && (*p2 == ' ')) { 4236 p2++; 4237 cnt--; 4238 } 4239 4240 /* compare the two strings */ 4241 if ((cnt == 0) || 4242 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 4243 break; 4244 } 4245 while ((cnt > 0) && 4246 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 4247 p1++; 4248 p2++; 4249 cnt--; 4250 } 4251 } 4252 } 4253 4254 /* return SD_SUCCESS if both strings match */ 4255 return (((*p1 == '\0') && (cnt == 0)) ?
SD_SUCCESS : SD_FAILURE); 4256 } 4257 4258 4259 /* 4260 * Function: sd_chk_vers1_data 4261 * 4262 * Description: Verify the version 1 device properties provided by the 4263 * user via the configuration file. 4264 * 4265 * Arguments: un - driver soft state (unit) structure 4266 * flags - integer mask indicating properties to be set 4267 * prop_list - integer list of property values 4268 * list_len - number of elements in the property list 4269 * 4270 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 4271 * SD_FAILURE - Indicates the user provided data is invalid 4272 */ 4273 4274 static int 4275 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 4276 int list_len, char *dataname_ptr) 4277 { 4278 int i; 4279 int mask; 4280 int index = 0; 4281 4282 ASSERT(un != NULL); 4283 4284 /* Check for a NULL property name and list */ 4285 if (dataname_ptr == NULL) { 4286 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4287 "sd_chk_vers1_data: NULL data property name."); 4288 return (SD_FAILURE); 4289 } 4290 if (prop_list == NULL) { 4291 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4292 "sd_chk_vers1_data: %s NULL data property list.", 4293 dataname_ptr); 4294 return (SD_FAILURE); 4295 } 4296 4297 /* Display a warning if undefined bits are set in the flags */ 4298 if (flags & ~SD_CONF_BIT_MASK) { 4299 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4300 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 4301 "Properties not set.", 4302 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 4303 return (SD_FAILURE); 4304 } 4305 4306 /* 4307 * Verify the length of the list by counting the bits set in the 4308 * flags and validating that the property list is long enough to 4309 * hold a value for each set bit. 4310 */ 4311 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 4312 mask = 1 << i; 4313 if (flags & mask) { 4314 index++; 4315 } 4316 } 4317 if (list_len < (index + 2)) { 4318 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4319 "sd_chk_vers1_data: " 4320 "Data property list %s size is incorrect. " 4321 "Properties not set.", dataname_ptr); 4322 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 4323 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 4324 return (SD_FAILURE); 4325 } 4326 return (SD_SUCCESS); 4327 } 4328 4329 4330 /* 4331 * Function: sd_set_vers1_properties 4332 * 4333 * Description: Set version 1 device properties based on a property list 4334 * retrieved from the driver configuration file or static 4335 * configuration table. Version 1 properties have the format: 4336 * 4337 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 4338 * 4339 * where the prop0 value will be used to set prop0 if bit0 4340 * is set in the flags 4341 * 4342 * Arguments: un - driver soft state (unit) structure 4343 * flags - integer mask indicating properties to be set 4344 * prop_list - integer list of property values 4345 */ 4346 4347 static void 4348 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 4349 { 4350 ASSERT(un != NULL); 4351 4352 /* 4353 * Set the flag to indicate cache is to be disabled. An attempt 4354 * to disable the cache via sd_cache_control() will be made 4355 * later during attach once the basic initialization is complete.
4356 */ 4357 if (flags & SD_CONF_BSET_NOCACHE) { 4358 un->un_f_opt_disable_cache = TRUE; 4359 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4360 "sd_set_vers1_properties: caching disabled flag set\n"); 4361 } 4362 4363 /* CD-specific configuration parameters */ 4364 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 4365 un->un_f_cfg_playmsf_bcd = TRUE; 4366 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4367 "sd_set_vers1_properties: playmsf_bcd set\n"); 4368 } 4369 if (flags & SD_CONF_BSET_READSUB_BCD) { 4370 un->un_f_cfg_readsub_bcd = TRUE; 4371 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4372 "sd_set_vers1_properties: readsub_bcd set\n"); 4373 } 4374 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 4375 un->un_f_cfg_read_toc_trk_bcd = TRUE; 4376 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4377 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 4378 } 4379 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 4380 un->un_f_cfg_read_toc_addr_bcd = TRUE; 4381 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4382 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 4383 } 4384 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 4385 un->un_f_cfg_no_read_header = TRUE; 4386 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4387 "sd_set_vers1_properties: no_read_header set\n"); 4388 } 4389 if (flags & SD_CONF_BSET_READ_CD_XD4) { 4390 un->un_f_cfg_read_cd_xd4 = TRUE; 4391 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4392 "sd_set_vers1_properties: read_cd_xd4 set\n"); 4393 } 4394 4395 /* Support for devices which do not have valid/unique serial numbers */ 4396 if (flags & SD_CONF_BSET_FAB_DEVID) { 4397 un->un_f_opt_fab_devid = TRUE; 4398 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4399 "sd_set_vers1_properties: fab_devid bit set\n"); 4400 } 4401 4402 /* Support for user throttle configuration */ 4403 if (flags & SD_CONF_BSET_THROTTLE) { 4404 ASSERT(prop_list != NULL); 4405 un->un_saved_throttle = un->un_throttle = 4406 prop_list->sdt_throttle; 4407 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4408 "sd_set_vers1_properties: throttle set to %d\n", 4409 prop_list->sdt_throttle); 4410 } 4411 4412 /* Set the per disk retry count according to the conf file or table. 
*/ 4413 if (flags & SD_CONF_BSET_NRR_COUNT) { 4414 ASSERT(prop_list != NULL); 4415 if (prop_list->sdt_not_rdy_retries) { 4416 un->un_notready_retry_count = 4417 prop_list->sdt_not_rdy_retries; 4418 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4419 "sd_set_vers1_properties: not ready retry count" 4420 " set to %d\n", un->un_notready_retry_count); 4421 } 4422 } 4423 4424 /* The controller type is reported for generic disk driver ioctls */ 4425 if (flags & SD_CONF_BSET_CTYPE) { 4426 ASSERT(prop_list != NULL); 4427 switch (prop_list->sdt_ctype) { 4428 case CTYPE_CDROM: 4429 un->un_ctype = prop_list->sdt_ctype; 4430 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4431 "sd_set_vers1_properties: ctype set to " 4432 "CTYPE_CDROM\n"); 4433 break; 4434 case CTYPE_CCS: 4435 un->un_ctype = prop_list->sdt_ctype; 4436 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4437 "sd_set_vers1_properties: ctype set to " 4438 "CTYPE_CCS\n"); 4439 break; 4440 case CTYPE_ROD: /* RW optical */ 4441 un->un_ctype = prop_list->sdt_ctype; 4442 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4443 "sd_set_vers1_properties: ctype set to " 4444 "CTYPE_ROD\n"); 4445 break; 4446 default: 4447 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4448 "sd_set_vers1_properties: Could not set " 4449 "invalid ctype value (%d)", 4450 prop_list->sdt_ctype); 4451 } 4452 } 4453 4454 /* Purple failover timeout */ 4455 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 4456 ASSERT(prop_list != NULL); 4457 un->un_busy_retry_count = 4458 prop_list->sdt_busy_retries; 4459 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4460 "sd_set_vers1_properties: " 4461 "busy retry count set to %d\n", 4462 un->un_busy_retry_count); 4463 } 4464 4465 /* Purple reset retry count */ 4466 if (flags & SD_CONF_BSET_RST_RETRIES) { 4467 ASSERT(prop_list != NULL); 4468 un->un_reset_retry_count = 4469 prop_list->sdt_reset_retries; 4470 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4471 "sd_set_vers1_properties: " 4472 "reset retry count set to %d\n", 4473 un->un_reset_retry_count); 4474 } 4475 4476 /* Purple reservation release timeout */ 4477 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 4478 ASSERT(prop_list != NULL); 4479 un->un_reserve_release_time = 4480 prop_list->sdt_reserv_rel_time; 4481 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4482 "sd_set_vers1_properties: " 4483 "reservation release timeout set to %d\n", 4484 un->un_reserve_release_time); 4485 } 4486 4487 /* 4488 * Driver flag telling the driver to verify that no commands are pending 4489 * for a device before issuing a Test Unit Ready. This is a workaround 4490 * for a firmware bug in some Seagate eliteI drives. 4491 */ 4492 if (flags & SD_CONF_BSET_TUR_CHECK) { 4493 un->un_f_cfg_tur_check = TRUE; 4494 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4495 "sd_set_vers1_properties: tur queue check set\n"); 4496 } 4497 4498 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 4499 un->un_min_throttle = prop_list->sdt_min_throttle; 4500 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4501 "sd_set_vers1_properties: min throttle set to %d\n", 4502 un->un_min_throttle); 4503 } 4504 4505 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 4506 un->un_f_disksort_disabled = 4507 (prop_list->sdt_disk_sort_dis != 0) ? 4508 TRUE : FALSE; 4509 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4510 "sd_set_vers1_properties: disksort disabled " 4511 "flag set to %d\n", 4512 prop_list->sdt_disk_sort_dis); 4513 } 4514 4515 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 4516 un->un_f_lun_reset_enabled = 4517 (prop_list->sdt_lun_reset_enable != 0) ? 
TRUE : FALSE; 4519 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4520 "sd_set_vers1_properties: lun reset enabled " 4521 "flag set to %d\n", 4522 prop_list->sdt_lun_reset_enable); 4523 } 4524 4525 if (flags & SD_CONF_BSET_CACHE_IS_NV) { 4526 un->un_f_suppress_cache_flush = 4527 (prop_list->sdt_suppress_cache_flush != 0) ? 4528 TRUE : FALSE; 4529 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4530 "sd_set_vers1_properties: suppress_cache_flush " 4531 "flag set to %d\n", 4532 prop_list->sdt_suppress_cache_flush); 4533 } 4534 4535 /* 4536 * Validate the throttle values. 4537 * If any of the numbers are invalid, set everything to defaults. 4538 */ 4539 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 4540 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 4541 (un->un_min_throttle > un->un_throttle)) { 4542 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 4543 un->un_min_throttle = sd_min_throttle; 4544 } 4545 } 4546 4547 /* 4548 * Function: sd_is_lsi() 4549 * 4550 * Description: Check for LSI devices by stepping through the static device 4551 * table to match vid/pid. 4552 * 4553 * Args: un - ptr to sd_lun 4554 * 4555 * Notes: When creating a new LSI property, add the new property 4556 * to this function. 4557 */ 4558 static void 4559 sd_is_lsi(struct sd_lun *un) 4560 { 4561 char *id = NULL; 4562 int table_index; 4563 int idlen; 4564 void *prop; 4565 4566 ASSERT(un != NULL); 4567 for (table_index = 0; table_index < sd_disk_table_size; 4568 table_index++) { 4569 id = sd_disk_table[table_index].device_id; 4570 idlen = strlen(id); 4571 if (idlen == 0) { 4572 continue; 4573 } 4574 4575 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4576 prop = sd_disk_table[table_index].properties; 4577 if (prop == &lsi_properties || 4578 prop == &lsi_oem_properties || 4579 prop == &lsi_properties_scsi || 4580 prop == &symbios_properties) { 4581 un->un_f_cfg_is_lsi = TRUE; 4582 } 4583 break; 4584 } 4585 } 4586 } 4587 4588 /* 4589 * Function: sd_get_physical_geometry 4590 * 4591 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4592 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4593 * target, and use this information to initialize the physical 4594 * geometry cache specified by pgeom_p. 4595 * 4596 * MODE SENSE is an optional command, so failure in this case 4597 * does not necessarily denote an error. We want to use the 4598 * MODE SENSE commands to derive the physical geometry of the 4599 * device, but if either command fails, the logical geometry is 4600 * used as the fallback for disk label geometry in cmlb. 4601 * 4602 * This requires that un->un_blockcount and un->un_tgt_blocksize 4603 * have already been initialized for the current target and 4604 * that the current values be passed as args so that we don't 4605 * end up ever trying to use -1 as a valid value. This could 4606 * happen if either value is reset while we're not holding 4607 * the mutex. 4608 * 4609 * Arguments: un - driver soft state (unit) structure 4610 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4611 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4612 * to use the USCSI "direct" chain and bypass the normal 4613 * command waitq. 4614 * 4615 * Context: Kernel thread only (can sleep).
4616 */ 4617 4618 static int 4619 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p, 4620 diskaddr_t capacity, int lbasize, int path_flag) 4621 { 4622 struct mode_format *page3p; 4623 struct mode_geometry *page4p; 4624 struct mode_header *headerp; 4625 int sector_size; 4626 int nsect; 4627 int nhead; 4628 int ncyl; 4629 int intrlv; 4630 int spc; 4631 diskaddr_t modesense_capacity; 4632 int rpm; 4633 int bd_len; 4634 int mode_header_length; 4635 uchar_t *p3bufp; 4636 uchar_t *p4bufp; 4637 int cdbsize; 4638 int ret = EIO; 4639 sd_ssc_t *ssc; 4640 int status; 4641 4642 ASSERT(un != NULL); 4643 4644 if (lbasize == 0) { 4645 if (ISCD(un)) { 4646 lbasize = 2048; 4647 } else { 4648 lbasize = un->un_sys_blocksize; 4649 } 4650 } 4651 pgeom_p->g_secsize = (unsigned short)lbasize; 4652 4653 /* 4654 * If the unit is a cd/dvd drive MODE SENSE page three 4655 * and MODE SENSE page four are reserved (see SBC spec 4656 * and MMC spec). To prevent soft errors just return 4657 * using the default LBA size. 4658 */ 4659 if (ISCD(un)) 4660 return (ret); 4661 4662 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4663 4664 /* 4665 * Retrieve MODE SENSE page 3 - Format Device Page 4666 */ 4667 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4668 ssc = sd_ssc_init(un); 4669 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp, 4670 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag); 4671 if (status != 0) { 4672 SD_ERROR(SD_LOG_COMMON, un, 4673 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4674 goto page3_exit; 4675 } 4676 4677 /* 4678 * Determine size of Block Descriptors in order to locate the mode 4679 * page data. ATAPI devices return 0, SCSI devices should return 4680 * MODE_BLK_DESC_LENGTH. 4681 */ 4682 headerp = (struct mode_header *)p3bufp; 4683 if (un->un_f_cfg_is_atapi == TRUE) { 4684 struct mode_header_grp2 *mhp = 4685 (struct mode_header_grp2 *)headerp; 4686 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4687 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4688 } else { 4689 mode_header_length = MODE_HEADER_LENGTH; 4690 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4691 } 4692 4693 if (bd_len > MODE_BLK_DESC_LENGTH) { 4694 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4695 "sd_get_physical_geometry: received unexpected bd_len " 4696 "of %d, page3\n", bd_len); 4697 status = EIO; 4698 goto page3_exit; 4699 } 4700 4701 page3p = (struct mode_format *) 4702 ((caddr_t)headerp + mode_header_length + bd_len); 4703 4704 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4705 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4706 "sd_get_physical_geometry: mode sense pg3 code mismatch " 4707 "%d\n", page3p->mode_page.code); 4708 status = EIO; 4709 goto page3_exit; 4710 } 4711 4712 /* 4713 * Use this physical geometry data only if BOTH MODE SENSE commands 4714 * complete successfully; otherwise, revert to the logical geometry. 4715 * So, we need to save everything in temporary variables. 
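 * * (Note: the sector size read below is rounded down to a multiple of * the system block size; e.g. a hypothetical reported value of 516 * with a 512-byte system block size masks down to 512.)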
4716 */ 4717 sector_size = BE_16(page3p->data_bytes_sect); 4718 4719 /* 4720 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4721 */ 4722 if (sector_size == 0) { 4723 sector_size = un->un_sys_blocksize; 4724 } else { 4725 sector_size &= ~(un->un_sys_blocksize - 1); 4726 } 4727 4728 nsect = BE_16(page3p->sect_track); 4729 intrlv = BE_16(page3p->interleave); 4730 4731 SD_INFO(SD_LOG_COMMON, un, 4732 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4733 SD_INFO(SD_LOG_COMMON, un, 4734 " mode page: %d; nsect: %d; sector size: %d;\n", 4735 page3p->mode_page.code, nsect, sector_size); 4736 SD_INFO(SD_LOG_COMMON, un, 4737 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4738 BE_16(page3p->track_skew), 4739 BE_16(page3p->cylinder_skew)); 4740 4741 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4742 4743 /* 4744 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4745 */ 4746 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4747 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp, 4748 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag); 4749 if (status != 0) { 4750 SD_ERROR(SD_LOG_COMMON, un, 4751 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4752 goto page4_exit; 4753 } 4754 4755 /* 4756 * Determine size of Block Descriptors in order to locate the mode 4757 * page data. ATAPI devices return 0, SCSI devices should return 4758 * MODE_BLK_DESC_LENGTH. 4759 */ 4760 headerp = (struct mode_header *)p4bufp; 4761 if (un->un_f_cfg_is_atapi == TRUE) { 4762 struct mode_header_grp2 *mhp = 4763 (struct mode_header_grp2 *)headerp; 4764 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4765 } else { 4766 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4767 } 4768 4769 if (bd_len > MODE_BLK_DESC_LENGTH) { 4770 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4771 "sd_get_physical_geometry: received unexpected bd_len of " 4772 "%d, page4\n", bd_len); 4773 status = EIO; 4774 goto page4_exit; 4775 } 4776 4777 page4p = (struct mode_geometry *) 4778 ((caddr_t)headerp + mode_header_length + bd_len); 4779 4780 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4781 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 4782 "sd_get_physical_geometry: mode sense pg4 code mismatch " 4783 "%d\n", page4p->mode_page.code); 4784 status = EIO; 4785 goto page4_exit; 4786 } 4787 4788 /* 4789 * Stash the data now, after we know that both commands completed. 4790 */ 4791 4792 4793 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4794 spc = nhead * nsect; 4795 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4796 rpm = BE_16(page4p->rpm); 4797 4798 modesense_capacity = spc * ncyl; 4799 4800 SD_INFO(SD_LOG_COMMON, un, 4801 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4802 SD_INFO(SD_LOG_COMMON, un, 4803 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4804 SD_INFO(SD_LOG_COMMON, un, 4805 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4806 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4807 (void *)pgeom_p, capacity); 4808 4809 /* 4810 * Compensate if the drive's geometry is not rectangular, i.e., 4811 * the product of C * H * S returned by MODE SENSE >= that returned 4812 * by read capacity. This is an idiosyncrasy of the original x86 4813 * disk subsystem. 
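 * * A worked example with invented numbers: nhead = 16 and nsect = 63 * give spc = 1008; with ncyl = 1000 the MODE SENSE capacity is * 1008000 blocks. If READ CAPACITY reported 1000000 blocks, g_acyl * below becomes (8000 + 1007) / 1008 = 8 and g_ncyl becomes 992.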
4814 */ 4815 if (modesense_capacity >= capacity) { 4816 SD_INFO(SD_LOG_COMMON, un, 4817 "sd_get_physical_geometry: adjusting acyl; " 4818 "old: %d; new: %d\n", pgeom_p->g_acyl, 4819 (modesense_capacity - capacity + spc - 1) / spc); 4820 if (sector_size != 0) { 4821 /* 1243403: NEC D38x7 drives don't support sec size */ 4822 pgeom_p->g_secsize = (unsigned short)sector_size; 4823 } 4824 pgeom_p->g_nsect = (unsigned short)nsect; 4825 pgeom_p->g_nhead = (unsigned short)nhead; 4826 pgeom_p->g_capacity = capacity; 4827 pgeom_p->g_acyl = 4828 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4829 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4830 } 4831 4832 pgeom_p->g_rpm = (unsigned short)rpm; 4833 pgeom_p->g_intrlv = (unsigned short)intrlv; 4834 ret = 0; 4835 4836 SD_INFO(SD_LOG_COMMON, un, 4837 "sd_get_physical_geometry: mode sense geometry:\n"); 4838 SD_INFO(SD_LOG_COMMON, un, 4839 " nsect: %d; sector size: %d; interlv: %d\n", 4840 nsect, sector_size, intrlv); 4841 SD_INFO(SD_LOG_COMMON, un, 4842 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4843 nhead, ncyl, rpm, modesense_capacity); 4844 SD_INFO(SD_LOG_COMMON, un, 4845 "sd_get_physical_geometry: (cached)\n"); 4846 SD_INFO(SD_LOG_COMMON, un, 4847 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4848 pgeom_p->g_ncyl, pgeom_p->g_acyl, 4849 pgeom_p->g_nhead, pgeom_p->g_nsect); 4850 SD_INFO(SD_LOG_COMMON, un, 4851 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4852 pgeom_p->g_secsize, pgeom_p->g_capacity, 4853 pgeom_p->g_intrlv, pgeom_p->g_rpm); 4854 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 4855 4856 page4_exit: 4857 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4858 4859 page3_exit: 4860 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4861 4862 if (status != 0) { 4863 if (status == EIO) { 4864 /* 4865 * Some disks do not support mode sense(6), we 4866 * should ignore this kind of error(sense key is 4867 * 0x5 - illegal request). 4868 */ 4869 uint8_t *sensep; 4870 int senlen; 4871 4872 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 4873 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 4874 ssc->ssc_uscsi_cmd->uscsi_rqresid); 4875 4876 if (senlen > 0 && 4877 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 4878 sd_ssc_assessment(ssc, 4879 SD_FMT_IGNORE_COMPROMISE); 4880 } else { 4881 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 4882 } 4883 } else { 4884 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 4885 } 4886 } 4887 sd_ssc_fini(ssc); 4888 return (ret); 4889 } 4890 4891 /* 4892 * Function: sd_get_virtual_geometry 4893 * 4894 * Description: Ask the controller to tell us about the target device. 4895 * 4896 * Arguments: un - pointer to softstate 4897 * capacity - disk capacity in #blocks 4898 * lbasize - disk block size in bytes 4899 * 4900 * Context: Kernel thread only 4901 */ 4902 4903 static int 4904 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p, 4905 diskaddr_t capacity, int lbasize) 4906 { 4907 uint_t geombuf; 4908 int spc; 4909 4910 ASSERT(un != NULL); 4911 4912 /* Set sector size, and total number of sectors */ 4913 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4914 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4915 4916 /* Let the HBA tell us its geometry */ 4917 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4918 4919 /* A value of -1 indicates an undefined "geometry" property */ 4920 if (geombuf == (-1)) { 4921 return (EINVAL); 4922 } 4923 4924 /* Initialize the logical geometry cache. 
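 * The "geometry" capability word packs nhead into its upper 16 bits * and nsect into its lower 16; e.g. a hypothetical geombuf of * 0x00100020 decodes to 16 heads and 32 sectors per track.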
*/ 4925 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4926 lgeom_p->g_nsect = geombuf & 0xffff; 4927 lgeom_p->g_secsize = un->un_sys_blocksize; 4928 4929 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4930 4931 /* 4932 * Note: The driver originally converted the capacity value from 4933 * target blocks to system blocks. However, the capacity value passed 4934 * to this routine is already in terms of system blocks (this scaling 4935 * is done when the READ CAPACITY command is issued and processed). 4936 * This 'error' may have gone undetected because the usage of g_ncyl 4937 * (which is based upon g_capacity) is very limited within the driver. 4938 */ 4939 lgeom_p->g_capacity = capacity; 4940 4941 /* 4942 * Set ncyl to zero if the HBA returned a zero nhead or nsect value. The 4943 * HBA may return zero values if the device has been removed. 4944 */ 4945 if (spc == 0) { 4946 lgeom_p->g_ncyl = 0; 4947 } else { 4948 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4949 } 4950 lgeom_p->g_acyl = 0; 4951 4952 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4953 return (0); 4954 4955 } 4956 /* 4957 * Function: sd_update_block_info 4958 * 4959 * Description: Store the new target sector size and capacity in the 4960 * driver soft state and mark each stored value as valid. 4961 * 4962 * Arguments: un: unit struct. 4963 * lbasize: new target sector size 4964 * capacity: new target capacity, i.e., block count 4965 * 4966 * Context: Kernel thread context 4967 */ 4968 4969 static void 4970 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 4971 { 4972 if (lbasize != 0) { 4973 un->un_tgt_blocksize = lbasize; 4974 un->un_f_tgt_blocksize_is_valid = TRUE; 4975 } 4976 4977 if (capacity != 0) { 4978 un->un_blockcount = capacity; 4979 un->un_f_blockcount_is_valid = TRUE; 4980 } 4981 } 4982 4983 4984 /* 4985 * Function: sd_register_devid 4986 * 4987 * Description: This routine will obtain the device id information from the 4988 * target, obtain the serial number, and register the device 4989 * id with the ddi framework. 4990 * 4991 * Arguments: ssc - access handle containing the soft state pointer 4992 * devi - the system's dev_info_t for the device 4993 * reservation_flag - indicates if a reservation conflict 4994 * occurred during attach 4995 * 4996 * Context: Kernel Thread 4997 */ 4998 static void 4999 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag) 5000 { 5001 int rval = 0; 5002 uchar_t *inq80 = NULL; 5003 size_t inq80_len = MAX_INQUIRY_SIZE; 5004 size_t inq80_resid = 0; 5005 uchar_t *inq83 = NULL; 5006 size_t inq83_len = MAX_INQUIRY_SIZE; 5007 size_t inq83_resid = 0; 5008 int dlen, len; 5009 char *sn; 5010 struct sd_lun *un; 5011 5012 ASSERT(ssc != NULL); 5013 un = ssc->ssc_un; 5014 ASSERT(un != NULL); 5015 ASSERT(mutex_owned(SD_MUTEX(un))); 5016 ASSERT((SD_DEVINFO(un)) == devi); 5017 5018 /* 5019 * If transport has already registered a devid for this target 5020 * then that takes precedence over the driver's determination 5021 * of the devid. 5022 */ 5023 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) { 5024 ASSERT(un->un_devid); 5025 return; /* use devid registered by the transport */ 5026 } 5027 5028 /* 5029 * This is the case of antiquated Sun disk drives that have the 5030 * FAB_DEVID property set in the disk_table. These drives 5031 * manage their devids by storing them in the last two available 5032 * sectors on the drive and have them fabricated by the ddi layer by 5033 * calling ddi_devid_init and passing the DEVID_FAB flag.
5034 */ 5035 if (un->un_f_opt_fab_devid == TRUE) { 5036 /* 5037 * Depending on EINVAL isn't reliable, since a reserved disk 5038 * may result in invalid geometry, so check to make sure a 5039 * reservation conflict did not occur during attach. 5040 */ 5041 if ((sd_get_devid(ssc) == EINVAL) && 5042 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5043 /* 5044 * The devid is invalid AND there is no reservation 5045 * conflict. Fabricate a new devid. 5046 */ 5047 (void) sd_create_devid(ssc); 5048 } 5049 5050 /* Register the devid if it exists */ 5051 if (un->un_devid != NULL) { 5052 (void) ddi_devid_register(SD_DEVINFO(un), 5053 un->un_devid); 5054 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5055 "sd_register_devid: Devid Fabricated\n"); 5056 } 5057 return; 5058 } 5059 5060 /* 5061 * We check the availability of the World Wide Name (0x83) and Unit 5062 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 5063 * un_vpd_page_mask from them, we decide which way to get the WWN. If 5064 * 0x83 is available, that is the best choice. Our next choice is 5065 * 0x80. If neither are available, we munge the devid from the device 5066 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 5067 * to fabricate a devid for non-Sun qualified disks. 5068 */ 5069 if (sd_check_vpd_page_support(ssc) == 0) { 5070 /* collect page 80 data if available */ 5071 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5072 5073 mutex_exit(SD_MUTEX(un)); 5074 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5075 5076 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len, 5077 0x01, 0x80, &inq80_resid); 5078 5079 if (rval != 0) { 5080 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5081 kmem_free(inq80, inq80_len); 5082 inq80 = NULL; 5083 inq80_len = 0; 5084 } else if (ddi_prop_exists( 5085 DDI_DEV_T_NONE, SD_DEVINFO(un), 5086 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 5087 INQUIRY_SERIAL_NO) == 0) { 5088 /* 5089 * If we don't already have a serial number 5090 * property, do quick verify of data returned 5091 * and define property. 5092 */ 5093 dlen = inq80_len - inq80_resid; 5094 len = (size_t)inq80[3]; 5095 if ((dlen >= 4) && ((len + 4) <= dlen)) { 5096 /* 5097 * Ensure sn termination, skip leading 5098 * blanks, and create property 5099 * 'inquiry-serial-no'. 
5100 */ 5101 sn = (char *)&inq80[4]; 5102 sn[len] = 0; 5103 while (*sn && (*sn == ' ')) 5104 sn++; 5105 if (*sn) { 5106 (void) ddi_prop_update_string( 5107 DDI_DEV_T_NONE, 5108 SD_DEVINFO(un), 5109 INQUIRY_SERIAL_NO, sn); 5110 } 5111 } 5112 } 5113 mutex_enter(SD_MUTEX(un)); 5114 } 5115 5116 /* collect page 83 data if available */ 5117 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 5118 mutex_exit(SD_MUTEX(un)); 5119 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 5120 5121 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len, 5122 0x01, 0x83, &inq83_resid); 5123 5124 if (rval != 0) { 5125 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5126 kmem_free(inq83, inq83_len); 5127 inq83 = NULL; 5128 inq83_len = 0; 5129 } 5130 mutex_enter(SD_MUTEX(un)); 5131 } 5132 } 5133 5134 /* encode best devid possible based on data available */ 5135 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 5136 (char *)ddi_driver_name(SD_DEVINFO(un)), 5137 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 5138 inq80, inq80_len - inq80_resid, inq83, inq83_len - 5139 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 5140 5141 /* devid successfully encoded, register devid */ 5142 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 5143 5144 } else { 5145 /* 5146 * Unable to encode a devid based on data available. 5147 * This is not a Sun qualified disk. Older Sun disk 5148 * drives that have the SD_FAB_DEVID property 5149 * set in the disk_table and non Sun qualified 5150 * disks are treated in the same manner. These 5151 * drives manage the devid's by storing them in 5152 * last 2 available sectors on the drive and 5153 * have them fabricated by the ddi layer by 5154 * calling ddi_devid_init and passing the 5155 * DEVID_FAB flag. 5156 * Create a fabricate devid only if there's no 5157 * fabricate devid existed. 5158 */ 5159 if (sd_get_devid(ssc) == EINVAL) { 5160 (void) sd_create_devid(ssc); 5161 } 5162 un->un_f_opt_fab_devid = TRUE; 5163 5164 /* Register the devid if it exists */ 5165 if (un->un_devid != NULL) { 5166 (void) ddi_devid_register(SD_DEVINFO(un), 5167 un->un_devid); 5168 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5169 "sd_register_devid: devid fabricated using " 5170 "ddi framework\n"); 5171 } 5172 } 5173 5174 /* clean up resources */ 5175 if (inq80 != NULL) { 5176 kmem_free(inq80, inq80_len); 5177 } 5178 if (inq83 != NULL) { 5179 kmem_free(inq83, inq83_len); 5180 } 5181 } 5182 5183 5184 5185 /* 5186 * Function: sd_get_devid 5187 * 5188 * Description: This routine will return 0 if a valid device id has been 5189 * obtained from the target and stored in the soft state. If a 5190 * valid device id has not been previously read and stored, a 5191 * read attempt will be made. 
5192 * 5193 * Arguments: un - driver soft state (unit) structure 5194 * 5195 * Return Code: 0 if we successfully get the device id 5196 * 5197 * Context: Kernel Thread 5198 */ 5199 5200 static int 5201 sd_get_devid(sd_ssc_t *ssc) 5202 { 5203 struct dk_devid *dkdevid; 5204 ddi_devid_t tmpid; 5205 uint_t *ip; 5206 size_t sz; 5207 diskaddr_t blk; 5208 int status; 5209 int chksum; 5210 int i; 5211 size_t buffer_size; 5212 struct sd_lun *un; 5213 5214 ASSERT(ssc != NULL); 5215 un = ssc->ssc_un; 5216 ASSERT(un != NULL); 5217 ASSERT(mutex_owned(SD_MUTEX(un))); 5218 5219 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 5220 un); 5221 5222 if (un->un_devid != NULL) { 5223 return (0); 5224 } 5225 5226 mutex_exit(SD_MUTEX(un)); 5227 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5228 (void *)SD_PATH_DIRECT) != 0) { 5229 mutex_enter(SD_MUTEX(un)); 5230 return (EINVAL); 5231 } 5232 5233 /* 5234 * Read and verify device id, stored in the reserved cylinders at the 5235 * end of the disk. Backup label is on the odd sectors of the last 5236 * track of the last cylinder. Device id will be on track of the next 5237 * to last cylinder. 5238 */ 5239 mutex_enter(SD_MUTEX(un)); 5240 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 5241 mutex_exit(SD_MUTEX(un)); 5242 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 5243 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk, 5244 SD_PATH_DIRECT); 5245 5246 if (status != 0) { 5247 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5248 goto error; 5249 } 5250 5251 /* Validate the revision */ 5252 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 5253 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 5254 status = EINVAL; 5255 goto error; 5256 } 5257 5258 /* Calculate the checksum */ 5259 chksum = 0; 5260 ip = (uint_t *)dkdevid; 5261 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5262 i++) { 5263 chksum ^= ip[i]; 5264 } 5265 5266 /* Compare the checksums */ 5267 if (DKD_GETCHKSUM(dkdevid) != chksum) { 5268 status = EINVAL; 5269 goto error; 5270 } 5271 5272 /* Validate the device id */ 5273 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 5274 status = EINVAL; 5275 goto error; 5276 } 5277 5278 /* 5279 * Store the device id in the driver soft state 5280 */ 5281 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 5282 tmpid = kmem_alloc(sz, KM_SLEEP); 5283 5284 mutex_enter(SD_MUTEX(un)); 5285 5286 un->un_devid = tmpid; 5287 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 5288 5289 kmem_free(dkdevid, buffer_size); 5290 5291 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 5292 5293 return (status); 5294 error: 5295 mutex_enter(SD_MUTEX(un)); 5296 kmem_free(dkdevid, buffer_size); 5297 return (status); 5298 } 5299 5300 5301 /* 5302 * Function: sd_create_devid 5303 * 5304 * Description: This routine will fabricate the device id and write it 5305 * to the disk. 
5306 * 5307 * Arguments: un - driver soft state (unit) structure 5308 * 5309 * Return Code: value of the fabricated device id 5310 * 5311 * Context: Kernel Thread 5312 */ 5313 5314 static ddi_devid_t 5315 sd_create_devid(sd_ssc_t *ssc) 5316 { 5317 struct sd_lun *un; 5318 5319 ASSERT(ssc != NULL); 5320 un = ssc->ssc_un; 5321 ASSERT(un != NULL); 5322 5323 /* Fabricate the devid */ 5324 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 5325 == DDI_FAILURE) { 5326 return (NULL); 5327 } 5328 5329 /* Write the devid to disk */ 5330 if (sd_write_deviceid(ssc) != 0) { 5331 ddi_devid_free(un->un_devid); 5332 un->un_devid = NULL; 5333 } 5334 5335 return (un->un_devid); 5336 } 5337 5338 5339 /* 5340 * Function: sd_write_deviceid 5341 * 5342 * Description: This routine will write the device id to the disk 5343 * reserved sector. 5344 * 5345 * Arguments: un - driver soft state (unit) structure 5346 * 5347 * Return Code: EINVAL 5348 * value returned by sd_send_scsi_cmd 5349 * 5350 * Context: Kernel Thread 5351 */ 5352 5353 static int 5354 sd_write_deviceid(sd_ssc_t *ssc) 5355 { 5356 struct dk_devid *dkdevid; 5357 diskaddr_t blk; 5358 uint_t *ip, chksum; 5359 int status; 5360 int i; 5361 struct sd_lun *un; 5362 5363 ASSERT(ssc != NULL); 5364 un = ssc->ssc_un; 5365 ASSERT(un != NULL); 5366 ASSERT(mutex_owned(SD_MUTEX(un))); 5367 5368 mutex_exit(SD_MUTEX(un)); 5369 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk, 5370 (void *)SD_PATH_DIRECT) != 0) { 5371 mutex_enter(SD_MUTEX(un)); 5372 return (-1); 5373 } 5374 5375 5376 /* Allocate the buffer */ 5377 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 5378 5379 /* Fill in the revision */ 5380 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 5381 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 5382 5383 /* Copy in the device id */ 5384 mutex_enter(SD_MUTEX(un)); 5385 bcopy(un->un_devid, &dkdevid->dkd_devid, 5386 ddi_devid_sizeof(un->un_devid)); 5387 mutex_exit(SD_MUTEX(un)); 5388 5389 /* Calculate the checksum */ 5390 chksum = 0; 5391 ip = (uint_t *)dkdevid; 5392 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5393 i++) { 5394 chksum ^= ip[i]; 5395 } 5396 5397 /* Fill-in checksum */ 5398 DKD_FORMCHKSUM(chksum, dkdevid); 5399 5400 /* Write the reserved sector */ 5401 status = sd_send_scsi_WRITE(ssc, dkdevid, un->un_sys_blocksize, blk, 5402 SD_PATH_DIRECT); 5403 if (status != 0) 5404 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5405 5406 kmem_free(dkdevid, un->un_sys_blocksize); 5407 5408 mutex_enter(SD_MUTEX(un)); 5409 return (status); 5410 } 5411 5412 5413 /* 5414 * Function: sd_check_vpd_page_support 5415 * 5416 * Description: This routine sends an inquiry command with the EVPD bit set and 5417 * a page code of 0x00 to the device. It is used to determine which 5418 * vital product pages are available to find the devid. We are 5419 * looking for pages 0x83 or 0x80. If we return a negative 1, the 5420 * device does not support that command. 5421 * 5422 * Arguments: un - driver soft state (unit) structure 5423 * 5424 * Return Code: 0 - success 5425 * 1 - check condition 5426 * 5427 * Context: This routine can sleep. 
5428 */ 5429 5430 static int 5431 sd_check_vpd_page_support(sd_ssc_t *ssc) 5432 { 5433 uchar_t *page_list = NULL; 5434 uchar_t page_length = 0xff; /* Use max possible length */ 5435 uchar_t evpd = 0x01; /* Set the EVPD bit */ 5436 uchar_t page_code = 0x00; /* Supported VPD Pages */ 5437 int rval = 0; 5438 int counter; 5439 struct sd_lun *un; 5440 5441 ASSERT(ssc != NULL); 5442 un = ssc->ssc_un; 5443 ASSERT(un != NULL); 5444 ASSERT(mutex_owned(SD_MUTEX(un))); 5445 5446 mutex_exit(SD_MUTEX(un)); 5447 5448 /* 5449 * We'll set the page length to the maximum to save figuring it out 5450 * with an additional call. 5451 */ 5452 page_list = kmem_zalloc(page_length, KM_SLEEP); 5453 5454 rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd, 5455 page_code, NULL); 5456 5457 if (rval != 0) 5458 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5459 5460 mutex_enter(SD_MUTEX(un)); 5461 5462 /* 5463 * Now we must validate that the device accepted the command, as some 5464 * drives do not support it. If the drive does support it, we will 5465 * return 0, and the supported pages will be in un_vpd_page_mask. If 5466 * not, we return -1. 5467 */ 5468 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 5469 /* Loop to find one of the 2 pages we need */ 5470 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 5471 5472 /* 5473 * Pages are returned in ascending order, and 0x83 is what we 5474 * are hoping for. 5475 */ 5476 while ((page_list[counter] <= 0x86) && 5477 (counter <= (page_list[VPD_PAGE_LENGTH] + 5478 VPD_HEAD_OFFSET))) { 5479 /* 5480 * Add 3 because page_list[3] is the number of 5481 * pages minus 3 5482 */ 5483 5484 switch (page_list[counter]) { 5485 case 0x00: 5486 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 5487 break; 5488 case 0x80: 5489 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 5490 break; 5491 case 0x81: 5492 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 5493 break; 5494 case 0x82: 5495 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 5496 break; 5497 case 0x83: 5498 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 5499 break; 5500 case 0x86: 5501 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG; 5502 break; 5503 } 5504 counter++; 5505 } 5506 5507 } else { 5508 rval = -1; 5509 5510 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5511 "sd_check_vpd_page_support: This drive does not implement " 5512 "VPD pages.\n"); 5513 } 5514 5515 kmem_free(page_list, page_length); 5516 5517 return (rval); 5518 } 5519 5520 5521 /* 5522 * Function: sd_setup_pm 5523 * 5524 * Description: Initialize Power Management on the device 5525 * 5526 * Context: Kernel Thread 5527 */ 5528 5529 static void 5530 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi) 5531 { 5532 uint_t log_page_size; 5533 uchar_t *log_page_data; 5534 int rval = 0; 5535 struct sd_lun *un; 5536 5537 ASSERT(ssc != NULL); 5538 un = ssc->ssc_un; 5539 ASSERT(un != NULL); 5540 5541 /* 5542 * Since we are called from attach, holding a mutex for 5543 * un is unnecessary. Because some of the routines called 5544 * from here require SD_MUTEX to not be held, assert this 5545 * right up front. 5546 */ 5547 ASSERT(!mutex_owned(SD_MUTEX(un))); 5548 /* 5549 * Since the sd device does not have the 'reg' property, 5550 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 5551 * The following code is to tell cpr that this device 5552 * DOES need to be suspended and resumed. 
5553 */ 5554 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 5555 "pm-hardware-state", "needs-suspend-resume"); 5556 5557 /* 5558 * This complies with the new power management framework 5559 * for certain desktop machines. Create the pm_components 5560 * property as a string array property. 5561 */ 5562 if (un->un_f_pm_supported) { 5563 /* 5564 * not all devices have a motor, try it first. 5565 * some devices may return ILLEGAL REQUEST, some 5566 * will hang 5567 * The following START_STOP_UNIT is used to check if target 5568 * device has a motor. 5569 */ 5570 un->un_f_start_stop_supported = TRUE; 5571 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 5572 SD_PATH_DIRECT); 5573 5574 if (rval != 0) { 5575 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5576 un->un_f_start_stop_supported = FALSE; 5577 } 5578 5579 /* 5580 * create pm properties anyways otherwise the parent can't 5581 * go to sleep 5582 */ 5583 (void) sd_create_pm_components(devi, un); 5584 un->un_f_pm_is_enabled = TRUE; 5585 return; 5586 } 5587 5588 if (!un->un_f_log_sense_supported) { 5589 un->un_power_level = SD_SPINDLE_ON; 5590 un->un_f_pm_is_enabled = FALSE; 5591 return; 5592 } 5593 5594 rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE); 5595 5596 #ifdef SDDEBUG 5597 if (sd_force_pm_supported) { 5598 /* Force a successful result */ 5599 rval = 1; 5600 } 5601 #endif 5602 5603 /* 5604 * If the start-stop cycle counter log page is not supported 5605 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 5606 * then we should not create the pm_components property. 5607 */ 5608 if (rval == -1) { 5609 /* 5610 * Error. 5611 * Reading log sense failed, most likely this is 5612 * an older drive that does not support log sense. 5613 * If this fails auto-pm is not supported. 5614 */ 5615 un->un_power_level = SD_SPINDLE_ON; 5616 un->un_f_pm_is_enabled = FALSE; 5617 5618 } else if (rval == 0) { 5619 /* 5620 * Page not found. 5621 * The start stop cycle counter is implemented as page 5622 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 5623 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 5624 */ 5625 if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) { 5626 /* 5627 * Page found, use this one. 5628 */ 5629 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 5630 un->un_f_pm_is_enabled = TRUE; 5631 } else { 5632 /* 5633 * Error or page not found. 5634 * auto-pm is not supported for this device. 5635 */ 5636 un->un_power_level = SD_SPINDLE_ON; 5637 un->un_f_pm_is_enabled = FALSE; 5638 } 5639 } else { 5640 /* 5641 * Page found, use it. 5642 */ 5643 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 5644 un->un_f_pm_is_enabled = TRUE; 5645 } 5646 5647 5648 if (un->un_f_pm_is_enabled == TRUE) { 5649 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 5650 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 5651 5652 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 5653 log_page_size, un->un_start_stop_cycle_page, 5654 0x01, 0, SD_PATH_DIRECT); 5655 5656 if (rval != 0) { 5657 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 5658 } 5659 5660 #ifdef SDDEBUG 5661 if (sd_force_pm_supported) { 5662 /* Force a successful result */ 5663 rval = 0; 5664 } 5665 #endif 5666 5667 /* 5668 * If the Log sense for Page( Start/stop cycle counter page) 5669 * succeeds, then power management is supported and we can 5670 * enable auto-pm. 
5671 */ 5672 if (rval == 0) { 5673 (void) sd_create_pm_components(devi, un); 5674 } else { 5675 un->un_power_level = SD_SPINDLE_ON; 5676 un->un_f_pm_is_enabled = FALSE; 5677 } 5678 5679 kmem_free(log_page_data, log_page_size); 5680 } 5681 } 5682 5683 5684 /* 5685 * Function: sd_create_pm_components 5686 * 5687 * Description: Initialize PM property. 5688 * 5689 * Context: Kernel thread context 5690 */ 5691 5692 static void 5693 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 5694 { 5695 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 5696 5697 ASSERT(!mutex_owned(SD_MUTEX(un))); 5698 5699 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 5700 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 5701 /* 5702 * When components are initially created they are idle, 5703 * power up any non-removables. 5704 * Note: the return value of pm_raise_power can't be used 5705 * for determining if PM should be enabled for this device. 5706 * Even if you check the return values and remove this 5707 * property created above, the PM framework will not honor the 5708 * change after the first call to pm_raise_power. Hence, 5709 * removal of that property does not help if pm_raise_power 5710 * fails. In the case of removable media, the start/stop 5711 * will fail if the media is not present. 5712 */ 5713 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0, 5714 SD_SPINDLE_ON) == DDI_SUCCESS)) { 5715 mutex_enter(SD_MUTEX(un)); 5716 un->un_power_level = SD_SPINDLE_ON; 5717 mutex_enter(&un->un_pm_mutex); 5718 /* Set to on and not busy. */ 5719 un->un_pm_count = 0; 5720 } else { 5721 mutex_enter(SD_MUTEX(un)); 5722 un->un_power_level = SD_SPINDLE_OFF; 5723 mutex_enter(&un->un_pm_mutex); 5724 /* Set to off. */ 5725 un->un_pm_count = -1; 5726 } 5727 mutex_exit(&un->un_pm_mutex); 5728 mutex_exit(SD_MUTEX(un)); 5729 } else { 5730 un->un_power_level = SD_SPINDLE_ON; 5731 un->un_f_pm_is_enabled = FALSE; 5732 } 5733 } 5734 5735 5736 /* 5737 * Function: sd_ddi_suspend 5738 * 5739 * Description: Performs system power-down operations. This includes 5740 * setting the drive state to indicate its suspended so 5741 * that no new commands will be accepted. Also, wait for 5742 * all commands that are in transport or queued to a timer 5743 * for retry to complete. All timeout threads are cancelled. 5744 * 5745 * Return Code: DDI_FAILURE or DDI_SUCCESS 5746 * 5747 * Context: Kernel thread context 5748 */ 5749 5750 static int 5751 sd_ddi_suspend(dev_info_t *devi) 5752 { 5753 struct sd_lun *un; 5754 clock_t wait_cmds_complete; 5755 5756 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 5757 if (un == NULL) { 5758 return (DDI_FAILURE); 5759 } 5760 5761 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 5762 5763 mutex_enter(SD_MUTEX(un)); 5764 5765 /* Return success if the device is already suspended. */ 5766 if (un->un_state == SD_STATE_SUSPENDED) { 5767 mutex_exit(SD_MUTEX(un)); 5768 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5769 "device already suspended, exiting\n"); 5770 return (DDI_SUCCESS); 5771 } 5772 5773 /* Return failure if the device is being used by HA */ 5774 if (un->un_resvd_status & 5775 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 5776 mutex_exit(SD_MUTEX(un)); 5777 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5778 "device in use by HA, exiting\n"); 5779 return (DDI_FAILURE); 5780 } 5781 5782 /* 5783 * Return failure if the device is in a resource wait 5784 * or power changing state. 
5785 */ 5786 if ((un->un_state == SD_STATE_RWAIT) || 5787 (un->un_state == SD_STATE_PM_CHANGING)) { 5788 mutex_exit(SD_MUTEX(un)); 5789 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 5790 "device in resource wait state, exiting\n"); 5791 return (DDI_FAILURE); 5792 } 5793 5794 5795 un->un_save_state = un->un_last_state; 5796 New_state(un, SD_STATE_SUSPENDED); 5797 5798 /* 5799 * Wait for all commands that are in transport or queued to a timer 5800 * for retry to complete. 5801 * 5802 * While waiting, no new commands will be accepted or sent because of 5803 * the new state we set above. 5804 * 5805 * Wait till current operation has completed. If we are in the resource 5806 * wait state (with an intr outstanding) then we need to wait till the 5807 * intr completes and starts the next cmd. We want to wait for 5808 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 5809 */ 5810 wait_cmds_complete = ddi_get_lbolt() + 5811 (sd_wait_cmds_complete * drv_usectohz(1000000)); 5812 5813 while (un->un_ncmds_in_transport != 0) { 5814 /* 5815 * Fail if commands do not finish in the specified time. 5816 */ 5817 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 5818 wait_cmds_complete) == -1) { 5819 /* 5820 * Undo the state changes made above. Everything 5821 * must go back to it's original value. 5822 */ 5823 Restore_state(un); 5824 un->un_last_state = un->un_save_state; 5825 /* Wake up any threads that might be waiting. */ 5826 cv_broadcast(&un->un_suspend_cv); 5827 mutex_exit(SD_MUTEX(un)); 5828 SD_ERROR(SD_LOG_IO_PM, un, 5829 "sd_ddi_suspend: failed due to outstanding cmds\n"); 5830 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 5831 return (DDI_FAILURE); 5832 } 5833 } 5834 5835 /* 5836 * Cancel SCSI watch thread and timeouts, if any are active 5837 */ 5838 5839 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 5840 opaque_t temp_token = un->un_swr_token; 5841 mutex_exit(SD_MUTEX(un)); 5842 scsi_watch_suspend(temp_token); 5843 mutex_enter(SD_MUTEX(un)); 5844 } 5845 5846 if (un->un_reset_throttle_timeid != NULL) { 5847 timeout_id_t temp_id = un->un_reset_throttle_timeid; 5848 un->un_reset_throttle_timeid = NULL; 5849 mutex_exit(SD_MUTEX(un)); 5850 (void) untimeout(temp_id); 5851 mutex_enter(SD_MUTEX(un)); 5852 } 5853 5854 if (un->un_dcvb_timeid != NULL) { 5855 timeout_id_t temp_id = un->un_dcvb_timeid; 5856 un->un_dcvb_timeid = NULL; 5857 mutex_exit(SD_MUTEX(un)); 5858 (void) untimeout(temp_id); 5859 mutex_enter(SD_MUTEX(un)); 5860 } 5861 5862 mutex_enter(&un->un_pm_mutex); 5863 if (un->un_pm_timeid != NULL) { 5864 timeout_id_t temp_id = un->un_pm_timeid; 5865 un->un_pm_timeid = NULL; 5866 mutex_exit(&un->un_pm_mutex); 5867 mutex_exit(SD_MUTEX(un)); 5868 (void) untimeout(temp_id); 5869 mutex_enter(SD_MUTEX(un)); 5870 } else { 5871 mutex_exit(&un->un_pm_mutex); 5872 } 5873 5874 if (un->un_retry_timeid != NULL) { 5875 timeout_id_t temp_id = un->un_retry_timeid; 5876 un->un_retry_timeid = NULL; 5877 mutex_exit(SD_MUTEX(un)); 5878 (void) untimeout(temp_id); 5879 mutex_enter(SD_MUTEX(un)); 5880 5881 if (un->un_retry_bp != NULL) { 5882 un->un_retry_bp->av_forw = un->un_waitq_headp; 5883 un->un_waitq_headp = un->un_retry_bp; 5884 if (un->un_waitq_tailp == NULL) { 5885 un->un_waitq_tailp = un->un_retry_bp; 5886 } 5887 un->un_retry_bp = NULL; 5888 un->un_retry_statp = NULL; 5889 } 5890 } 5891 5892 if (un->un_direct_priority_timeid != NULL) { 5893 timeout_id_t temp_id = un->un_direct_priority_timeid; 5894 un->un_direct_priority_timeid = NULL; 5895 mutex_exit(SD_MUTEX(un)); 5896 (void) 
untimeout(temp_id); 5897 mutex_enter(SD_MUTEX(un)); 5898 } 5899 5900 if (un->un_f_is_fibre == TRUE) { 5901 /* 5902 * Remove callbacks for insert and remove events 5903 */ 5904 if (un->un_insert_event != NULL) { 5905 mutex_exit(SD_MUTEX(un)); 5906 (void) ddi_remove_event_handler(un->un_insert_cb_id); 5907 mutex_enter(SD_MUTEX(un)); 5908 un->un_insert_event = NULL; 5909 } 5910 5911 if (un->un_remove_event != NULL) { 5912 mutex_exit(SD_MUTEX(un)); 5913 (void) ddi_remove_event_handler(un->un_remove_cb_id); 5914 mutex_enter(SD_MUTEX(un)); 5915 un->un_remove_event = NULL; 5916 } 5917 } 5918 5919 mutex_exit(SD_MUTEX(un)); 5920 5921 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 5922 5923 return (DDI_SUCCESS); 5924 } 5925 5926 5927 /* 5928 * Function: sd_ddi_pm_suspend 5929 * 5930 * Description: Set the drive state to low power. 5931 * Someone else is required to actually change the drive 5932 * power level. 5933 * 5934 * Arguments: un - driver soft state (unit) structure 5935 * 5936 * Return Code: DDI_FAILURE or DDI_SUCCESS 5937 * 5938 * Context: Kernel thread context 5939 */ 5940 5941 static int 5942 sd_ddi_pm_suspend(struct sd_lun *un) 5943 { 5944 ASSERT(un != NULL); 5945 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 5946 5947 ASSERT(!mutex_owned(SD_MUTEX(un))); 5948 mutex_enter(SD_MUTEX(un)); 5949 5950 /* 5951 * Exit if power management is not enabled for this device, or if 5952 * the device is being used by HA. 5953 */ 5954 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 5955 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 5956 mutex_exit(SD_MUTEX(un)); 5957 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 5958 return (DDI_SUCCESS); 5959 } 5960 5961 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 5962 un->un_ncmds_in_driver); 5963 5964 /* 5965 * See if the device is not busy, ie.: 5966 * - we have no commands in the driver for this device 5967 * - not waiting for resources 5968 */ 5969 if ((un->un_ncmds_in_driver == 0) && 5970 (un->un_state != SD_STATE_RWAIT)) { 5971 /* 5972 * The device is not busy, so it is OK to go to low power state. 5973 * Indicate low power, but rely on someone else to actually 5974 * change it. 5975 */ 5976 mutex_enter(&un->un_pm_mutex); 5977 un->un_pm_count = -1; 5978 mutex_exit(&un->un_pm_mutex); 5979 un->un_power_level = SD_SPINDLE_OFF; 5980 } 5981 5982 mutex_exit(SD_MUTEX(un)); 5983 5984 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 5985 5986 return (DDI_SUCCESS); 5987 } 5988 5989 5990 /* 5991 * Function: sd_ddi_resume 5992 * 5993 * Description: Performs system power-up operations. 5994 * 5995 * Return Code: DDI_SUCCESS 5996 * DDI_FAILURE 5997 * 5998 * Context: Kernel thread context 5999 */ 6000 6001 static int 6002 sd_ddi_resume(dev_info_t *devi) 6003 { 6004 struct sd_lun *un; 6005 6006 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6007 if (un == NULL) { 6008 return (DDI_FAILURE); 6009 } 6010 6011 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 6012 6013 mutex_enter(SD_MUTEX(un)); 6014 Restore_state(un); 6015 6016 /* 6017 * Restore the state which was saved, to give 6018 * un_last_state the right value. 6019 */ 6020 un->un_last_state = un->un_save_state; 6021 /* 6022 * Note: throttle comes back at full. 6023 * Also note: this MUST be done before calling pm_raise_power 6024 * otherwise the system can get hung in biowait. The scenario where 6025 * this'll happen is under cpr suspend.
Writing of the system 6026 * state goes through sddump, which writes 0 to un_throttle. If 6027 * writing the system state then fails, for example if the partition is 6028 * too small, then cpr attempts a resume. If throttle isn't restored 6029 * from the saved value until after calling pm_raise_power then 6030 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 6031 * in biowait. 6032 */ 6033 un->un_throttle = un->un_saved_throttle; 6034 6035 /* 6036 * The chance of failure is very rare, as the only command issued in 6037 * the power entry point is the START command when you transition from 6038 * 0->1 or unknown->1. Put the device into the SPINDLE ON state 6039 * irrespective of the state in which suspend was done. Ignore the 6040 * return value, as the resume should not fail. In the case of 6041 * removable media the media need not be inserted and hence there is 6042 * a chance that raise power will fail with media not present. 6043 */ 6044 if (un->un_f_attach_spinup) { 6045 mutex_exit(SD_MUTEX(un)); 6046 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 6047 mutex_enter(SD_MUTEX(un)); 6048 } 6049 6050 /* 6051 * Don't broadcast to the suspend cv and therefore possibly 6052 * start I/O until after power has been restored. 6053 */ 6054 cv_broadcast(&un->un_suspend_cv); 6055 cv_broadcast(&un->un_state_cv); 6056 6057 /* restart thread */ 6058 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 6059 scsi_watch_resume(un->un_swr_token); 6060 } 6061 6062 #if (defined(__fibre)) 6063 if (un->un_f_is_fibre == TRUE) { 6064 /* 6065 * Add callbacks for insert and remove events 6066 */ 6067 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 6068 sd_init_event_callbacks(un); 6069 } 6070 } 6071 #endif 6072 6073 /* 6074 * Transport any pending commands to the target. 6075 * 6076 * If this is a low-activity device, commands in the queue will have to 6077 * wait until new commands come in, which may take a while. Also, we 6078 * specifically don't check un_ncmds_in_transport because we know that 6079 * there really are no commands in progress after the unit was 6080 * suspended and we could have reached the throttle level, been 6081 * suspended, and have no new commands coming in for a while. Highly 6082 * unlikely, but so is the low-activity disk scenario. 6083 */ 6084 ddi_xbuf_dispatch(un->un_xbuf_attr); 6085 6086 sd_start_cmds(un, NULL); 6087 mutex_exit(SD_MUTEX(un)); 6088 6089 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 6090 6091 return (DDI_SUCCESS); 6092 } 6093 6094 6095 /* 6096 * Function: sd_ddi_pm_resume 6097 * 6098 * Description: Set the drive state to powered on. 6099 * Someone else is required to actually change the drive 6100 * power level. 6101 * 6102 * Arguments: un - driver soft state (unit) structure 6103 * 6104 * Return Code: DDI_SUCCESS 6105 * 6106 * Context: Kernel thread context 6107 */ 6108 6109 static int 6110 sd_ddi_pm_resume(struct sd_lun *un) 6111 { 6112 ASSERT(un != NULL); 6113 6114 ASSERT(!mutex_owned(SD_MUTEX(un))); 6115 mutex_enter(SD_MUTEX(un)); 6116 un->un_power_level = SD_SPINDLE_ON; 6117 6118 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6119 mutex_enter(&un->un_pm_mutex); 6120 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6121 un->un_pm_count++; 6122 ASSERT(un->un_pm_count == 0); 6123 /* 6124 * Note: no longer do the cv_broadcast on un_suspend_cv. The 6125 * un_suspend_cv is for a system resume, not a power management 6126 * device resume.
(4297749) 6127 * cv_broadcast(&un->un_suspend_cv); 6128 */ 6129 } 6130 mutex_exit(&un->un_pm_mutex); 6131 mutex_exit(SD_MUTEX(un)); 6132 6133 return (DDI_SUCCESS); 6134 } 6135 6136 6137 /* 6138 * Function: sd_pm_idletimeout_handler 6139 * 6140 * Description: A timer routine that's active only while a device is busy. 6141 * The purpose is to extend slightly the pm framework's busy 6142 * view of the device to prevent busy/idle thrashing for 6143 * back-to-back commands. Do this by comparing the current time 6144 * to the time at which the last command completed, and when the 6145 * difference is greater than sd_pm_idletime, call 6146 * pm_idle_component. In addition to indicating idle to the pm 6147 * framework, update the chain type to again use the internal pm 6148 * layers of the driver. 6149 * 6150 * Arguments: arg - driver soft state (unit) structure 6151 * 6152 * Context: Executes in a timeout(9F) thread context 6153 */ 6154 6155 static void 6156 sd_pm_idletimeout_handler(void *arg) 6157 { 6158 struct sd_lun *un = arg; 6159 6160 time_t now; 6161 6162 mutex_enter(&sd_detach_mutex); 6163 if (un->un_detach_count != 0) { 6164 /* Abort if the instance is detaching */ 6165 mutex_exit(&sd_detach_mutex); 6166 return; 6167 } 6168 mutex_exit(&sd_detach_mutex); 6169 6170 now = ddi_get_time(); 6171 /* 6172 * Grab both mutexes, in the proper order, since we're accessing 6173 * both PM and softstate variables. 6174 */ 6175 mutex_enter(SD_MUTEX(un)); 6176 mutex_enter(&un->un_pm_mutex); 6177 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 6178 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 6179 /* 6180 * Update the chain types. 6181 * This takes effect on the next new command received. 6182 */ 6183 if (un->un_f_non_devbsize_supported) { 6184 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 6185 } else { 6186 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 6187 } 6188 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6189 6190 SD_TRACE(SD_LOG_IO_PM, un, 6191 "sd_pm_idletimeout_handler: idling device\n"); 6192 (void) pm_idle_component(SD_DEVINFO(un), 0); 6193 un->un_pm_idle_timeid = NULL; 6194 } else { 6195 un->un_pm_idle_timeid = 6196 timeout(sd_pm_idletimeout_handler, un, 6197 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 6198 } 6199 mutex_exit(&un->un_pm_mutex); 6200 mutex_exit(SD_MUTEX(un)); 6201 } 6202 6203 6204 /* 6205 * Function: sd_pm_timeout_handler 6206 * 6207 * Description: Callback to tell framework we are idle. 6208 * 6209 * Context: timeout(9f) thread context. 6210 */ 6211 6212 static void 6213 sd_pm_timeout_handler(void *arg) 6214 { 6215 struct sd_lun *un = arg; 6216 6217 (void) pm_idle_component(SD_DEVINFO(un), 0); 6218 mutex_enter(&un->un_pm_mutex); 6219 un->un_pm_timeid = NULL; 6220 mutex_exit(&un->un_pm_mutex); 6221 } 6222 6223 6224 /* 6225 * Function: sdpower 6226 * 6227 * Description: PM entry point.
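 *		Called by the pm framework when a device component's power
 *		level must change; within this driver the change is requested
 *		through calls such as the one used in sd_ddi_resume():
 *
 *		    (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
 *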
6228 * 6229 * Return Code: DDI_SUCCESS 6230 * DDI_FAILURE 6231 * 6232 * Context: Kernel thread context 6233 */ 6234 6235 static int 6236 sdpower(dev_info_t *devi, int component, int level) 6237 { 6238 struct sd_lun *un; 6239 int instance; 6240 int rval = DDI_SUCCESS; 6241 uint_t i, log_page_size, maxcycles, ncycles; 6242 uchar_t *log_page_data; 6243 int log_sense_page; 6244 int medium_present; 6245 time_t intvlp; 6246 dev_t dev; 6247 struct pm_trans_data sd_pm_tran_data; 6248 uchar_t save_state; 6249 int sval; 6250 uchar_t state_before_pm; 6251 int got_semaphore_here; 6252 sd_ssc_t *ssc; 6253 6254 instance = ddi_get_instance(devi); 6255 6256 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 6257 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 6258 component != 0) { 6259 return (DDI_FAILURE); 6260 } 6261 6262 dev = sd_make_device(SD_DEVINFO(un)); 6263 ssc = sd_ssc_init(un); 6264 6265 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 6266 6267 /* 6268 * Must synchronize power down with close. 6269 * Attempt to decrement/acquire the open/close semaphore, 6270 * but do NOT wait on it. If it's not greater than zero, 6271 * ie. it can't be decremented without waiting, then 6272 * someone else, either open or close, already has it 6273 * and the try returns 0. Use that knowledge here to determine 6274 * if it's OK to change the device power level. 6275 * Also, only increment it on exit if it was decremented, ie. gotten, 6276 * here. 6277 */ 6278 got_semaphore_here = sema_tryp(&un->un_semoclose); 6279 6280 mutex_enter(SD_MUTEX(un)); 6281 6282 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 6283 un->un_ncmds_in_driver); 6284 6285 /* 6286 * If un_ncmds_in_driver is non-zero it indicates commands are 6287 * already being processed in the driver, or if the semaphore was 6288 * not gotten here it indicates an open or close is being processed. 6289 * At the same time somebody is requesting to go low power which 6290 * can't happen, therefore we need to return failure. 6291 */ 6292 if ((level == SD_SPINDLE_OFF) && 6293 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 6294 mutex_exit(SD_MUTEX(un)); 6295 6296 if (got_semaphore_here != 0) { 6297 sema_v(&un->un_semoclose); 6298 } 6299 SD_TRACE(SD_LOG_IO_PM, un, 6300 "sdpower: exit, device has queued cmds.\n"); 6301 6302 goto sdpower_failed; 6303 } 6304 6305 /* 6306 * If the state is OFFLINE, that means the disk is completely dead; 6307 * in our case, turning the disk on or off means sending commands, 6308 * which would fail anyway, so just return here. 6309 * 6310 * Power changes to a device that's OFFLINE or SUSPENDED 6311 * are not allowed. 6312 */ 6313 if ((un->un_state == SD_STATE_OFFLINE) || 6314 (un->un_state == SD_STATE_SUSPENDED)) { 6315 mutex_exit(SD_MUTEX(un)); 6316 6317 if (got_semaphore_here != 0) { 6318 sema_v(&un->un_semoclose); 6319 } 6320 SD_TRACE(SD_LOG_IO_PM, un, 6321 "sdpower: exit, device is off-line.\n"); 6322 6323 goto sdpower_failed; 6324 } 6325 6326 /* 6327 * Change the device's state to indicate its power level 6328 * is being changed. Do this to prevent a power off in the 6329 * middle of commands, which is especially bad on devices 6330 * that are really powered off instead of just spun down.
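 *		(Every exit path below restores un_state and broadcasts
 *		un_suspend_cv so that any thread waiting out the power
 *		change can proceed.)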
6331 */ 6332 state_before_pm = un->un_state; 6333 un->un_state = SD_STATE_PM_CHANGING; 6334 6335 mutex_exit(SD_MUTEX(un)); 6336 6337 /* 6338 * If "pm-capable" property is set to TRUE by HBA drivers, 6339 * bypass the following checking, otherwise, check the log 6340 * sense information for this device 6341 */ 6342 if ((level == SD_SPINDLE_OFF) && un->un_f_log_sense_supported) { 6343 /* 6344 * Get the log sense information to understand whether the 6345 * the powercycle counts have gone beyond the threshhold. 6346 */ 6347 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6348 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6349 6350 mutex_enter(SD_MUTEX(un)); 6351 log_sense_page = un->un_start_stop_cycle_page; 6352 mutex_exit(SD_MUTEX(un)); 6353 6354 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 6355 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 6356 6357 if (rval != 0) { 6358 if (rval == EIO) 6359 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6360 else 6361 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6362 } 6363 6364 #ifdef SDDEBUG 6365 if (sd_force_pm_supported) { 6366 /* Force a successful result */ 6367 rval = 0; 6368 } 6369 #endif 6370 if (rval != 0) { 6371 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 6372 "Log Sense Failed\n"); 6373 6374 kmem_free(log_page_data, log_page_size); 6375 /* Cannot support power management on those drives */ 6376 6377 if (got_semaphore_here != 0) { 6378 sema_v(&un->un_semoclose); 6379 } 6380 /* 6381 * On exit put the state back to it's original value 6382 * and broadcast to anyone waiting for the power 6383 * change completion. 6384 */ 6385 mutex_enter(SD_MUTEX(un)); 6386 un->un_state = state_before_pm; 6387 cv_broadcast(&un->un_suspend_cv); 6388 mutex_exit(SD_MUTEX(un)); 6389 SD_TRACE(SD_LOG_IO_PM, un, 6390 "sdpower: exit, Log Sense Failed.\n"); 6391 6392 goto sdpower_failed; 6393 } 6394 6395 /* 6396 * From the page data - Convert the essential information to 6397 * pm_trans_data 6398 */ 6399 maxcycles = 6400 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 6401 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 6402 6403 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 6404 6405 ncycles = 6406 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 6407 (log_page_data[0x26] << 8) | log_page_data[0x27]; 6408 6409 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 6410 6411 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 6412 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 6413 log_page_data[8+i]; 6414 } 6415 6416 kmem_free(log_page_data, log_page_size); 6417 6418 /* 6419 * Call pm_trans_check routine to get the Ok from 6420 * the global policy 6421 */ 6422 6423 sd_pm_tran_data.format = DC_SCSI_FORMAT; 6424 sd_pm_tran_data.un.scsi_cycles.flag = 0; 6425 6426 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 6427 #ifdef SDDEBUG 6428 if (sd_force_pm_supported) { 6429 /* Force a successful result */ 6430 rval = 1; 6431 } 6432 #endif 6433 switch (rval) { 6434 case 0: 6435 /* 6436 * Not Ok to Power cycle or error in parameters passed 6437 * Would have given the advised time to consider power 6438 * cycle. Based on the new intvlp parameter we are 6439 * supposed to pretend we are busy so that pm framework 6440 * will never call our power entry point. Because of 6441 * that install a timeout handler and wait for the 6442 * recommended time to elapse so that power management 6443 * can be effective again. 6444 * 6445 * To effect this behavior, call pm_busy_component to 6446 * indicate to the framework this device is busy. 
6447 * By not adjusting un_pm_count the rest of PM in 6448 * the driver will function normally, and independent 6449 * of this but because the framework is told the device 6450 * is busy it won't attempt powering down until it gets 6451 * a matching idle. The timeout handler sends this. 6452 * Note: sd_pm_entry can't be called here to do this 6453 * because sdpower may have been called as a result 6454 * of a call to pm_raise_power from within sd_pm_entry. 6455 * 6456 * If a timeout handler is already active then 6457 * don't install another. 6458 */ 6459 mutex_enter(&un->un_pm_mutex); 6460 if (un->un_pm_timeid == NULL) { 6461 un->un_pm_timeid = 6462 timeout(sd_pm_timeout_handler, 6463 un, intvlp * drv_usectohz(1000000)); 6464 mutex_exit(&un->un_pm_mutex); 6465 (void) pm_busy_component(SD_DEVINFO(un), 0); 6466 } else { 6467 mutex_exit(&un->un_pm_mutex); 6468 } 6469 if (got_semaphore_here != 0) { 6470 sema_v(&un->un_semoclose); 6471 } 6472 /* 6473 * On exit put the state back to it's original value 6474 * and broadcast to anyone waiting for the power 6475 * change completion. 6476 */ 6477 mutex_enter(SD_MUTEX(un)); 6478 un->un_state = state_before_pm; 6479 cv_broadcast(&un->un_suspend_cv); 6480 mutex_exit(SD_MUTEX(un)); 6481 6482 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 6483 "trans check Failed, not ok to power cycle.\n"); 6484 6485 goto sdpower_failed; 6486 case -1: 6487 if (got_semaphore_here != 0) { 6488 sema_v(&un->un_semoclose); 6489 } 6490 /* 6491 * On exit put the state back to it's original value 6492 * and broadcast to anyone waiting for the power 6493 * change completion. 6494 */ 6495 mutex_enter(SD_MUTEX(un)); 6496 un->un_state = state_before_pm; 6497 cv_broadcast(&un->un_suspend_cv); 6498 mutex_exit(SD_MUTEX(un)); 6499 SD_TRACE(SD_LOG_IO_PM, un, 6500 "sdpower: exit, trans check command Failed.\n"); 6501 6502 goto sdpower_failed; 6503 } 6504 } 6505 6506 if (level == SD_SPINDLE_OFF) { 6507 /* 6508 * Save the last state... if the STOP FAILS we need it 6509 * for restoring 6510 */ 6511 mutex_enter(SD_MUTEX(un)); 6512 save_state = un->un_last_state; 6513 /* 6514 * There must not be any cmds. getting processed 6515 * in the driver when we get here. Power to the 6516 * device is potentially going off. 6517 */ 6518 ASSERT(un->un_ncmds_in_driver == 0); 6519 mutex_exit(SD_MUTEX(un)); 6520 6521 /* 6522 * For now suspend the device completely before spindle is 6523 * turned off 6524 */ 6525 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) { 6526 if (got_semaphore_here != 0) { 6527 sema_v(&un->un_semoclose); 6528 } 6529 /* 6530 * On exit put the state back to it's original value 6531 * and broadcast to anyone waiting for the power 6532 * change completion. 6533 */ 6534 mutex_enter(SD_MUTEX(un)); 6535 un->un_state = state_before_pm; 6536 cv_broadcast(&un->un_suspend_cv); 6537 mutex_exit(SD_MUTEX(un)); 6538 SD_TRACE(SD_LOG_IO_PM, un, 6539 "sdpower: exit, PM suspend Failed.\n"); 6540 6541 goto sdpower_failed; 6542 } 6543 } 6544 6545 /* 6546 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 6547 * close, or strategy. Dump no long uses this routine, it uses it's 6548 * own code so it can be done in polled mode. 6549 */ 6550 6551 medium_present = TRUE; 6552 6553 /* 6554 * When powering up, issue a TUR in case the device is at unit 6555 * attention. Don't do retries. Bypass the PM layer, otherwise 6556 * a deadlock on un_pm_busy_cv will occur. 
6557 */ 6558 if (level == SD_SPINDLE_ON) { 6559 sval = sd_send_scsi_TEST_UNIT_READY(ssc, 6560 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 6561 if (sval != 0) 6562 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6563 } 6564 6565 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 6566 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 6567 6568 sval = sd_send_scsi_START_STOP_UNIT(ssc, 6569 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP), 6570 SD_PATH_DIRECT); 6571 if (sval != 0) { 6572 if (sval == EIO) 6573 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 6574 else 6575 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 6576 } 6577 6578 /* Command failed, check for media present. */ 6579 if ((sval == ENXIO) && un->un_f_has_removable_media) { 6580 medium_present = FALSE; 6581 } 6582 6583 /* 6584 * The conditions of interest here are: 6585 * if a spindle off with media present fails, 6586 * then restore the state and return an error. 6587 * else if a spindle on fails, 6588 * then return an error (there's no state to restore). 6589 * In all other cases we setup for the new state 6590 * and return success. 6591 */ 6592 switch (level) { 6593 case SD_SPINDLE_OFF: 6594 if ((medium_present == TRUE) && (sval != 0)) { 6595 /* The stop command from above failed */ 6596 rval = DDI_FAILURE; 6597 /* 6598 * The stop command failed, and we have media 6599 * present. Put the level back by calling the 6600 * sd_pm_resume() and set the state back to 6601 * it's previous value. 6602 */ 6603 (void) sd_ddi_pm_resume(un); 6604 mutex_enter(SD_MUTEX(un)); 6605 un->un_last_state = save_state; 6606 mutex_exit(SD_MUTEX(un)); 6607 break; 6608 } 6609 /* 6610 * The stop command from above succeeded. 6611 */ 6612 if (un->un_f_monitor_media_state) { 6613 /* 6614 * Terminate watch thread in case of removable media 6615 * devices going into low power state. This is as per 6616 * the requirements of pm framework, otherwise commands 6617 * will be generated for the device (through watch 6618 * thread), even when the device is in low power state. 6619 */ 6620 mutex_enter(SD_MUTEX(un)); 6621 un->un_f_watcht_stopped = FALSE; 6622 if (un->un_swr_token != NULL) { 6623 opaque_t temp_token = un->un_swr_token; 6624 un->un_f_watcht_stopped = TRUE; 6625 un->un_swr_token = NULL; 6626 mutex_exit(SD_MUTEX(un)); 6627 (void) scsi_watch_request_terminate(temp_token, 6628 SCSI_WATCH_TERMINATE_ALL_WAIT); 6629 } else { 6630 mutex_exit(SD_MUTEX(un)); 6631 } 6632 } 6633 break; 6634 6635 default: /* The level requested is spindle on... */ 6636 /* 6637 * Legacy behavior: return success on a failed spinup 6638 * if there is no media in the drive. 6639 * Do this by looking at medium_present here. 6640 */ 6641 if ((sval != 0) && medium_present) { 6642 /* The start command from above failed */ 6643 rval = DDI_FAILURE; 6644 break; 6645 } 6646 /* 6647 * The start command from above succeeded 6648 * Resume the devices now that we have 6649 * started the disks 6650 */ 6651 (void) sd_ddi_pm_resume(un); 6652 6653 /* 6654 * Resume the watch thread since it was suspended 6655 * when the device went into low power mode. 
6656 */ 6657 if (un->un_f_monitor_media_state) { 6658 mutex_enter(SD_MUTEX(un)); 6659 if (un->un_f_watcht_stopped == TRUE) { 6660 opaque_t temp_token; 6661 6662 un->un_f_watcht_stopped = FALSE; 6663 mutex_exit(SD_MUTEX(un)); 6664 temp_token = scsi_watch_request_submit( 6665 SD_SCSI_DEVP(un), 6666 sd_check_media_time, 6667 SENSE_LENGTH, sd_media_watch_cb, 6668 (caddr_t)dev); 6669 mutex_enter(SD_MUTEX(un)); 6670 un->un_swr_token = temp_token; 6671 } 6672 mutex_exit(SD_MUTEX(un)); 6673 } 6674 } 6675 if (got_semaphore_here != 0) { 6676 sema_v(&un->un_semoclose); 6677 } 6678 /* 6679 * On exit put the state back to it's original value 6680 * and broadcast to anyone waiting for the power 6681 * change completion. 6682 */ 6683 mutex_enter(SD_MUTEX(un)); 6684 un->un_state = state_before_pm; 6685 cv_broadcast(&un->un_suspend_cv); 6686 mutex_exit(SD_MUTEX(un)); 6687 6688 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 6689 6690 sd_ssc_fini(ssc); 6691 return (rval); 6692 6693 sdpower_failed: 6694 6695 sd_ssc_fini(ssc); 6696 return (DDI_FAILURE); 6697 } 6698 6699 6700 6701 /* 6702 * Function: sdattach 6703 * 6704 * Description: Driver's attach(9e) entry point function. 6705 * 6706 * Arguments: devi - opaque device info handle 6707 * cmd - attach type 6708 * 6709 * Return Code: DDI_SUCCESS 6710 * DDI_FAILURE 6711 * 6712 * Context: Kernel thread context 6713 */ 6714 6715 static int 6716 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 6717 { 6718 switch (cmd) { 6719 case DDI_ATTACH: 6720 return (sd_unit_attach(devi)); 6721 case DDI_RESUME: 6722 return (sd_ddi_resume(devi)); 6723 default: 6724 break; 6725 } 6726 return (DDI_FAILURE); 6727 } 6728 6729 6730 /* 6731 * Function: sddetach 6732 * 6733 * Description: Driver's detach(9E) entry point function. 6734 * 6735 * Arguments: devi - opaque device info handle 6736 * cmd - detach type 6737 * 6738 * Return Code: DDI_SUCCESS 6739 * DDI_FAILURE 6740 * 6741 * Context: Kernel thread context 6742 */ 6743 6744 static int 6745 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 6746 { 6747 switch (cmd) { 6748 case DDI_DETACH: 6749 return (sd_unit_detach(devi)); 6750 case DDI_SUSPEND: 6751 return (sd_ddi_suspend(devi)); 6752 default: 6753 break; 6754 } 6755 return (DDI_FAILURE); 6756 } 6757 6758 6759 /* 6760 * Function: sd_sync_with_callback 6761 * 6762 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 6763 * state while the callback routine is active. 6764 * 6765 * Arguments: un: softstate structure for the instance 6766 * 6767 * Context: Kernel thread context 6768 */ 6769 6770 static void 6771 sd_sync_with_callback(struct sd_lun *un) 6772 { 6773 ASSERT(un != NULL); 6774 6775 mutex_enter(SD_MUTEX(un)); 6776 6777 ASSERT(un->un_in_callback >= 0); 6778 6779 while (un->un_in_callback > 0) { 6780 mutex_exit(SD_MUTEX(un)); 6781 delay(2); 6782 mutex_enter(SD_MUTEX(un)); 6783 } 6784 6785 mutex_exit(SD_MUTEX(un)); 6786 } 6787 6788 /* 6789 * Function: sd_unit_attach 6790 * 6791 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 6792 * the soft state structure for the device and performs 6793 * all necessary structure and device initializations. 6794 * 6795 * Arguments: devi: the system's dev_info_t for the device. 6796 * 6797 * Return Code: DDI_SUCCESS if attach is successful. 6798 * DDI_FAILURE if any part of the attach fails. 6799 * 6800 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 6801 * Kernel thread context only. Can sleep. 
6802 */ 6803 6804 static int 6805 sd_unit_attach(dev_info_t *devi) 6806 { 6807 struct scsi_device *devp; 6808 struct sd_lun *un; 6809 char *variantp; 6810 int reservation_flag = SD_TARGET_IS_UNRESERVED; 6811 int instance; 6812 int rval; 6813 int wc_enabled; 6814 int tgt; 6815 uint64_t capacity; 6816 uint_t lbasize = 0; 6817 dev_info_t *pdip = ddi_get_parent(devi); 6818 int offbyone = 0; 6819 int geom_label_valid = 0; 6820 sd_ssc_t *ssc; 6821 int status; 6822 struct sd_fm_internal *sfip = NULL; 6823 int max_xfer_size; 6824 6825 /* 6826 * Retrieve the target driver's private data area. This was set 6827 * up by the HBA. 6828 */ 6829 devp = ddi_get_driver_private(devi); 6830 6831 /* 6832 * Retrieve the target ID of the device. 6833 */ 6834 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6835 SCSI_ADDR_PROP_TARGET, -1); 6836 6837 /* 6838 * Since we have no idea what state things were left in by the last 6839 * user of the device, set up some 'default' settings, ie. turn 'em 6840 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 6841 * Do this before the scsi_probe, which sends an inquiry. 6842 * This is a fix for bug (4430280). 6843 * Of special importance is wide-xfer. The drive could have been left 6844 * in wide transfer mode by the last driver to communicate with it, 6845 * this includes us. If that's the case, and if the following is not 6846 * setup properly or we don't re-negotiate with the drive prior to 6847 * transferring data to/from the drive, it causes bus parity errors, 6848 * data overruns, and unexpected interrupts. This first occurred when 6849 * the fix for bug (4378686) was made. 6850 */ 6851 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 6852 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 6853 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 6854 6855 /* 6856 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs 6857 * on a target. Setting it per lun instance actually sets the 6858 * capability of this target, which affects those luns already 6859 * attached on the same target. So during attach, we can only disable 6860 * this capability only when no other lun has been attached on this 6861 * target. By doing this, we assume a target has the same tagged-qing 6862 * capability for every lun. The condition can be removed when HBA 6863 * is changed to support per lun based tagged-qing capability. 6864 */ 6865 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 6866 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 6867 } 6868 6869 /* 6870 * Use scsi_probe() to issue an INQUIRY command to the device. 6871 * This call will allocate and fill in the scsi_inquiry structure 6872 * and point the sd_inq member of the scsi_device structure to it. 6873 * If the attach succeeds, then this memory will not be de-allocated 6874 * (via scsi_unprobe()) until the instance is detached. 6875 */ 6876 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 6877 goto probe_failed; 6878 } 6879 6880 /* 6881 * Check the device type as specified in the inquiry data and 6882 * claim it if it is of a type that we support. 6883 */ 6884 switch (devp->sd_inq->inq_dtype) { 6885 case DTYPE_DIRECT: 6886 break; 6887 case DTYPE_RODIRECT: 6888 break; 6889 case DTYPE_OPTICAL: 6890 break; 6891 case DTYPE_NOTPRESENT: 6892 default: 6893 /* Unsupported device type; fail the attach. */ 6894 goto probe_failed; 6895 } 6896 6897 /* 6898 * Allocate the soft state structure for this unit. 
6899 * 6900 * We rely upon this memory being set to all zeroes by 6901 * ddi_soft_state_zalloc(). We assume that any member of the 6902 * soft state structure that is not explicitly initialized by 6903 * this routine will have a value of zero. 6904 */ 6905 instance = ddi_get_instance(devp->sd_dev); 6906 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 6907 goto probe_failed; 6908 } 6909 6910 /* 6911 * Retrieve a pointer to the newly-allocated soft state. 6912 * 6913 * This should NEVER fail if the ddi_soft_state_zalloc() call above 6914 * was successful, unless something has gone horribly wrong and the 6915 * ddi's soft state internals are corrupt (in which case it is 6916 * probably better to halt here than just fail the attach....) 6917 */ 6918 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 6919 panic("sd_unit_attach: NULL soft state on instance:0x%x", 6920 instance); 6921 /*NOTREACHED*/ 6922 } 6923 6924 /* 6925 * Link the back ptr of the driver soft state to the scsi_device 6926 * struct for this lun. 6927 * Save a pointer to the softstate in the driver-private area of 6928 * the scsi_device struct. 6929 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 6930 * we first set un->un_sd below. 6931 */ 6932 un->un_sd = devp; 6933 devp->sd_private = (opaque_t)un; 6934 6935 /* 6936 * The following must be after devp is stored in the soft state struct. 6937 */ 6938 #ifdef SDDEBUG 6939 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 6940 "%s_unit_attach: un:0x%p instance:%d\n", 6941 ddi_driver_name(devi), un, instance); 6942 #endif 6943 6944 /* 6945 * Set up the device type and node type (for the minor nodes). 6946 * By default we assume that the device can at least support the 6947 * Common Command Set. Call it a CD-ROM if it reports itself 6948 * as a RODIRECT device. 6949 */ 6950 switch (devp->sd_inq->inq_dtype) { 6951 case DTYPE_RODIRECT: 6952 un->un_node_type = DDI_NT_CD_CHAN; 6953 un->un_ctype = CTYPE_CDROM; 6954 break; 6955 case DTYPE_OPTICAL: 6956 un->un_node_type = DDI_NT_BLOCK_CHAN; 6957 un->un_ctype = CTYPE_ROD; 6958 break; 6959 default: 6960 un->un_node_type = DDI_NT_BLOCK_CHAN; 6961 un->un_ctype = CTYPE_CCS; 6962 break; 6963 } 6964 6965 /* 6966 * Try to read the interconnect type from the HBA. 6967 * 6968 * Note: This driver is currently compiled as two binaries, a parallel 6969 * scsi version (sd) and a fibre channel version (ssd). All functional 6970 * differences are determined at compile time. In the future a single 6971 * binary will be provided and the interconnect type will be used to 6972 * differentiate between fibre and parallel scsi behaviors. At that time 6973 * it will be necessary for all fibre channel HBAs to support this 6974 * property. 
6975 * 6976 * set un_f_is_fiber to TRUE ( default fiber ) 6977 */ 6978 un->un_f_is_fibre = TRUE; 6979 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 6980 case INTERCONNECT_SSA: 6981 un->un_interconnect_type = SD_INTERCONNECT_SSA; 6982 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6983 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 6984 break; 6985 case INTERCONNECT_PARALLEL: 6986 un->un_f_is_fibre = FALSE; 6987 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 6988 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6989 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 6990 break; 6991 case INTERCONNECT_SATA: 6992 un->un_f_is_fibre = FALSE; 6993 un->un_interconnect_type = SD_INTERCONNECT_SATA; 6994 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6995 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un); 6996 break; 6997 case INTERCONNECT_FIBRE: 6998 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 6999 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7000 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7001 break; 7002 case INTERCONNECT_FABRIC: 7003 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7004 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7005 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7006 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7007 break; 7008 default: 7009 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7010 /* 7011 * The HBA does not support the "interconnect-type" property 7012 * (or did not provide a recognized type). 7013 * 7014 * Note: This will be obsoleted when a single fibre channel 7015 * and parallel scsi driver is delivered. In the meantime the 7016 * interconnect type will be set to the platform default.If that 7017 * type is not parallel SCSI, it means that we should be 7018 * assuming "ssd" semantics. However, here this also means that 7019 * the FC HBA is not supporting the "interconnect-type" property 7020 * like we expect it to, so log this occurrence. 7021 */ 7022 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7023 if (!SD_IS_PARALLEL_SCSI(un)) { 7024 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7025 "sd_unit_attach: un:0x%p Assuming " 7026 "INTERCONNECT_FIBRE\n", un); 7027 } else { 7028 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7029 "sd_unit_attach: un:0x%p Assuming " 7030 "INTERCONNECT_PARALLEL\n", un); 7031 un->un_f_is_fibre = FALSE; 7032 } 7033 #else 7034 /* 7035 * Note: This source will be implemented when a single fibre 7036 * channel and parallel scsi driver is delivered. The default 7037 * will be to assume that if a device does not support the 7038 * "interconnect-type" property it is a parallel SCSI HBA and 7039 * we will set the interconnect type for parallel scsi. 7040 */ 7041 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7042 un->un_f_is_fibre = FALSE; 7043 #endif 7044 break; 7045 } 7046 7047 if (un->un_f_is_fibre == TRUE) { 7048 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7049 SCSI_VERSION_3) { 7050 switch (un->un_interconnect_type) { 7051 case SD_INTERCONNECT_FIBRE: 7052 case SD_INTERCONNECT_SSA: 7053 un->un_node_type = DDI_NT_BLOCK_WWN; 7054 break; 7055 default: 7056 break; 7057 } 7058 } 7059 } 7060 7061 /* 7062 * Initialize the Request Sense command for the target 7063 */ 7064 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7065 goto alloc_rqs_failed; 7066 } 7067 7068 /* 7069 * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc 7070 * with separate binary for sd and ssd. 7071 * 7072 * x86 has 1 binary, un_retry_count is set base on connection type. 7073 * The hardcoded values will go away when Sparc uses 1 binary 7074 * for sd and ssd. 
These hardcoded values must match
7075 * SD_RETRY_COUNT in sddef.h.
7076 * The value used is based on the interconnect type:
7077 * fibre = 3, parallel = 5.
7078 */
7079 #if defined(__i386) || defined(__amd64)
7080 un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
7081 #else
7082 un->un_retry_count = SD_RETRY_COUNT;
7083 #endif
7084
7085 /*
7086 * Set the per disk retry count to the default number of retries
7087 * for disks and CDROMs. This value can be overridden by the
7088 * disk property list or an entry in sd.conf.
7089 */
7090 un->un_notready_retry_count =
7091 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
7092 : DISK_NOT_READY_RETRY_COUNT(un);
7093
7094 /*
7095 * Set the busy retry count to the default value of un_retry_count.
7096 * This can be overridden by entries in sd.conf or the device
7097 * config table.
7098 */
7099 un->un_busy_retry_count = un->un_retry_count;
7100
7101 /*
7102 * Init the reset threshold for retries. This number determines
7103 * how many retries must be performed before a reset can be issued
7104 * (for certain error conditions). This can be overridden by entries
7105 * in sd.conf or the device config table.
7106 */
7107 un->un_reset_retry_count = (un->un_retry_count / 2);
7108
7109 /*
7110 * Set the victim_retry_count to twice the default un_retry_count.
7111 */
7112 un->un_victim_retry_count = (2 * un->un_retry_count);
7113
7114 /*
7115 * Set the reservation release timeout to the default value of
7116 * 5 seconds. This can be overridden by entries in ssd.conf or the
7117 * device config table.
7118 */
7119 un->un_reserve_release_time = 5;
7120
7121 /*
7122 * Set up the default maximum transfer size. Note that this may
7123 * get updated later in the attach, when setting up default wide
7124 * operations for disks.
7125 */
7126 #if defined(__i386) || defined(__amd64)
7127 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
7128 un->un_partial_dma_supported = 1;
7129 #else
7130 un->un_max_xfer_size = (uint_t)maxphys;
7131 #endif
7132
7133 /*
7134 * Get "allow bus device reset" property (defaults to "enabled" if
7135 * the property was not defined). This is to disable bus resets for
7136 * certain kinds of error recovery. Note: In the future when a run-time
7137 * fibre check is available the soft state flag should default to
7138 * enabled.
7139 */
7140 if (un->un_f_is_fibre == TRUE) {
7141 un->un_f_allow_bus_device_reset = TRUE;
7142 } else {
7143 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
7144 "allow-bus-device-reset", 1) != 0) {
7145 un->un_f_allow_bus_device_reset = TRUE;
7146 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7147 "sd_unit_attach: un:0x%p Bus device reset "
7148 "enabled\n", un);
7149 } else {
7150 un->un_f_allow_bus_device_reset = FALSE;
7151 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7152 "sd_unit_attach: un:0x%p Bus device reset "
7153 "disabled\n", un);
7154 }
7155 }
7156
7157 /*
7158 * Check if this is an ATAPI device. ATAPI devices use Group 1
7159 * Read/Write commands and Group 2 Mode Sense/Select commands.
7160 *
7161 * Note: The "obsolete" way of doing this is to check for the "atapi"
7162 * property. The new "variant" property with a value of "atapi" has been
7163 * introduced so that future 'variants' of standard SCSI behavior (like
7164 * atapi) could be specified by the underlying HBA drivers by supplying
7165 * a new value for the "variant" property, instead of having to define a
7166 * new property.
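 *
 * As an illustrative sketch only (not code from this driver;
 * "child_dip" is a hypothetical name for the target device's devinfo
 * node), an HBA driver advertising ATAPI semantics might do:
 *
 *	(void) ddi_prop_update_string(DDI_DEV_T_NONE, child_dip,
 *	    "variant", "atapi");
 *
 * which the ddi_prop_lookup_string() call below then detects.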
7167 */
7168 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
7169 un->un_f_cfg_is_atapi = TRUE;
7170 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7171 "sd_unit_attach: un:0x%p Atapi device\n", un);
7172 }
7173 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
7174 &variantp) == DDI_PROP_SUCCESS) {
7175 if (strcmp(variantp, "atapi") == 0) {
7176 un->un_f_cfg_is_atapi = TRUE;
7177 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7178 "sd_unit_attach: un:0x%p Atapi device\n", un);
7179 }
7180 ddi_prop_free(variantp);
7181 }
7182
7183 un->un_cmd_timeout = SD_IO_TIME;
7184
7185 un->un_busy_timeout = SD_BSY_TIMEOUT;
7186
7187 /* Info on current states, statuses, etc. (Updated frequently) */
7188 un->un_state = SD_STATE_NORMAL;
7189 un->un_last_state = SD_STATE_NORMAL;
7190
7191 /* Control & status info for command throttling */
7192 un->un_throttle = sd_max_throttle;
7193 un->un_saved_throttle = sd_max_throttle;
7194 un->un_min_throttle = sd_min_throttle;
7195
7196 if (un->un_f_is_fibre == TRUE) {
7197 un->un_f_use_adaptive_throttle = TRUE;
7198 } else {
7199 un->un_f_use_adaptive_throttle = FALSE;
7200 }
7201
7202 /* Removable media support. */
7203 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
7204 un->un_mediastate = DKIO_NONE;
7205 un->un_specified_mediastate = DKIO_NONE;
7206
7207 /* CVs for suspend/resume (PM or DR) */
7208 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
7209 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);
7210
7211 /* Power management support. */
7212 un->un_power_level = SD_SPINDLE_UNINIT;
7213
7214 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL);
7215 un->un_f_wcc_inprog = 0;
7216
7217 /*
7218 * The open/close semaphore is used to serialize threads executing
7219 * in the driver's open & close entry point routines for a given
7220 * instance.
7221 */
7222 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);
7223
7224 /*
7225 * The conf file entry and the softstate variable are forceful
7226 * overrides, meaning a non-zero value must be entered to change the default.
7227 */
7228 un->un_f_disksort_disabled = FALSE;
7229
7230 /*
7231 * Retrieve the properties from the static driver table or the driver
7232 * configuration file (.conf) for this unit and update the soft state
7233 * for the device as needed for the indicated properties.
7234 * Note: the property configuration needs to occur here as some of the
7235 * following routines may have dependencies on soft state flags set
7236 * as part of the driver property configuration.
7237 */
7238 sd_read_unit_properties(un);
7239 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7240 "sd_unit_attach: un:0x%p property configuration complete.\n", un);
7241
7242 /*
7243 * A device is treated as hotpluggable only if it has the
7244 * "hotpluggable" property. Otherwise, it is regarded as
7245 * non-hotpluggable.
7246 */
7247 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
7248 -1) != -1) {
7249 un->un_f_is_hotpluggable = TRUE;
7250 }
7251
7252 /*
7253 * Set the unit's attributes (flags) according to "hotpluggable" and
7254 * the RMB bit in the INQUIRY data.
7255 */
7256 sd_set_unit_attributes(un, devi);
7257
7258 /*
7259 * By default, we mark the capacity, lbasize, and geometry
7260 * as invalid. Only if we successfully read a valid capacity
7261 * will we update the un_blockcount and un_tgt_blocksize with the
7262 * valid values (the geometry will be validated later).
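 *
 * (For example: once a valid capacity is read, a device reporting
 * N blocks of 2048 bytes ends up with un_tgt_blocksize = 2048 and
 * un_blockcount scaled to N * (2048 / DEV_BSIZE) system blocks,
 * since sd_send_scsi_READ_CAPACITY scales the count into
 * un_sys_blocksize units, as noted again in sd_set_errstats() below.)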
7263 */ 7264 un->un_f_blockcount_is_valid = FALSE; 7265 un->un_f_tgt_blocksize_is_valid = FALSE; 7266 7267 /* 7268 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7269 * otherwise. 7270 */ 7271 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7272 un->un_blockcount = 0; 7273 7274 /* 7275 * Set up the per-instance info needed to determine the correct 7276 * CDBs and other info for issuing commands to the target. 7277 */ 7278 sd_init_cdb_limits(un); 7279 7280 /* 7281 * Set up the IO chains to use, based upon the target type. 7282 */ 7283 if (un->un_f_non_devbsize_supported) { 7284 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7285 } else { 7286 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7287 } 7288 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7289 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7290 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7291 7292 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7293 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7294 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7295 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7296 7297 7298 if (ISCD(un)) { 7299 un->un_additional_codes = sd_additional_codes; 7300 } else { 7301 un->un_additional_codes = NULL; 7302 } 7303 7304 /* 7305 * Create the kstats here so they can be available for attach-time 7306 * routines that send commands to the unit (either polled or via 7307 * sd_send_scsi_cmd). 7308 * 7309 * Note: This is a critical sequence that needs to be maintained: 7310 * 1) Instantiate the kstats here, before any routines using the 7311 * iopath (i.e. sd_send_scsi_cmd). 7312 * 2) Instantiate and initialize the partition stats 7313 * (sd_set_pstats). 7314 * 3) Initialize the error stats (sd_set_errstats), following 7315 * sd_validate_geometry(),sd_register_devid(), 7316 * and sd_cache_control(). 7317 */ 7318 7319 un->un_stats = kstat_create(sd_label, instance, 7320 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7321 if (un->un_stats != NULL) { 7322 un->un_stats->ks_lock = SD_MUTEX(un); 7323 kstat_install(un->un_stats); 7324 } 7325 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7326 "sd_unit_attach: un:0x%p un_stats created\n", un); 7327 7328 sd_create_errstats(un, instance); 7329 if (un->un_errstats == NULL) { 7330 goto create_errstats_failed; 7331 } 7332 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7333 "sd_unit_attach: un:0x%p errstats created\n", un); 7334 7335 /* 7336 * The following if/else code was relocated here from below as part 7337 * of the fix for bug (4430280). However with the default setup added 7338 * on entry to this routine, it's no longer absolutely necessary for 7339 * this to be before the call to sd_spin_up_unit. 
7340 */ 7341 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) { 7342 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) || 7343 (devp->sd_inq->inq_ansi == 5)) && 7344 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque; 7345 7346 /* 7347 * If tagged queueing is supported by the target 7348 * and by the host adapter then we will enable it 7349 */ 7350 un->un_tagflags = 0; 7351 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag && 7352 (un->un_f_arq_enabled == TRUE)) { 7353 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 7354 1, 1) == 1) { 7355 un->un_tagflags = FLAG_STAG; 7356 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7357 "sd_unit_attach: un:0x%p tag queueing " 7358 "enabled\n", un); 7359 } else if (scsi_ifgetcap(SD_ADDRESS(un), 7360 "untagged-qing", 0) == 1) { 7361 un->un_f_opt_queueing = TRUE; 7362 un->un_saved_throttle = un->un_throttle = 7363 min(un->un_throttle, 3); 7364 } else { 7365 un->un_f_opt_queueing = FALSE; 7366 un->un_saved_throttle = un->un_throttle = 1; 7367 } 7368 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 7369 == 1) && (un->un_f_arq_enabled == TRUE)) { 7370 /* The Host Adapter supports internal queueing. */ 7371 un->un_f_opt_queueing = TRUE; 7372 un->un_saved_throttle = un->un_throttle = 7373 min(un->un_throttle, 3); 7374 } else { 7375 un->un_f_opt_queueing = FALSE; 7376 un->un_saved_throttle = un->un_throttle = 1; 7377 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7378 "sd_unit_attach: un:0x%p no tag queueing\n", un); 7379 } 7380 7381 /* 7382 * Enable large transfers for SATA/SAS drives 7383 */ 7384 if (SD_IS_SERIAL(un)) { 7385 un->un_max_xfer_size = 7386 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7387 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7388 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7389 "sd_unit_attach: un:0x%p max transfer " 7390 "size=0x%x\n", un, un->un_max_xfer_size); 7391 7392 } 7393 7394 /* Setup or tear down default wide operations for disks */ 7395 7396 /* 7397 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 7398 * and "ssd_max_xfer_size" to exist simultaneously on the same 7399 * system and be set to different values. In the future this 7400 * code may need to be updated when the ssd module is 7401 * obsoleted and removed from the system. (4299588) 7402 */ 7403 if (SD_IS_PARALLEL_SCSI(un) && 7404 (devp->sd_inq->inq_rdf == RDF_SCSI2) && 7405 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 7406 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7407 1, 1) == 1) { 7408 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7409 "sd_unit_attach: un:0x%p Wide Transfer " 7410 "enabled\n", un); 7411 } 7412 7413 /* 7414 * If tagged queuing has also been enabled, then 7415 * enable large xfers 7416 */ 7417 if (un->un_saved_throttle == sd_max_throttle) { 7418 un->un_max_xfer_size = 7419 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7420 sd_max_xfer_size, SD_MAX_XFER_SIZE); 7421 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7422 "sd_unit_attach: un:0x%p max transfer " 7423 "size=0x%x\n", un, un->un_max_xfer_size); 7424 } 7425 } else { 7426 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 7427 0, 1) == 1) { 7428 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7429 "sd_unit_attach: un:0x%p " 7430 "Wide Transfer disabled\n", un); 7431 } 7432 } 7433 } else { 7434 un->un_tagflags = FLAG_STAG; 7435 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 7436 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 7437 } 7438 7439 /* 7440 * If this target supports LUN reset, try to enable it. 
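 *
 * (A note on the scsi_ifgetcap(9F)/scsi_ifsetcap(9F) conventions
 * relied on here and below: scsi_ifgetcap() returns -1 when the HBA
 * does not define a capability, and scsi_ifsetcap() returns 1 only
 * when the capability was actually changed, e.g.:
 *
 *	if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1)
 *		... the capability is now enabled on this path ...
 *
 * so anything other than 1 is treated as "not supported".)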
7441 */
7442 if (un->un_f_lun_reset_enabled) {
7443 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
7444 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7445 "un:0x%p lun_reset capability set\n", un);
7446 } else {
7447 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7448 "un:0x%p lun-reset capability not set\n", un);
7449 }
7450 }
7451
7452 /*
7453 * Adjust the maximum transfer size. This is to fix
7454 * the problem of partial DMA support on SPARC. Some
7455 * HBA drivers, such as aac, have a very small dma_attr_maxxfer
7456 * size, which requires partial DMA support on SPARC.
7457 * In the future the SPARC pci nexus driver may solve
7458 * the problem instead of this fix.
7459 */
7460 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1);
7461 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) {
7462 /* We need DMA partial even on sparc to ensure sddump() works */
7463 un->un_max_xfer_size = max_xfer_size;
7464 if (un->un_partial_dma_supported == 0)
7465 un->un_partial_dma_supported = 1;
7466 }
7467 if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
7468 DDI_PROP_DONTPASS, "buf_break", 0) == 1) {
7469 if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr,
7470 un->un_max_xfer_size) == 1) {
7471 un->un_buf_breakup_supported = 1;
7472 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7473 "un:0x%p Buf breakup enabled\n", un);
7474 }
7475 }
7476
7477 /*
7478 * Set PKT_DMA_PARTIAL flag.
7479 */
7480 if (un->un_partial_dma_supported == 1) {
7481 un->un_pkt_flags = PKT_DMA_PARTIAL;
7482 } else {
7483 un->un_pkt_flags = 0;
7484 }
7485
7486 /* Initialize sd_ssc_t for internal uscsi commands */
7487 ssc = sd_ssc_init(un);
7488 scsi_fm_init(devp);
7489
7490 /*
7491 * Allocate memory for the SCSI FMA internals.
7492 */
7493 un->un_fm_private =
7494 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP);
7495 sfip = (struct sd_fm_internal *)un->un_fm_private;
7496 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd;
7497 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo;
7498 sfip->fm_ssc.ssc_un = un;
7499
7500 if (ISCD(un) ||
7501 un->un_f_has_removable_media ||
7502 devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) {
7503 /*
7504 * We don't touch CD-ROMs or DDI_FM_NOT_CAPABLE devices;
7505 * their logging is unchanged.
7506 */
7507 sfip->fm_log_level = SD_FM_LOG_NSUP;
7508 } else {
7509 /*
7510 * If we get here, this is a non-CDROM, FM-capable
7511 * device, and it will no longer use the old scsi_log
7512 * output in /var/adm/messages. Instead, the property
7513 * "fm-scsi-log" controls whether the FM telemetry will
7514 * be logged in /var/adm/messages.
7515 */
7516 int fm_scsi_log;
7517 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
7518 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0);
7519
7520 if (fm_scsi_log)
7521 sfip->fm_log_level = SD_FM_LOG_EREPORT;
7522 else
7523 sfip->fm_log_level = SD_FM_LOG_SILENT;
7524 }
7525
7526 /*
7527 * At this point in the attach, we have enough info in the
7528 * soft state to be able to issue commands to the target.
7529 *
7530 * All command paths used below MUST issue their commands as
7531 * SD_PATH_DIRECT. This is important as intermediate layers
7532 * are not all initialized yet (such as PM).
7533 */
7534
7535 /*
7536 * Send a TEST UNIT READY command to the device. This should clear
7537 * any outstanding UNIT ATTENTION that may be present.
7538 *
7539 * Note: Don't check for success; just track whether there is a
7540 * reservation. This is a throwaway command to clear any unit attentions.
7541 *
7542 * Note: This MUST be the first command issued to the target during
7543 * attach to ensure power-on UNIT ATTENTIONS are cleared.
7544 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated
7545 * with attempts at spinning up a device with no media.
7546 */
7547 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
7548 if (status != 0) {
7549 if (status == EACCES)
7550 reservation_flag = SD_TARGET_IS_RESERVED;
7551 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
7552 }
7553
7554 /*
7555 * If the device is NOT a removable media device, attempt to spin
7556 * it up (using the START_STOP_UNIT command) and read its capacity
7557 * (using the READ CAPACITY command). Note, however, that either
7558 * of these could fail and in some cases we would continue with
7559 * the attach despite the failure (see below).
7560 */
7561 if (un->un_f_descr_format_supported) {
7562
7563 switch (sd_spin_up_unit(ssc)) {
7564 case 0:
7565 /*
7566 * Spin-up was successful; now try to read the
7567 * capacity. If successful then save the results
7568 * and mark the capacity & lbasize as valid.
7569 */
7570 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7571 "sd_unit_attach: un:0x%p spin-up successful\n", un);
7572
7573 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
7574 &lbasize, SD_PATH_DIRECT);
7575
7576 switch (status) {
7577 case 0: {
7578 if (capacity > DK_MAX_BLOCKS) {
7579 #ifdef _LP64
7580 if ((capacity + 1) >
7581 SD_GROUP1_MAX_ADDRESS) {
7582 /*
7583 * Enable descriptor format
7584 * sense data so that we can
7585 * get 64 bit sense data
7586 * fields.
7587 */
7588 sd_enable_descr_sense(ssc);
7589 }
7590 #else
7591 /* 32-bit kernels can't handle this */
7592 scsi_log(SD_DEVINFO(un),
7593 sd_label, CE_WARN,
7594 "disk has %llu blocks, which "
7595 "is too large for a 32-bit "
7596 "kernel", capacity);
7597
7598 #if defined(__i386) || defined(__amd64)
7599 /*
7600 * A 1TB disk was treated as (1T - 512)B
7601 * in the past, so it might have a
7602 * valid VTOC and Solaris partitions;
7603 * we have to allow it to continue to
7604 * work.
7605 */
7606 if (capacity - 1 > DK_MAX_BLOCKS)
7607 #endif
7608 goto spinup_failed;
7609 #endif
7610 }
7611
7612 /*
7613 * Here it's not necessary to check whether the
7614 * capacity of the device is bigger than what
7615 * the maximum HBA CDB can support, because
7616 * sd_send_scsi_READ_CAPACITY retrieves the
7617 * capacity by sending a USCSI command, which
7618 * is constrained by the maximum HBA CDB. It
7619 * returns EINVAL when the required CDB is
7620 * longer than the maximum supported; that
7621 * case is handled in
7622 * "case EINVAL" below.
7623 */
7624
7625 /*
7626 * The following relies on
7627 * sd_send_scsi_READ_CAPACITY never
7628 * returning 0 for capacity and/or lbasize.
7629 */
7630 sd_update_block_info(un, lbasize, capacity);
7631
7632 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7633 "sd_unit_attach: un:0x%p capacity = %ld "
7634 "blocks; lbasize = %ld.\n", un,
7635 un->un_blockcount, un->un_tgt_blocksize);
7636
7637 break;
7638 }
7639 case EINVAL:
7640 /*
7641 * In the case where the max-cdb-length property
7642 * is smaller than the required CDB length for
7643 * a SCSI device, a target driver can fail to
7644 * attach to that device.
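 *
 * For example (an illustration, not driver code): a device with more
 * than 2^32 - 1 blocks must be sized with the 16-byte READ CAPACITY
 * (16) CDB rather than the 10-byte READ CAPACITY (10); an HBA whose
 * max-cdb-length is smaller than 16 cannot build that CDB, so the
 * EINVAL path here is taken.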
7645 */ 7646 scsi_log(SD_DEVINFO(un), 7647 sd_label, CE_WARN, 7648 "disk capacity is too large " 7649 "for current cdb length"); 7650 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7651 7652 goto spinup_failed; 7653 case EACCES: 7654 /* 7655 * Should never get here if the spin-up 7656 * succeeded, but code it in anyway. 7657 * From here, just continue with the attach... 7658 */ 7659 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7660 "sd_unit_attach: un:0x%p " 7661 "sd_send_scsi_READ_CAPACITY " 7662 "returned reservation conflict\n", un); 7663 reservation_flag = SD_TARGET_IS_RESERVED; 7664 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 7665 break; 7666 default: 7667 /* 7668 * Likewise, should never get here if the 7669 * spin-up succeeded. Just continue with 7670 * the attach... 7671 */ 7672 if (status == EIO) 7673 sd_ssc_assessment(ssc, 7674 SD_FMT_STATUS_CHECK); 7675 else 7676 sd_ssc_assessment(ssc, 7677 SD_FMT_IGNORE); 7678 break; 7679 } 7680 break; 7681 case EACCES: 7682 /* 7683 * Device is reserved by another host. In this case 7684 * we could not spin it up or read the capacity, but 7685 * we continue with the attach anyway. 7686 */ 7687 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7688 "sd_unit_attach: un:0x%p spin-up reservation " 7689 "conflict.\n", un); 7690 reservation_flag = SD_TARGET_IS_RESERVED; 7691 break; 7692 default: 7693 /* Fail the attach if the spin-up failed. */ 7694 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7695 "sd_unit_attach: un:0x%p spin-up failed.", un); 7696 goto spinup_failed; 7697 } 7698 7699 } 7700 7701 /* 7702 * Check to see if this is a MMC drive 7703 */ 7704 if (ISCD(un)) { 7705 sd_set_mmc_caps(ssc); 7706 } 7707 7708 7709 /* 7710 * Add a zero-length attribute to tell the world we support 7711 * kernel ioctls (for layered drivers) 7712 */ 7713 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7714 DDI_KERNEL_IOCTL, NULL, 0); 7715 7716 /* 7717 * Add a boolean property to tell the world we support 7718 * the B_FAILFAST flag (for layered drivers) 7719 */ 7720 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 7721 "ddi-failfast-supported", NULL, 0); 7722 7723 /* 7724 * Initialize power management 7725 */ 7726 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 7727 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 7728 sd_setup_pm(ssc, devi); 7729 if (un->un_f_pm_is_enabled == FALSE) { 7730 /* 7731 * For performance, point to a jump table that does 7732 * not include pm. 7733 * The direct and priority chains don't change with PM. 7734 * 7735 * Note: this is currently done based on individual device 7736 * capabilities. When an interface for determining system 7737 * power enabled state becomes available, or when additional 7738 * layers are added to the command chain, these values will 7739 * have to be re-evaluated for correctness. 7740 */ 7741 if (un->un_f_non_devbsize_supported) { 7742 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 7743 } else { 7744 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 7745 } 7746 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 7747 } 7748 7749 /* 7750 * This property is set to 0 by HA software to avoid retries 7751 * on a reserved disk. (The preferred property name is 7752 * "retry-on-reservation-conflict") (1189689) 7753 * 7754 * Note: The use of a global here can have unintended consequences. 
A 7755 * per instance variable is preferable to match the capabilities of 7756 * different underlying hba's (4402600) 7757 */ 7758 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 7759 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 7760 sd_retry_on_reservation_conflict); 7761 if (sd_retry_on_reservation_conflict != 0) { 7762 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 7763 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 7764 sd_retry_on_reservation_conflict); 7765 } 7766 7767 /* Set up options for QFULL handling. */ 7768 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7769 "qfull-retries", -1)) != -1) { 7770 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 7771 rval, 1); 7772 } 7773 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 7774 "qfull-retry-interval", -1)) != -1) { 7775 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 7776 rval, 1); 7777 } 7778 7779 /* 7780 * This just prints a message that announces the existence of the 7781 * device. The message is always printed in the system logfile, but 7782 * only appears on the console if the system is booted with the 7783 * -v (verbose) argument. 7784 */ 7785 ddi_report_dev(devi); 7786 7787 un->un_mediastate = DKIO_NONE; 7788 7789 cmlb_alloc_handle(&un->un_cmlbhandle); 7790 7791 #if defined(__i386) || defined(__amd64) 7792 /* 7793 * On x86, compensate for off-by-1 legacy error 7794 */ 7795 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable && 7796 (lbasize == un->un_sys_blocksize)) 7797 offbyone = CMLB_OFF_BY_ONE; 7798 #endif 7799 7800 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype, 7801 un->un_f_has_removable_media, un->un_f_is_hotpluggable, 7802 un->un_node_type, offbyone, un->un_cmlbhandle, 7803 (void *)SD_PATH_DIRECT) != 0) { 7804 goto cmlb_attach_failed; 7805 } 7806 7807 7808 /* 7809 * Read and validate the device's geometry (ie, disk label) 7810 * A new unformatted drive will not have a valid geometry, but 7811 * the driver needs to successfully attach to this device so 7812 * the drive can be formatted via ioctls. 7813 */ 7814 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0, 7815 (void *)SD_PATH_DIRECT) == 0) ? 1: 0; 7816 7817 mutex_enter(SD_MUTEX(un)); 7818 7819 /* 7820 * Read and initialize the devid for the unit. 7821 */ 7822 if (un->un_f_devid_supported) { 7823 sd_register_devid(ssc, devi, reservation_flag); 7824 } 7825 mutex_exit(SD_MUTEX(un)); 7826 7827 #if (defined(__fibre)) 7828 /* 7829 * Register callbacks for fibre only. You can't do this solely 7830 * on the basis of the devid_type because this is hba specific. 7831 * We need to query our hba capabilities to find out whether to 7832 * register or not. 7833 */ 7834 if (un->un_f_is_fibre) { 7835 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 7836 sd_init_event_callbacks(un); 7837 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7838 "sd_unit_attach: un:0x%p event callbacks inserted", 7839 un); 7840 } 7841 } 7842 #endif 7843 7844 if (un->un_f_opt_disable_cache == TRUE) { 7845 /* 7846 * Disable both read cache and write cache. This is 7847 * the historic behavior of the keywords in the config file. 7848 */ 7849 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) != 7850 0) { 7851 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 7852 "sd_unit_attach: un:0x%p Could not disable " 7853 "caching", un); 7854 goto devid_failed; 7855 } 7856 } 7857 7858 /* 7859 * Check the value of the WCE bit now and 7860 * set un_f_write_cache_enabled accordingly. 
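 *
 * (For reference: WCE is bit 2 of byte 2 of the caching mode page
 * (MODEPAGE_CACHING, page 8). sd_cache_control() below performs the
 * full read-modify-write of this page via mode_caching_page->wce.)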
7861 */
7862 (void) sd_get_write_cache_enabled(ssc, &wc_enabled);
7863 mutex_enter(SD_MUTEX(un));
7864 un->un_f_write_cache_enabled = (wc_enabled != 0);
7865 mutex_exit(SD_MUTEX(un));
7866
7867 /*
7868 * Check the value of the NV_SUP bit and set
7869 * un_f_suppress_cache_flush accordingly.
7870 */
7871 sd_get_nv_sup(ssc);
7872
7873 /*
7874 * Find out what type of reservation this disk supports.
7875 */
7876 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL);
7877
7878 switch (status) {
7879 case 0:
7880 /*
7881 * SCSI-3 reservations are supported.
7882 */
7883 un->un_reservation_type = SD_SCSI3_RESERVATION;
7884 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7885 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
7886 break;
7887 case ENOTSUP:
7888 /*
7889 * The PERSISTENT RESERVE IN command would not be recognized by
7890 * a SCSI-2 device, so assume the reservation type is SCSI-2.
7891 */
7892 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7893 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un);
7894 un->un_reservation_type = SD_SCSI2_RESERVATION;
7895
7896 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
7897 break;
7898 default:
7899 /*
7900 * default to SCSI-3 reservations
7901 */
7902 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7903 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un);
7904 un->un_reservation_type = SD_SCSI3_RESERVATION;
7905
7906 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
7907 break;
7908 }
7909
7910 /*
7911 * Set the pstat and error stat values here, so data obtained during the
7912 * previous attach-time routines is available.
7913 *
7914 * Note: This is a critical sequence that needs to be maintained:
7915 * 1) Instantiate the kstats before any routines using the iopath
7916 * (i.e. sd_send_scsi_cmd).
7917 * 2) Initialize the error stats (sd_set_errstats) and partition
7918 * stats (sd_set_pstats) here, following
7919 * cmlb_validate_geometry(), sd_register_devid(), and
7920 * sd_cache_control().
7921 */
7922
7923 if (un->un_f_pkstats_enabled && geom_label_valid) {
7924 sd_set_pstats(un);
7925 SD_TRACE(SD_LOG_IO_PARTITION, un,
7926 "sd_unit_attach: un:0x%p pstats created and set\n", un);
7927 }
7928
7929 sd_set_errstats(un);
7930 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7931 "sd_unit_attach: un:0x%p errstats set\n", un);
7932
7933
7934 /*
7935 * After successfully attaching an instance, we record how many
7936 * luns have been attached on the respective target and
7937 * controller for parallel SCSI. This information is used when sd tries
7938 * to set the tagged queuing capability in the HBA.
7939 */
7940 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) {
7941 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH);
7942 }
7943
7944 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7945 "sd_unit_attach: un:0x%p exit success\n", un);
7946
7947 /* Uninitialize sd_ssc_t pointer */
7948 sd_ssc_fini(ssc);
7949
7950 return (DDI_SUCCESS);
7951
7952 /*
7953 * An error occurred during the attach; clean up & return failure.
7954 */
7955
7956 devid_failed:
7957
7958 setup_pm_failed:
7959 ddi_remove_minor_node(devi, NULL);
7960
7961 cmlb_attach_failed:
7962 /*
7963 * Cleanup from the scsi_ifsetcap() calls (437868)
7964 */
7965 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
7966 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
7967
7968 /*
7969 * Refer to the comments on setting tagged-qing at the beginning of
7970 * sd_unit_attach. We can only disable tagged queuing when there is
7971 * no lun attached on the target.
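 *
 * For example: with LUN 0 already attached on target 3, a failed
 * attach of LUN 1 on the same target must not clear "tagged-qing",
 * or LUN 0 would silently lose tagged queuing; hence the guard:
 *
 *	if (sd_scsi_get_target_lun_count(pdip, tgt) < 1)
 *		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing",
 *		    0, 1);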
7972 */ 7973 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) { 7974 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 7975 } 7976 7977 if (un->un_f_is_fibre == FALSE) { 7978 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 7979 } 7980 7981 spinup_failed: 7982 7983 /* Uninitialize sd_ssc_t pointer */ 7984 sd_ssc_fini(ssc); 7985 7986 mutex_enter(SD_MUTEX(un)); 7987 7988 /* Deallocate SCSI FMA memory spaces */ 7989 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 7990 7991 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */ 7992 if (un->un_direct_priority_timeid != NULL) { 7993 timeout_id_t temp_id = un->un_direct_priority_timeid; 7994 un->un_direct_priority_timeid = NULL; 7995 mutex_exit(SD_MUTEX(un)); 7996 (void) untimeout(temp_id); 7997 mutex_enter(SD_MUTEX(un)); 7998 } 7999 8000 /* Cancel any pending start/stop timeouts */ 8001 if (un->un_startstop_timeid != NULL) { 8002 timeout_id_t temp_id = un->un_startstop_timeid; 8003 un->un_startstop_timeid = NULL; 8004 mutex_exit(SD_MUTEX(un)); 8005 (void) untimeout(temp_id); 8006 mutex_enter(SD_MUTEX(un)); 8007 } 8008 8009 /* Cancel any pending reset-throttle timeouts */ 8010 if (un->un_reset_throttle_timeid != NULL) { 8011 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8012 un->un_reset_throttle_timeid = NULL; 8013 mutex_exit(SD_MUTEX(un)); 8014 (void) untimeout(temp_id); 8015 mutex_enter(SD_MUTEX(un)); 8016 } 8017 8018 /* Cancel any pending retry timeouts */ 8019 if (un->un_retry_timeid != NULL) { 8020 timeout_id_t temp_id = un->un_retry_timeid; 8021 un->un_retry_timeid = NULL; 8022 mutex_exit(SD_MUTEX(un)); 8023 (void) untimeout(temp_id); 8024 mutex_enter(SD_MUTEX(un)); 8025 } 8026 8027 /* Cancel any pending delayed cv broadcast timeouts */ 8028 if (un->un_dcvb_timeid != NULL) { 8029 timeout_id_t temp_id = un->un_dcvb_timeid; 8030 un->un_dcvb_timeid = NULL; 8031 mutex_exit(SD_MUTEX(un)); 8032 (void) untimeout(temp_id); 8033 mutex_enter(SD_MUTEX(un)); 8034 } 8035 8036 mutex_exit(SD_MUTEX(un)); 8037 8038 /* There should not be any in-progress I/O so ASSERT this check */ 8039 ASSERT(un->un_ncmds_in_transport == 0); 8040 ASSERT(un->un_ncmds_in_driver == 0); 8041 8042 /* Do not free the softstate if the callback routine is active */ 8043 sd_sync_with_callback(un); 8044 8045 /* 8046 * Partition stats apparently are not used with removables. These would 8047 * not have been created during attach, so no need to clean them up... 8048 */ 8049 if (un->un_errstats != NULL) { 8050 kstat_delete(un->un_errstats); 8051 un->un_errstats = NULL; 8052 } 8053 8054 create_errstats_failed: 8055 8056 if (un->un_stats != NULL) { 8057 kstat_delete(un->un_stats); 8058 un->un_stats = NULL; 8059 } 8060 8061 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8062 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8063 8064 ddi_prop_remove_all(devi); 8065 sema_destroy(&un->un_semoclose); 8066 cv_destroy(&un->un_state_cv); 8067 8068 getrbuf_failed: 8069 8070 sd_free_rqs(un); 8071 8072 alloc_rqs_failed: 8073 8074 devp->sd_private = NULL; 8075 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8076 8077 get_softstate_failed: 8078 /* 8079 * Note: the man pages are unclear as to whether or not doing a 8080 * ddi_soft_state_free(sd_state, instance) is the right way to 8081 * clean up after the ddi_soft_state_zalloc() if the subsequent 8082 * ddi_get_soft_state() fails. The implication seems to be 8083 * that the get_soft_state cannot fail if the zalloc succeeds. 
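 *
 * For context, a minimal sketch of the ddi_soft_state(9F) lifecycle
 * this driver relies on (the init/fini calls are typically paired
 * in _init()/_fini()):
 *
 *	(void) ddi_soft_state_init(&sd_state,
 *	    sizeof (struct sd_lun), 1);
 *	(void) ddi_soft_state_zalloc(sd_state, instance);
 *	un = ddi_get_soft_state(sd_state, instance);
 *	...
 *	ddi_soft_state_free(sd_state, instance);
 *	ddi_soft_state_fini(&sd_state);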
8084 */ 8085 ddi_soft_state_free(sd_state, instance); 8086 8087 probe_failed: 8088 scsi_unprobe(devp); 8089 8090 return (DDI_FAILURE); 8091 } 8092 8093 8094 /* 8095 * Function: sd_unit_detach 8096 * 8097 * Description: Performs DDI_DETACH processing for sddetach(). 8098 * 8099 * Return Code: DDI_SUCCESS 8100 * DDI_FAILURE 8101 * 8102 * Context: Kernel thread context 8103 */ 8104 8105 static int 8106 sd_unit_detach(dev_info_t *devi) 8107 { 8108 struct scsi_device *devp; 8109 struct sd_lun *un; 8110 int i; 8111 int tgt; 8112 dev_t dev; 8113 dev_info_t *pdip = ddi_get_parent(devi); 8114 int instance = ddi_get_instance(devi); 8115 8116 mutex_enter(&sd_detach_mutex); 8117 8118 /* 8119 * Fail the detach for any of the following: 8120 * - Unable to get the sd_lun struct for the instance 8121 * - A layered driver has an outstanding open on the instance 8122 * - Another thread is already detaching this instance 8123 * - Another thread is currently performing an open 8124 */ 8125 devp = ddi_get_driver_private(devi); 8126 if ((devp == NULL) || 8127 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8128 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8129 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8130 mutex_exit(&sd_detach_mutex); 8131 return (DDI_FAILURE); 8132 } 8133 8134 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8135 8136 /* 8137 * Mark this instance as currently in a detach, to inhibit any 8138 * opens from a layered driver. 8139 */ 8140 un->un_detach_count++; 8141 mutex_exit(&sd_detach_mutex); 8142 8143 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 8144 SCSI_ADDR_PROP_TARGET, -1); 8145 8146 dev = sd_make_device(SD_DEVINFO(un)); 8147 8148 #ifndef lint 8149 _NOTE(COMPETING_THREADS_NOW); 8150 #endif 8151 8152 mutex_enter(SD_MUTEX(un)); 8153 8154 /* 8155 * Fail the detach if there are any outstanding layered 8156 * opens on this device. 8157 */ 8158 for (i = 0; i < NDKMAP; i++) { 8159 if (un->un_ocmap.lyropen[i] != 0) { 8160 goto err_notclosed; 8161 } 8162 } 8163 8164 /* 8165 * Verify there are NO outstanding commands issued to this device. 8166 * ie, un_ncmds_in_transport == 0. 8167 * It's possible to have outstanding commands through the physio 8168 * code path, even though everything's closed. 8169 */ 8170 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8171 (un->un_direct_priority_timeid != NULL) || 8172 (un->un_state == SD_STATE_RWAIT)) { 8173 mutex_exit(SD_MUTEX(un)); 8174 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8175 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8176 goto err_stillbusy; 8177 } 8178 8179 /* 8180 * If we have the device reserved, release the reservation. 8181 */ 8182 if ((un->un_resvd_status & SD_RESERVE) && 8183 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8184 mutex_exit(SD_MUTEX(un)); 8185 /* 8186 * Note: sd_reserve_release sends a command to the device 8187 * via the sd_ioctlcmd() path, and can sleep. 8188 */ 8189 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8190 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8191 "sd_dr_detach: Cannot release reservation \n"); 8192 } 8193 } else { 8194 mutex_exit(SD_MUTEX(un)); 8195 } 8196 8197 /* 8198 * Untimeout any reserve recover, throttle reset, restart unit 8199 * and delayed broadcast timeout threads. Protect the timeout pointer 8200 * from getting nulled by their callback functions. 
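 *
 * Each cancellation below follows the same pattern (a sketch):
 *
 *	timeout_id_t temp_id = un->un_resvd_timeid;
 *	un->un_resvd_timeid = NULL;
 *	mutex_exit(SD_MUTEX(un));
 *	(void) untimeout(temp_id);
 *	mutex_enter(SD_MUTEX(un));
 *
 * SD_MUTEX must be dropped around untimeout(9F) because untimeout()
 * waits for an already-running callback to complete; if that callback
 * in turn needs SD_MUTEX, holding it across the call could deadlock.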
8201 */ 8202 mutex_enter(SD_MUTEX(un)); 8203 if (un->un_resvd_timeid != NULL) { 8204 timeout_id_t temp_id = un->un_resvd_timeid; 8205 un->un_resvd_timeid = NULL; 8206 mutex_exit(SD_MUTEX(un)); 8207 (void) untimeout(temp_id); 8208 mutex_enter(SD_MUTEX(un)); 8209 } 8210 8211 if (un->un_reset_throttle_timeid != NULL) { 8212 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8213 un->un_reset_throttle_timeid = NULL; 8214 mutex_exit(SD_MUTEX(un)); 8215 (void) untimeout(temp_id); 8216 mutex_enter(SD_MUTEX(un)); 8217 } 8218 8219 if (un->un_startstop_timeid != NULL) { 8220 timeout_id_t temp_id = un->un_startstop_timeid; 8221 un->un_startstop_timeid = NULL; 8222 mutex_exit(SD_MUTEX(un)); 8223 (void) untimeout(temp_id); 8224 mutex_enter(SD_MUTEX(un)); 8225 } 8226 8227 if (un->un_dcvb_timeid != NULL) { 8228 timeout_id_t temp_id = un->un_dcvb_timeid; 8229 un->un_dcvb_timeid = NULL; 8230 mutex_exit(SD_MUTEX(un)); 8231 (void) untimeout(temp_id); 8232 } else { 8233 mutex_exit(SD_MUTEX(un)); 8234 } 8235 8236 /* Remove any pending reservation reclaim requests for this device */ 8237 sd_rmv_resv_reclaim_req(dev); 8238 8239 mutex_enter(SD_MUTEX(un)); 8240 8241 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8242 if (un->un_direct_priority_timeid != NULL) { 8243 timeout_id_t temp_id = un->un_direct_priority_timeid; 8244 un->un_direct_priority_timeid = NULL; 8245 mutex_exit(SD_MUTEX(un)); 8246 (void) untimeout(temp_id); 8247 mutex_enter(SD_MUTEX(un)); 8248 } 8249 8250 /* Cancel any active multi-host disk watch thread requests */ 8251 if (un->un_mhd_token != NULL) { 8252 mutex_exit(SD_MUTEX(un)); 8253 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8254 if (scsi_watch_request_terminate(un->un_mhd_token, 8255 SCSI_WATCH_TERMINATE_NOWAIT)) { 8256 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8257 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8258 /* 8259 * Note: We are returning here after having removed 8260 * some driver timeouts above. This is consistent with 8261 * the legacy implementation but perhaps the watch 8262 * terminate call should be made with the wait flag set. 8263 */ 8264 goto err_stillbusy; 8265 } 8266 mutex_enter(SD_MUTEX(un)); 8267 un->un_mhd_token = NULL; 8268 } 8269 8270 if (un->un_swr_token != NULL) { 8271 mutex_exit(SD_MUTEX(un)); 8272 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8273 if (scsi_watch_request_terminate(un->un_swr_token, 8274 SCSI_WATCH_TERMINATE_NOWAIT)) { 8275 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8276 "sd_dr_detach: Cannot cancel swr watch request\n"); 8277 /* 8278 * Note: We are returning here after having removed 8279 * some driver timeouts above. This is consistent with 8280 * the legacy implementation but perhaps the watch 8281 * terminate call should be made with the wait flag set. 8282 */ 8283 goto err_stillbusy; 8284 } 8285 mutex_enter(SD_MUTEX(un)); 8286 un->un_swr_token = NULL; 8287 } 8288 8289 mutex_exit(SD_MUTEX(un)); 8290 8291 /* 8292 * Clear any scsi_reset_notifies. We clear the reset notifies 8293 * if we have not registered one. 8294 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 8295 */ 8296 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8297 sd_mhd_reset_notify_cb, (caddr_t)un); 8298 8299 /* 8300 * protect the timeout pointers from getting nulled by 8301 * their callback functions during the cancellation process. 8302 * In such a scenario untimeout can be invoked with a null value. 
8303 */
8304 _NOTE(NO_COMPETING_THREADS_NOW);
8305
8306 mutex_enter(&un->un_pm_mutex);
8307 if (un->un_pm_idle_timeid != NULL) {
8308 timeout_id_t temp_id = un->un_pm_idle_timeid;
8309 un->un_pm_idle_timeid = NULL;
8310 mutex_exit(&un->un_pm_mutex);
8311
8312 /*
8313 * Timeout is active; cancel it.
8314 * Note that it'll never be active on a device
8315 * that does not support PM; therefore we don't
8316 * have to check before calling pm_idle_component.
8317 */
8318 (void) untimeout(temp_id);
8319 (void) pm_idle_component(SD_DEVINFO(un), 0);
8320 mutex_enter(&un->un_pm_mutex);
8321 }
8322
8323 /*
8324 * Check whether there is already a timeout scheduled for power
8325 * management. If so, don't lower the power here; that's
8326 * the timeout handler's job.
8327 */
8328 if (un->un_pm_timeid != NULL) {
8329 timeout_id_t temp_id = un->un_pm_timeid;
8330 un->un_pm_timeid = NULL;
8331 mutex_exit(&un->un_pm_mutex);
8332 /*
8333 * Timeout is active; cancel it.
8334 * Note that it'll never be active on a device
8335 * that does not support PM; therefore we don't
8336 * have to check before calling pm_idle_component.
8337 */
8338 (void) untimeout(temp_id);
8339 (void) pm_idle_component(SD_DEVINFO(un), 0);
8340
8341 } else {
8342 mutex_exit(&un->un_pm_mutex);
8343 if ((un->un_f_pm_is_enabled == TRUE) &&
8344 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) !=
8345 DDI_SUCCESS)) {
8346 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8347 "sd_dr_detach: Lower power request failed, ignoring.\n");
8348 /*
8349 * Fix for bug: 4297749, item # 13
8350 * The above test now includes a check to see if PM is
8351 * supported by this device before calling
8352 * pm_lower_power().
8353 * Note, the following is not dead code. The call to
8354 * pm_lower_power above will generate a call back into
8355 * our sdpower routine which might result in a timeout
8356 * handler getting activated. Therefore the following
8357 * code is valid and necessary.
8358 */
8359 mutex_enter(&un->un_pm_mutex);
8360 if (un->un_pm_timeid != NULL) {
8361 timeout_id_t temp_id = un->un_pm_timeid;
8362 un->un_pm_timeid = NULL;
8363 mutex_exit(&un->un_pm_mutex);
8364 (void) untimeout(temp_id);
8365 (void) pm_idle_component(SD_DEVINFO(un), 0);
8366 } else {
8367 mutex_exit(&un->un_pm_mutex);
8368 }
8369 }
8370 }
8371
8372 /*
8373 * Cleanup from the scsi_ifsetcap() calls (437868)
8374 * Relocated here from above to be after the call to
8375 * pm_lower_power, which was getting errors.
8376 */
8377 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
8378 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
8379
8380 /*
8381 * Currently, tagged queuing is supported per target by the HBA.
8382 * Setting this per lun instance actually sets the capability of the
8383 * whole target in the HBA, which affects those luns already attached
8384 * on the same target. So during detach, we can disable this
8385 * capability only when this is the only lun left on the target. By
8386 * doing this, we assume a target has the same tagged queuing
8387 * capability for every lun. The condition can be removed when the
8388 * HBA is changed to support per-lun tagged queuing.
8389 */ 8390 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) { 8391 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8392 } 8393 8394 if (un->un_f_is_fibre == FALSE) { 8395 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8396 } 8397 8398 /* 8399 * Remove any event callbacks, fibre only 8400 */ 8401 if (un->un_f_is_fibre == TRUE) { 8402 if ((un->un_insert_event != NULL) && 8403 (ddi_remove_event_handler(un->un_insert_cb_id) != 8404 DDI_SUCCESS)) { 8405 /* 8406 * Note: We are returning here after having done 8407 * substantial cleanup above. This is consistent 8408 * with the legacy implementation but this may not 8409 * be the right thing to do. 8410 */ 8411 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8412 "sd_dr_detach: Cannot cancel insert event\n"); 8413 goto err_remove_event; 8414 } 8415 un->un_insert_event = NULL; 8416 8417 if ((un->un_remove_event != NULL) && 8418 (ddi_remove_event_handler(un->un_remove_cb_id) != 8419 DDI_SUCCESS)) { 8420 /* 8421 * Note: We are returning here after having done 8422 * substantial cleanup above. This is consistent 8423 * with the legacy implementation but this may not 8424 * be the right thing to do. 8425 */ 8426 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8427 "sd_dr_detach: Cannot cancel remove event\n"); 8428 goto err_remove_event; 8429 } 8430 un->un_remove_event = NULL; 8431 } 8432 8433 /* Do not free the softstate if the callback routine is active */ 8434 sd_sync_with_callback(un); 8435 8436 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 8437 cmlb_free_handle(&un->un_cmlbhandle); 8438 8439 /* 8440 * Hold the detach mutex here, to make sure that no other threads ever 8441 * can access a (partially) freed soft state structure. 8442 */ 8443 mutex_enter(&sd_detach_mutex); 8444 8445 /* 8446 * Clean up the soft state struct. 8447 * Cleanup is done in reverse order of allocs/inits. 8448 * At this point there should be no competing threads anymore. 8449 */ 8450 8451 scsi_fm_fini(devp); 8452 8453 /* 8454 * Deallocate memory for SCSI FMA. 8455 */ 8456 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal)); 8457 8458 /* Unregister and free device id. */ 8459 ddi_devid_unregister(devi); 8460 if (un->un_devid) { 8461 ddi_devid_free(un->un_devid); 8462 un->un_devid = NULL; 8463 } 8464 8465 /* 8466 * Destroy wmap cache if it exists. 8467 */ 8468 if (un->un_wm_cache != NULL) { 8469 kmem_cache_destroy(un->un_wm_cache); 8470 un->un_wm_cache = NULL; 8471 } 8472 8473 /* 8474 * kstat cleanup is done in detach for all device types (4363169). 8475 * We do not want to fail detach if the device kstats are not deleted 8476 * since there is a confusion about the devo_refcnt for the device. 8477 * We just delete the kstats and let detach complete successfully. 
8478 */ 8479 if (un->un_stats != NULL) { 8480 kstat_delete(un->un_stats); 8481 un->un_stats = NULL; 8482 } 8483 if (un->un_errstats != NULL) { 8484 kstat_delete(un->un_errstats); 8485 un->un_errstats = NULL; 8486 } 8487 8488 /* Remove partition stats */ 8489 if (un->un_f_pkstats_enabled) { 8490 for (i = 0; i < NSDMAP; i++) { 8491 if (un->un_pstats[i] != NULL) { 8492 kstat_delete(un->un_pstats[i]); 8493 un->un_pstats[i] = NULL; 8494 } 8495 } 8496 } 8497 8498 /* Remove xbuf registration */ 8499 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8500 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8501 8502 /* Remove driver properties */ 8503 ddi_prop_remove_all(devi); 8504 8505 mutex_destroy(&un->un_pm_mutex); 8506 cv_destroy(&un->un_pm_busy_cv); 8507 8508 cv_destroy(&un->un_wcc_cv); 8509 8510 /* Open/close semaphore */ 8511 sema_destroy(&un->un_semoclose); 8512 8513 /* Removable media condvar. */ 8514 cv_destroy(&un->un_state_cv); 8515 8516 /* Suspend/resume condvar. */ 8517 cv_destroy(&un->un_suspend_cv); 8518 cv_destroy(&un->un_disk_busy_cv); 8519 8520 sd_free_rqs(un); 8521 8522 /* Free up soft state */ 8523 devp->sd_private = NULL; 8524 8525 bzero(un, sizeof (struct sd_lun)); 8526 ddi_soft_state_free(sd_state, instance); 8527 8528 mutex_exit(&sd_detach_mutex); 8529 8530 /* This frees up the INQUIRY data associated with the device. */ 8531 scsi_unprobe(devp); 8532 8533 /* 8534 * After successfully detaching an instance, we update the information 8535 * of how many luns have been attached in the relative target and 8536 * controller for parallel SCSI. This information is used when sd tries 8537 * to set the tagged queuing capability in HBA. 8538 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to 8539 * check if the device is parallel SCSI. However, we don't need to 8540 * check here because we've already checked during attach. No device 8541 * that is not parallel SCSI is in the chain. 8542 */ 8543 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) { 8544 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH); 8545 } 8546 8547 return (DDI_SUCCESS); 8548 8549 err_notclosed: 8550 mutex_exit(SD_MUTEX(un)); 8551 8552 err_stillbusy: 8553 _NOTE(NO_COMPETING_THREADS_NOW); 8554 8555 err_remove_event: 8556 mutex_enter(&sd_detach_mutex); 8557 un->un_detach_count--; 8558 mutex_exit(&sd_detach_mutex); 8559 8560 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 8561 return (DDI_FAILURE); 8562 } 8563 8564 8565 /* 8566 * Function: sd_create_errstats 8567 * 8568 * Description: This routine instantiates the device error stats. 8569 * 8570 * Note: During attach the stats are instantiated first so they are 8571 * available for attach-time routines that utilize the driver 8572 * iopath to send commands to the device. The stats are initialized 8573 * separately so data obtained during some attach-time routines is 8574 * available. 
(4362483) 8575 * 8576 * Arguments: un - driver soft state (unit) structure 8577 * instance - driver instance 8578 * 8579 * Context: Kernel thread context 8580 */ 8581 8582 static void 8583 sd_create_errstats(struct sd_lun *un, int instance) 8584 { 8585 struct sd_errstats *stp; 8586 char kstatmodule_err[KSTAT_STRLEN]; 8587 char kstatname[KSTAT_STRLEN]; 8588 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 8589 8590 ASSERT(un != NULL); 8591 8592 if (un->un_errstats != NULL) { 8593 return; 8594 } 8595 8596 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 8597 "%serr", sd_label); 8598 (void) snprintf(kstatname, sizeof (kstatname), 8599 "%s%d,err", sd_label, instance); 8600 8601 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 8602 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 8603 8604 if (un->un_errstats == NULL) { 8605 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8606 "sd_create_errstats: Failed kstat_create\n"); 8607 return; 8608 } 8609 8610 stp = (struct sd_errstats *)un->un_errstats->ks_data; 8611 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 8612 KSTAT_DATA_UINT32); 8613 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 8614 KSTAT_DATA_UINT32); 8615 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 8616 KSTAT_DATA_UINT32); 8617 kstat_named_init(&stp->sd_vid, "Vendor", 8618 KSTAT_DATA_CHAR); 8619 kstat_named_init(&stp->sd_pid, "Product", 8620 KSTAT_DATA_CHAR); 8621 kstat_named_init(&stp->sd_revision, "Revision", 8622 KSTAT_DATA_CHAR); 8623 kstat_named_init(&stp->sd_serial, "Serial No", 8624 KSTAT_DATA_CHAR); 8625 kstat_named_init(&stp->sd_capacity, "Size", 8626 KSTAT_DATA_ULONGLONG); 8627 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 8628 KSTAT_DATA_UINT32); 8629 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 8630 KSTAT_DATA_UINT32); 8631 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 8632 KSTAT_DATA_UINT32); 8633 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 8634 KSTAT_DATA_UINT32); 8635 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 8636 KSTAT_DATA_UINT32); 8637 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 8638 KSTAT_DATA_UINT32); 8639 8640 un->un_errstats->ks_private = un; 8641 un->un_errstats->ks_update = nulldev; 8642 8643 kstat_install(un->un_errstats); 8644 } 8645 8646 8647 /* 8648 * Function: sd_set_errstats 8649 * 8650 * Description: This routine sets the value of the vendor id, product id, 8651 * revision, serial number, and capacity device error stats. 8652 * 8653 * Note: During attach the stats are instantiated first so they are 8654 * available for attach-time routines that utilize the driver 8655 * iopath to send commands to the device. The stats are initialized 8656 * separately so data obtained during some attach-time routines is 8657 * available. 
(4362483) 8658 *
8659 * Arguments: un - driver soft state (unit) structure
8660 *
8661 * Context: Kernel thread context
8662 */
8663
8664 static void
8665 sd_set_errstats(struct sd_lun *un)
8666 {
8667 struct sd_errstats *stp;
8668
8669 ASSERT(un != NULL);
8670 ASSERT(un->un_errstats != NULL);
8671 stp = (struct sd_errstats *)un->un_errstats->ks_data;
8672 ASSERT(stp != NULL);
8673 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
8674 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
8675 (void) strncpy(stp->sd_revision.value.c,
8676 un->un_sd->sd_inq->inq_revision, 4);
8677
8678 /*
8679 * All the errstats are persistent across detach/attach, so reset
8680 * them here to handle hot replacement of disk drives, except
8681 * when the same Sun-qualified drive is still present (unchanged
8682 * vendor and serial number).
8683 */
8684 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
8685 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
8686 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
8687 stp->sd_softerrs.value.ui32 = 0;
8688 stp->sd_harderrs.value.ui32 = 0;
8689 stp->sd_transerrs.value.ui32 = 0;
8690 stp->sd_rq_media_err.value.ui32 = 0;
8691 stp->sd_rq_ntrdy_err.value.ui32 = 0;
8692 stp->sd_rq_nodev_err.value.ui32 = 0;
8693 stp->sd_rq_recov_err.value.ui32 = 0;
8694 stp->sd_rq_illrq_err.value.ui32 = 0;
8695 stp->sd_rq_pfa_err.value.ui32 = 0;
8696 }
8697
8698 /*
8699 * Set the "Serial No" kstat for Sun qualified drives (indicated by
8700 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
8701 * (4376302))
8702 */
8703 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
8704 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
8705 sizeof (SD_INQUIRY(un)->inq_serial));
8706 }
8707
8708 if (un->un_f_blockcount_is_valid != TRUE) {
8709 /*
8710 * Set capacity error stat to 0 for no media. This ensures
8711 * a valid capacity is displayed in response to 'iostat -E'
8712 * when no media is present in the device.
8713 */
8714 stp->sd_capacity.value.ui64 = 0;
8715 } else {
8716 /*
8717 * Multiply un_blockcount by un->un_sys_blocksize to get
8718 * capacity.
8719 *
8720 * Note: for non-512 blocksize devices "un_blockcount" has been
8721 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
8722 * (un_tgt_blocksize / un->un_sys_blocksize).
8723 */
8724 stp->sd_capacity.value.ui64 = (uint64_t)
8725 ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
8726 }
8727 }
8728
8729
8730 /*
8731 * Function: sd_set_pstats
8732 *
8733 * Description: This routine instantiates and initializes the partition
8734 * stats for each partition with more than zero blocks.
8735 * (4363169)
8736 *
8737 * Arguments: un - driver soft state (unit) structure
8738 *
8739 * Context: Kernel thread context
8740 */
8741
8742 static void
8743 sd_set_pstats(struct sd_lun *un)
8744 {
8745 char kstatname[KSTAT_STRLEN];
8746 int instance;
8747 int i;
8748 diskaddr_t nblks = 0;
8749 char *partname = NULL;
8750
8751 ASSERT(un != NULL);
8752
8753 instance = ddi_get_instance(SD_DEVINFO(un));
8754
8755 /* Note: x86: is this a VTOC8/VTOC16 difference?
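 * (For illustration: with sd_label "sd", instance 0, and a partition
 * that cmlb_partinfo() names "a", the snprintf below produces the
 * kstat name "sd0,a"; only partitions with a nonzero block count
 * get a kstat.)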
*/
8756 for (i = 0; i < NSDMAP; i++) {
8757
8758 if (cmlb_partinfo(un->un_cmlbhandle, i,
8759 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
8760 continue;
8761 mutex_enter(SD_MUTEX(un));
8762
8763 if ((un->un_pstats[i] == NULL) &&
8764 (nblks != 0)) {
8765
8766 (void) snprintf(kstatname, sizeof (kstatname),
8767 "%s%d,%s", sd_label, instance,
8768 partname);
8769
8770 un->un_pstats[i] = kstat_create(sd_label,
8771 instance, kstatname, "partition", KSTAT_TYPE_IO,
8772 1, KSTAT_FLAG_PERSISTENT);
8773 if (un->un_pstats[i] != NULL) {
8774 un->un_pstats[i]->ks_lock = SD_MUTEX(un);
8775 kstat_install(un->un_pstats[i]);
8776 }
8777 }
8778 mutex_exit(SD_MUTEX(un));
8779 }
8780 }
8781
8782
8783 #if (defined(__fibre))
8784 /*
8785 * Function: sd_init_event_callbacks
8786 *
8787 * Description: This routine initializes the insertion and removal event
8788 * callbacks. (fibre only)
8789 *
8790 * Arguments: un - driver soft state (unit) structure
8791 *
8792 * Context: Kernel thread context
8793 */
8794
8795 static void
8796 sd_init_event_callbacks(struct sd_lun *un)
8797 {
8798 ASSERT(un != NULL);
8799
8800 if ((un->un_insert_event == NULL) &&
8801 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
8802 &un->un_insert_event) == DDI_SUCCESS)) {
8803 /*
8804 * Add the callback for an insertion event
8805 */
8806 (void) ddi_add_event_handler(SD_DEVINFO(un),
8807 un->un_insert_event, sd_event_callback, (void *)un,
8808 &(un->un_insert_cb_id));
8809 }
8810
8811 if ((un->un_remove_event == NULL) &&
8812 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
8813 &un->un_remove_event) == DDI_SUCCESS)) {
8814 /*
8815 * Add the callback for a removal event
8816 */
8817 (void) ddi_add_event_handler(SD_DEVINFO(un),
8818 un->un_remove_event, sd_event_callback, (void *)un,
8819 &(un->un_remove_cb_id));
8820 }
8821 }
8822
8823
8824 /*
8825 * Function: sd_event_callback
8826 *
8827 * Description: This routine handles insert/remove events (photon). The
8828 * state is changed to OFFLINE, which can be used to suppress
8829 * error messages. (fibre only)
8830 *
8831 * Arguments: un - driver soft state (unit) structure
8832 *
8833 * Context: Callout thread context
8834 */
8835 /* ARGSUSED */
8836 static void
8837 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
8838 void *bus_impldata)
8839 {
8840 struct sd_lun *un = (struct sd_lun *)arg;
8841
8842 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
8843 if (event == un->un_insert_event) {
8844 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
8845 mutex_enter(SD_MUTEX(un));
8846 if (un->un_state == SD_STATE_OFFLINE) {
8847 if (un->un_last_state != SD_STATE_SUSPENDED) {
8848 un->un_state = un->un_last_state;
8849 } else {
8850 /*
8851 * We have gone through SUSPEND/RESUME while
8852 * we were offline. Restore the last state.
8853 */
8854 un->un_state = un->un_save_state;
8855 }
8856 }
8857 mutex_exit(SD_MUTEX(un));
8858
8859 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
8860 } else if (event == un->un_remove_event) {
8861 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
8862 mutex_enter(SD_MUTEX(un));
8863 /*
8864 * We need to handle an event callback that occurs during
8865 * the suspend operation, since we don't prevent it.
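 * A sketch of the transitions implemented by this callback:
 *
 *	insert event: OFFLINE -> un_last_state (or un_save_state if a
 *	    SUSPEND/RESUME occurred while offline)
 *	remove event: any non-OFFLINE state -> OFFLINE (recorded only
 *	    in un_last_state if currently SUSPENDED)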
8866 */ 8867 if (un->un_state != SD_STATE_OFFLINE) { 8868 if (un->un_state != SD_STATE_SUSPENDED) { 8869 New_state(un, SD_STATE_OFFLINE); 8870 } else { 8871 un->un_last_state = SD_STATE_OFFLINE; 8872 } 8873 } 8874 mutex_exit(SD_MUTEX(un)); 8875 } else { 8876 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 8877 "!Unknown event\n"); 8878 } 8879 8880 } 8881 #endif 8882 8883 /* 8884 * Function: sd_cache_control() 8885 * 8886 * Description: This routine is the driver entry point for setting 8887 * read and write caching by modifying the WCE (write cache 8888 * enable) and RCD (read cache disable) bits of mode 8889 * page 8 (MODEPAGE_CACHING). 8890 * 8891 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 8892 * structure for this target. 8893 * rcd_flag - flag for controlling the read cache 8894 * wce_flag - flag for controlling the write cache 8895 * 8896 * Return Code: EIO 8897 * code returned by sd_send_scsi_MODE_SENSE and 8898 * sd_send_scsi_MODE_SELECT 8899 * 8900 * Context: Kernel Thread 8901 */ 8902 8903 static int 8904 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag) 8905 { 8906 struct mode_caching *mode_caching_page; 8907 uchar_t *header; 8908 size_t buflen; 8909 int hdrlen; 8910 int bd_len; 8911 int rval = 0; 8912 struct mode_header_grp2 *mhp; 8913 struct sd_lun *un; 8914 int status; 8915 8916 ASSERT(ssc != NULL); 8917 un = ssc->ssc_un; 8918 ASSERT(un != NULL); 8919 8920 /* 8921 * Do a test unit ready; otherwise, a mode sense may not work if this 8922 * is the first command sent to the device after boot. 8923 */ 8924 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 8925 if (status != 0) 8926 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 8927 8928 if (un->un_f_cfg_is_atapi == TRUE) { 8929 hdrlen = MODE_HEADER_LENGTH_GRP2; 8930 } else { 8931 hdrlen = MODE_HEADER_LENGTH; 8932 } 8933 8934 /* 8935 * Allocate memory for the retrieved mode page and its headers. Set 8936 * a pointer to the page itself. Use mode_cache_scsi3 to ensure 8937 * we get all of the mode sense data; otherwise, the mode select 8938 * will fail. mode_cache_scsi3 is a superset of mode_caching. 8939 */ 8940 buflen = hdrlen + MODE_BLK_DESC_LENGTH + 8941 sizeof (struct mode_cache_scsi3); 8942 8943 header = kmem_zalloc(buflen, KM_SLEEP); 8944 8945 /* Get the information from the device. */ 8946 if (un->un_f_cfg_is_atapi == TRUE) { 8947 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 8948 MODEPAGE_CACHING, SD_PATH_DIRECT); 8949 } else { 8950 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 8951 MODEPAGE_CACHING, SD_PATH_DIRECT); 8952 } 8953 8954 if (rval != 0) { 8955 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 8956 "sd_cache_control: Mode Sense Failed\n"); 8957 goto mode_sense_failed; 8958 } 8959 8960 /* 8961 * Determine size of Block Descriptors in order to locate 8962 * the mode page data. ATAPI devices return 0, SCSI devices 8963 * should return MODE_BLK_DESC_LENGTH.
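 *
 * As a reminder, the buffer returned by the MODE SENSE above is
 * laid out as:
 *
 *	+-------------+------------------+-------------------+
 *	| mode header | block descriptor | caching mode page |
 *	|  (hdrlen)   |     (bd_len)     |                   |
 *	+-------------+------------------+-------------------+
 *
 * so the caching page itself starts at (header + hdrlen + bd_len),
 * as computed below.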
8964 */ 8965 if (un->un_f_cfg_is_atapi == TRUE) { 8966 mhp = (struct mode_header_grp2 *)header; 8967 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 8968 } else { 8969 bd_len = ((struct mode_header *)header)->bdesc_length; 8970 } 8971 8972 if (bd_len > MODE_BLK_DESC_LENGTH) { 8973 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 8974 "sd_cache_control: Mode Sense returned invalid block " 8975 "descriptor length\n"); 8976 rval = EIO; 8977 goto mode_sense_failed; 8978 } 8979 8980 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 8981 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 8982 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 8983 "sd_cache_control: Mode Sense caching page code mismatch " 8984 "%d\n", mode_caching_page->mode_page.code); 8985 rval = EIO; 8986 goto mode_sense_failed; 8987 } 8988 8989 /* Check the relevant bits on successful mode sense. */ 8990 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) || 8991 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) || 8992 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) || 8993 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) { 8994 8995 size_t sbuflen; 8996 uchar_t save_pg; 8997 8998 /* 8999 * Construct select buffer length based on the 9000 * length of the sense data returned. 9001 */ 9002 sbuflen = hdrlen + MODE_BLK_DESC_LENGTH + 9003 sizeof (struct mode_page) + 9004 (int)mode_caching_page->mode_page.length; 9005 9006 /* 9007 * Set the caching bits as requested. 9008 */ 9009 if (rcd_flag == SD_CACHE_ENABLE) 9010 mode_caching_page->rcd = 0; 9011 else if (rcd_flag == SD_CACHE_DISABLE) 9012 mode_caching_page->rcd = 1; 9013 9014 if (wce_flag == SD_CACHE_ENABLE) 9015 mode_caching_page->wce = 1; 9016 else if (wce_flag == SD_CACHE_DISABLE) 9017 mode_caching_page->wce = 0; 9018 9019 /* 9020 * Save the page if the mode sense says the 9021 * drive supports it. 9022 */ 9023 save_pg = mode_caching_page->mode_page.ps ? 9024 SD_SAVE_PAGE : SD_DONTSAVE_PAGE; 9025 9026 /* Clear reserved bits before mode select. */ 9027 mode_caching_page->mode_page.ps = 0; 9028 9029 /* 9030 * Clear out mode header for mode select. 9031 * The rest of the retrieved page will be reused. 9032 */ 9033 bzero(header, hdrlen); 9034 9035 if (un->un_f_cfg_is_atapi == TRUE) { 9036 mhp = (struct mode_header_grp2 *)header; 9037 mhp->bdesc_length_hi = bd_len >> 8; 9038 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff; 9039 } else { 9040 ((struct mode_header *)header)->bdesc_length = bd_len; 9041 } 9042 9043 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9044 9045 /* Issue mode select to change the cache settings */ 9046 if (un->un_f_cfg_is_atapi == TRUE) { 9047 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header, 9048 sbuflen, save_pg, SD_PATH_DIRECT); 9049 } else { 9050 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header, 9051 sbuflen, save_pg, SD_PATH_DIRECT); 9052 } 9053 9054 } 9055 9056 9057 mode_sense_failed: 9058 9059 kmem_free(header, buflen); 9060 9061 if (rval != 0) { 9062 if (rval == EIO) 9063 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9064 else 9065 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9066 } 9067 return (rval); 9068 } 9069 9070 9071 /* 9072 * Function: sd_get_write_cache_enabled() 9073 * 9074 * Description: This routine is the driver entry point for determining if 9075 * write caching is enabled. It examines the WCE (write cache 9076 * enable) bits of mode page 8 (MODEPAGE_CACHING). 
9077 * 9078 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 9079 * structure for this target. 9080 * is_enabled - pointer to int where write cache enabled state 9081 * is returned (non-zero -> write cache enabled) 9082 * 9083 * 9084 * Return Code: EIO 9085 * code returned by sd_send_scsi_MODE_SENSE 9086 * 9087 * Context: Kernel Thread 9088 * 9089 * NOTE: If ioctl is added to disable write cache, this sequence should 9090 * be followed so that no locking is required for accesses to 9091 * un->un_f_write_cache_enabled: 9092 * do mode select to clear wce 9093 * do synchronize cache to flush cache 9094 * set un->un_f_write_cache_enabled = FALSE 9095 * 9096 * Conversely, an ioctl to enable the write cache should be done 9097 * in this order: 9098 * set un->un_f_write_cache_enabled = TRUE 9099 * do mode select to set wce 9100 */ 9101 9102 static int 9103 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled) 9104 { 9105 struct mode_caching *mode_caching_page; 9106 uchar_t *header; 9107 size_t buflen; 9108 int hdrlen; 9109 int bd_len; 9110 int rval = 0; 9111 struct sd_lun *un; 9112 int status; 9113 9114 ASSERT(ssc != NULL); 9115 un = ssc->ssc_un; 9116 ASSERT(un != NULL); 9117 ASSERT(is_enabled != NULL); 9118 9119 /* in case of error, flag as enabled */ 9120 *is_enabled = TRUE; 9121 9122 /* 9123 * Do a test unit ready, otherwise a mode sense may not work if this 9124 * is the first command sent to the device after boot. 9125 */ 9126 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 9127 9128 if (status != 0) 9129 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9130 9131 if (un->un_f_cfg_is_atapi == TRUE) { 9132 hdrlen = MODE_HEADER_LENGTH_GRP2; 9133 } else { 9134 hdrlen = MODE_HEADER_LENGTH; 9135 } 9136 9137 /* 9138 * Allocate memory for the retrieved mode page and its headers. Set 9139 * a pointer to the page itself. 9140 */ 9141 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9142 header = kmem_zalloc(buflen, KM_SLEEP); 9143 9144 /* Get the information from the device. */ 9145 if (un->un_f_cfg_is_atapi == TRUE) { 9146 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen, 9147 MODEPAGE_CACHING, SD_PATH_DIRECT); 9148 } else { 9149 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen, 9150 MODEPAGE_CACHING, SD_PATH_DIRECT); 9151 } 9152 9153 if (rval != 0) { 9154 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9155 "sd_get_write_cache_enabled: Mode Sense Failed\n"); 9156 goto mode_sense_failed; 9157 } 9158 9159 /* 9160 * Determine size of Block Descriptors in order to locate 9161 * the mode page data. ATAPI devices return 0, SCSI devices 9162 * should return MODE_BLK_DESC_LENGTH. 
9163 */ 9164 if (un->un_f_cfg_is_atapi == TRUE) { 9165 struct mode_header_grp2 *mhp; 9166 mhp = (struct mode_header_grp2 *)header; 9167 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9168 } else { 9169 bd_len = ((struct mode_header *)header)->bdesc_length; 9170 } 9171 9172 if (bd_len > MODE_BLK_DESC_LENGTH) { 9173 /* FMA should make upset complain here */ 9174 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0, 9175 "sd_get_write_cache_enabled: Mode Sense returned invalid " 9176 "block descriptor length\n"); 9177 rval = EIO; 9178 goto mode_sense_failed; 9179 } 9180 9181 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9182 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) { 9183 /* FMA could make upset complain here */ 9184 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON, 9185 "sd_get_write_cache_enabled: Mode Sense caching page " 9186 "code mismatch %d\n", mode_caching_page->mode_page.code); 9187 rval = EIO; 9188 goto mode_sense_failed; 9189 } 9190 *is_enabled = mode_caching_page->wce; 9191 9192 mode_sense_failed: 9193 if (rval == 0) { 9194 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 9195 } else if (rval == EIO) { 9196 /* 9197 * Some disks do not support mode sense(6); we 9198 * should ignore this kind of error (sense key is 9199 * 0x5 - illegal request). 9200 */ 9201 uint8_t *sensep; 9202 int senlen; 9203 9204 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 9205 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 9206 ssc->ssc_uscsi_cmd->uscsi_rqresid); 9207 9208 if (senlen > 0 && 9209 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 9210 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 9211 } else { 9212 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 9213 } 9214 } else { 9215 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9216 } 9217 kmem_free(header, buflen); 9218 return (rval); 9219 } 9220 9221 /* 9222 * Function: sd_get_nv_sup() 9223 * 9224 * Description: This routine is the driver entry point for 9225 * determining whether non-volatile cache is supported. This 9226 * determination process works as follows: 9227 * 9228 * 1. sd first queries sd.conf on whether 9229 * suppress_cache_flush bit is set for this device. 9230 * 9231 * 2. if not there, then queries the internal disk table. 9232 * 9233 * 3. if either sd.conf or internal disk table specifies 9234 * cache flush be suppressed, we don't bother checking 9235 * NV_SUP bit. 9236 * 9237 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries 9238 * the optional INQUIRY VPD page 0x86. If the device 9239 * supports VPD page 0x86, sd examines the NV_SUP 9240 * (non-volatile cache support) bit in the INQUIRY VPD page 9241 * 0x86: 9242 * o If NV_SUP bit is set, sd assumes the device has a 9243 * non-volatile cache and sets 9244 * un_f_sync_nv_supported to TRUE. 9245 * o Otherwise the cache is not non-volatile, and 9246 * un_f_sync_nv_supported is set to FALSE. 9247 * 9248 * Arguments: ssc - ssc contains un, the driver soft state (unit) structure 9249 * 9250 * Return Code: 9251 * 9252 * Context: Kernel Thread 9253 */ 9254 9255 static void 9256 sd_get_nv_sup(sd_ssc_t *ssc) 9257 { 9258 int rval = 0; 9259 uchar_t *inq86 = NULL; 9260 size_t inq86_len = MAX_INQUIRY_SIZE; 9261 size_t inq86_resid = 0; 9262 struct dk_callback *dkc; 9263 struct sd_lun *un; 9264 9265 ASSERT(ssc != NULL); 9266 un = ssc->ssc_un; 9267 ASSERT(un != NULL); 9268 9269 mutex_enter(SD_MUTEX(un)); 9270 9271 /* 9272 * Be conservative on the device's support of 9273 * SYNC_NV bit: un_f_sync_nv_supported is 9274 * initialized to be false.
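 *
 * (SD_VPD_NV_SUP refers to the NV_SUP bit in byte 6 of the Extended
 * INQUIRY Data VPD page, 0x86, which is examined below once the page
 * has been fetched.)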
9275 */ 9276 un->un_f_sync_nv_supported = FALSE; 9277 9278 /* 9279 * If either sd.conf or internal disk table 9280 * specifies cache flush be suppressed, then 9281 * we don't bother checking NV_SUP bit. 9282 */ 9283 if (un->un_f_suppress_cache_flush == TRUE) { 9284 mutex_exit(SD_MUTEX(un)); 9285 return; 9286 } 9287 9288 if (sd_check_vpd_page_support(ssc) == 0 && 9289 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) { 9290 mutex_exit(SD_MUTEX(un)); 9291 /* collect page 86 data if available */ 9292 inq86 = kmem_zalloc(inq86_len, KM_SLEEP); 9293 9294 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len, 9295 0x01, 0x86, &inq86_resid); 9296 9297 if (rval == 0 && (inq86_len - inq86_resid > 6)) { 9298 SD_TRACE(SD_LOG_COMMON, un, 9299 "sd_get_nv_sup: \ 9300 successfully get VPD page: %x \ 9301 PAGE LENGTH: %x BYTE 6: %x\n", 9302 inq86[1], inq86[3], inq86[6]); 9303 9304 mutex_enter(SD_MUTEX(un)); 9305 /* 9306 * check the value of NV_SUP bit: only if the device 9307 * reports NV_SUP bit to be 1, the 9308 * un_f_sync_nv_supported bit will be set to true. 9309 */ 9310 if (inq86[6] & SD_VPD_NV_SUP) { 9311 un->un_f_sync_nv_supported = TRUE; 9312 } 9313 mutex_exit(SD_MUTEX(un)); 9314 } else if (rval != 0) { 9315 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9316 } 9317 9318 kmem_free(inq86, inq86_len); 9319 } else { 9320 mutex_exit(SD_MUTEX(un)); 9321 } 9322 9323 /* 9324 * Send a SYNC CACHE command to check whether 9325 * SYNC_NV bit is supported. This command should have 9326 * un_f_sync_nv_supported set to correct value. 9327 */ 9328 mutex_enter(SD_MUTEX(un)); 9329 if (un->un_f_sync_nv_supported) { 9330 mutex_exit(SD_MUTEX(un)); 9331 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP); 9332 dkc->dkc_flag = FLUSH_VOLATILE; 9333 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 9334 9335 /* 9336 * Send a TEST UNIT READY command to the device. This should 9337 * clear any outstanding UNIT ATTENTION that may be present. 9338 */ 9339 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR); 9340 if (rval != 0) 9341 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 9342 9343 kmem_free(dkc, sizeof (struct dk_callback)); 9344 } else { 9345 mutex_exit(SD_MUTEX(un)); 9346 } 9347 9348 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \ 9349 un_f_suppress_cache_flush is set to %d\n", 9350 un->un_f_suppress_cache_flush); 9351 } 9352 9353 /* 9354 * Function: sd_make_device 9355 * 9356 * Description: Utility routine to return the Solaris device number from 9357 * the data in the device's dev_info structure. 9358 * 9359 * Return Code: The Solaris device number 9360 * 9361 * Context: Any 9362 */ 9363 9364 static dev_t 9365 sd_make_device(dev_info_t *devi) 9366 { 9367 return (makedevice(ddi_driver_major(devi), 9368 ddi_get_instance(devi) << SDUNIT_SHIFT)); 9369 } 9370 9371 9372 /* 9373 * Function: sd_pm_entry 9374 * 9375 * Description: Called at the start of a new command to manage power 9376 * and busy status of a device. This includes determining whether 9377 * the current power state of the device is sufficient for 9378 * performing the command or whether it must be changed. 9379 * The PM framework is notified appropriately. 9380 * Only with a return status of DDI_SUCCESS will the 9381 * component be busy to the framework. 9382 * 9383 * All callers of sd_pm_entry must check the return status 9384 * and only call sd_pm_exit it it was DDI_SUCCESS. A status 9385 * of DDI_FAILURE indicates the device failed to power up. 9386 * In this case un_pm_count has been adjusted so the result 9387 * on exit is still powered down, ie. 
count is less than 0. 9388 * Calling sd_pm_exit with this count value hits an ASSERT. 9389 * 9390 * Return Code: DDI_SUCCESS or DDI_FAILURE 9391 * 9392 * Context: Kernel thread context. 9393 */ 9394 9395 static int 9396 sd_pm_entry(struct sd_lun *un) 9397 { 9398 int return_status = DDI_SUCCESS; 9399 9400 ASSERT(!mutex_owned(SD_MUTEX(un))); 9401 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9402 9403 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 9404 9405 if (un->un_f_pm_is_enabled == FALSE) { 9406 SD_TRACE(SD_LOG_IO_PM, un, 9407 "sd_pm_entry: exiting, PM not enabled\n"); 9408 return (return_status); 9409 } 9410 9411 /* 9412 * Just increment a counter if PM is enabled. On the transition from 9413 * 0 ==> 1, mark the device as busy. The iodone side will decrement 9414 * the count with each IO and mark the device as idle when the count 9415 * hits 0. 9416 * 9417 * If the count is less than 0 the device is powered down. If a powered 9418 * down device is successfully powered up then the count must be 9419 * incremented to reflect the power up. Note that it'll get incremented 9420 * a second time to become busy. 9421 * 9422 * Because the following has the potential to change the device state 9423 * and must release the un_pm_mutex to do so, only one thread can be 9424 * allowed through at a time. 9425 */ 9426 9427 mutex_enter(&un->un_pm_mutex); 9428 while (un->un_pm_busy == TRUE) { 9429 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 9430 } 9431 un->un_pm_busy = TRUE; 9432 9433 if (un->un_pm_count < 1) { 9434 9435 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 9436 9437 /* 9438 * Indicate we are now busy so the framework won't attempt to 9439 * power down the device. This call will only fail if either 9440 * we passed a bad component number or the device has no 9441 * components. Neither of these should ever happen. 9442 */ 9443 mutex_exit(&un->un_pm_mutex); 9444 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9445 ASSERT(return_status == DDI_SUCCESS); 9446 9447 mutex_enter(&un->un_pm_mutex); 9448 9449 if (un->un_pm_count < 0) { 9450 mutex_exit(&un->un_pm_mutex); 9451 9452 SD_TRACE(SD_LOG_IO_PM, un, 9453 "sd_pm_entry: power up component\n"); 9454 9455 /* 9456 * pm_raise_power will cause sdpower to be called 9457 * which brings the device power level to the 9458 * desired state, ON in this case. If successful, 9459 * un_pm_count and un_power_level will be updated 9460 * appropriately. 9461 */ 9462 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9463 SD_SPINDLE_ON); 9464 9465 mutex_enter(&un->un_pm_mutex); 9466 9467 if (return_status != DDI_SUCCESS) { 9468 /* 9469 * Power up failed. 9470 * Idle the device and adjust the count 9471 * so the result on exit is that we're 9472 * still powered down, ie. count is less than 0. 9473 */ 9474 SD_TRACE(SD_LOG_IO_PM, un, 9475 "sd_pm_entry: power up failed," 9476 " idle the component\n"); 9477 9478 (void) pm_idle_component(SD_DEVINFO(un), 0); 9479 un->un_pm_count--; 9480 } else { 9481 /* 9482 * Device is powered up, verify the 9483 * count is non-negative. 9484 * This is debug only. 9485 */ 9486 ASSERT(un->un_pm_count == 0); 9487 } 9488 } 9489 9490 if (return_status == DDI_SUCCESS) { 9491 /* 9492 * For performance, now that the device has been tagged 9493 * as busy, and it's known to be powered up, update the 9494 * chain types to use jump tables that do not include 9495 * pm. This significantly lowers the overhead and 9496 * therefore improves performance. 
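 *
 * To summarize the accounting above: un_pm_count < 0 means the
 * device is powered down, 0 means powered up and idle, and each
 * command in flight adds one to the count; the 0 ==> 1 transition
 * marks the component busy and the final sd_pm_exit() marks it
 * idle again.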
9497 */ 9498 9499 mutex_exit(&un->un_pm_mutex); 9500 mutex_enter(SD_MUTEX(un)); 9501 SD_TRACE(SD_LOG_IO_PM, un, 9502 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9503 un->un_uscsi_chain_type); 9504 9505 if (un->un_f_non_devbsize_supported) { 9506 un->un_buf_chain_type = 9507 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9508 } else { 9509 un->un_buf_chain_type = 9510 SD_CHAIN_INFO_DISK_NO_PM; 9511 } 9512 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9513 9514 SD_TRACE(SD_LOG_IO_PM, un, 9515 " changed uscsi_chain_type to %d\n", 9516 un->un_uscsi_chain_type); 9517 mutex_exit(SD_MUTEX(un)); 9518 mutex_enter(&un->un_pm_mutex); 9519 9520 if (un->un_pm_idle_timeid == NULL) { 9521 /* 300 ms. */ 9522 un->un_pm_idle_timeid = 9523 timeout(sd_pm_idletimeout_handler, un, 9524 (drv_usectohz((clock_t)300000))); 9525 /* 9526 * Include an extra call to busy which keeps the 9527 * device busy with-respect-to the PM layer 9528 * until the timer fires, at which time it'll 9529 * get the extra idle call. 9530 */ 9531 (void) pm_busy_component(SD_DEVINFO(un), 0); 9532 } 9533 } 9534 } 9535 un->un_pm_busy = FALSE; 9536 /* Next... */ 9537 cv_signal(&un->un_pm_busy_cv); 9538 9539 un->un_pm_count++; 9540 9541 SD_TRACE(SD_LOG_IO_PM, un, 9542 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9543 9544 mutex_exit(&un->un_pm_mutex); 9545 9546 return (return_status); 9547 } 9548 9549 9550 /* 9551 * Function: sd_pm_exit 9552 * 9553 * Description: Called at the completion of a command to manage busy 9554 * status for the device. If the device becomes idle the 9555 * PM framework is notified. 9556 * 9557 * Context: Kernel thread context 9558 */ 9559 9560 static void 9561 sd_pm_exit(struct sd_lun *un) 9562 { 9563 ASSERT(!mutex_owned(SD_MUTEX(un))); 9564 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9565 9566 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 9567 9568 /* 9569 * After attach the following flag is only read, so don't 9570 * take the penalty of acquiring a mutex for it. 9571 */ 9572 if (un->un_f_pm_is_enabled == TRUE) { 9573 9574 mutex_enter(&un->un_pm_mutex); 9575 un->un_pm_count--; 9576 9577 SD_TRACE(SD_LOG_IO_PM, un, 9578 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9579 9580 ASSERT(un->un_pm_count >= 0); 9581 if (un->un_pm_count == 0) { 9582 mutex_exit(&un->un_pm_mutex); 9583 9584 SD_TRACE(SD_LOG_IO_PM, un, 9585 "sd_pm_exit: idle component\n"); 9586 9587 (void) pm_idle_component(SD_DEVINFO(un), 0); 9588 9589 } else { 9590 mutex_exit(&un->un_pm_mutex); 9591 } 9592 } 9593 9594 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9595 } 9596 9597 9598 /* 9599 * Function: sdopen 9600 * 9601 * Description: Driver's open(9e) entry point function. 
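 *
 *		Note that the minor number encodes both the instance and
 *		the partition being opened; they are recovered below with
 *		SDUNIT(dev) and SDPART(dev) respectively.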
9602 * 9603 * Arguments: dev_i - pointer to device number 9604 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9605 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9606 * cred_p - user credential pointer 9607 * 9608 * Return Code: EINVAL 9609 * ENXIO 9610 * EIO 9611 * EROFS 9612 * EBUSY 9613 * 9614 * Context: Kernel thread context 9615 */ 9616 /* ARGSUSED */ 9617 static int 9618 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9619 { 9620 struct sd_lun *un; 9621 int nodelay; 9622 int part; 9623 uint64_t partmask; 9624 int instance; 9625 dev_t dev; 9626 int rval = EIO; 9627 diskaddr_t nblks = 0; 9628 diskaddr_t label_cap; 9629 9630 /* Validate the open type */ 9631 if (otyp >= OTYPCNT) { 9632 return (EINVAL); 9633 } 9634 9635 dev = *dev_p; 9636 instance = SDUNIT(dev); 9637 mutex_enter(&sd_detach_mutex); 9638 9639 /* 9640 * Fail the open if there is no softstate for the instance, or 9641 * if another thread somewhere is trying to detach the instance. 9642 */ 9643 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 9644 (un->un_detach_count != 0)) { 9645 mutex_exit(&sd_detach_mutex); 9646 /* 9647 * The probe cache only needs to be cleared when open (9e) fails 9648 * with ENXIO (4238046). 9649 */ 9650 /* 9651 * un-conditionally clearing probe cache is ok with 9652 * separate sd/ssd binaries 9653 * x86 platform can be an issue with both parallel 9654 * and fibre in 1 binary 9655 */ 9656 sd_scsi_clear_probe_cache(); 9657 return (ENXIO); 9658 } 9659 9660 /* 9661 * The un_layer_count is to prevent another thread in specfs from 9662 * trying to detach the instance, which can happen when we are 9663 * called from a higher-layer driver instead of thru specfs. 9664 * This will not be needed when DDI provides a layered driver 9665 * interface that allows specfs to know that an instance is in 9666 * use by a layered driver & should not be detached. 9667 * 9668 * Note: the semantics for layered driver opens are exactly one 9669 * close for every open. 9670 */ 9671 if (otyp == OTYP_LYR) { 9672 un->un_layer_count++; 9673 } 9674 9675 /* 9676 * Keep a count of the current # of opens in progress. This is because 9677 * some layered drivers try to call us as a regular open. This can 9678 * cause problems that we cannot prevent, however by keeping this count 9679 * we can at least keep our open and detach routines from racing against 9680 * each other under such conditions. 9681 */ 9682 un->un_opens_in_progress++; 9683 mutex_exit(&sd_detach_mutex); 9684 9685 nodelay = (flag & (FNDELAY | FNONBLOCK)); 9686 part = SDPART(dev); 9687 partmask = 1 << part; 9688 9689 /* 9690 * We use a semaphore here in order to serialize 9691 * open and close requests on the device. 9692 */ 9693 sema_p(&un->un_semoclose); 9694 9695 mutex_enter(SD_MUTEX(un)); 9696 9697 /* 9698 * All device accesses go thru sdstrategy() where we check 9699 * on suspend status but there could be a scsi_poll command, 9700 * which bypasses sdstrategy(), so we need to check pm 9701 * status. 
9702 */ 9703 9704 if (!nodelay) { 9705 while ((un->un_state == SD_STATE_SUSPENDED) || 9706 (un->un_state == SD_STATE_PM_CHANGING)) { 9707 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9708 } 9709 9710 mutex_exit(SD_MUTEX(un)); 9711 if (sd_pm_entry(un) != DDI_SUCCESS) { 9712 rval = EIO; 9713 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 9714 "sdopen: sd_pm_entry failed\n"); 9715 goto open_failed_with_pm; 9716 } 9717 mutex_enter(SD_MUTEX(un)); 9718 } 9719 9720 /* check for previous exclusive open */ 9721 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 9722 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9723 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 9724 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 9725 9726 if (un->un_exclopen & (partmask)) { 9727 goto excl_open_fail; 9728 } 9729 9730 if (flag & FEXCL) { 9731 int i; 9732 if (un->un_ocmap.lyropen[part]) { 9733 goto excl_open_fail; 9734 } 9735 for (i = 0; i < (OTYPCNT - 1); i++) { 9736 if (un->un_ocmap.regopen[i] & (partmask)) { 9737 goto excl_open_fail; 9738 } 9739 } 9740 } 9741 9742 /* 9743 * Check the write permission if this is a removable media device, 9744 * NDELAY has not been set, and writable permission is requested. 9745 * 9746 * Note: If NDELAY was set and this is write-protected media the WRITE 9747 * attempt will fail with EIO as part of the I/O processing. This is a 9748 * more permissive implementation that allows the open to succeed and 9749 * WRITE attempts to fail when appropriate. 9750 */ 9751 if (un->un_f_chk_wp_open) { 9752 if ((flag & FWRITE) && (!nodelay)) { 9753 mutex_exit(SD_MUTEX(un)); 9754 /* 9755 * Defer the check for write permission on a writable 9756 * DVD drive until sdstrategy; do not fail the open even 9757 * if FWRITE is set, as the device can be writable 9758 * depending upon the media, and the media can change 9759 * after the call to open(). 9760 */ 9761 if (un->un_f_dvdram_writable_device == FALSE) { 9762 if (ISCD(un) || sr_check_wp(dev)) { 9763 rval = EROFS; 9764 mutex_enter(SD_MUTEX(un)); 9765 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9766 "write to cd or write protected media\n"); 9767 goto open_fail; 9768 } 9769 } 9770 mutex_enter(SD_MUTEX(un)); 9771 } 9772 } 9773 9774 /* 9775 * If opening in NDELAY/NONBLOCK mode, just return. 9776 * Check if disk is ready and has a valid geometry later. 9777 */ 9778 if (!nodelay) { 9779 sd_ssc_t *ssc; 9780 9781 mutex_exit(SD_MUTEX(un)); 9782 ssc = sd_ssc_init(un); 9783 rval = sd_ready_and_valid(ssc, part); 9784 sd_ssc_fini(ssc); 9785 mutex_enter(SD_MUTEX(un)); 9786 /* 9787 * Fail if device is not ready or if the number of disk 9788 * blocks is zero or negative for non-CD devices. 9789 */ 9790 9791 nblks = 0; 9792 9793 if (rval == SD_READY_VALID && (!ISCD(un))) { 9794 /* if cmlb_partinfo fails, nblks remains 0 */ 9795 mutex_exit(SD_MUTEX(un)); 9796 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks, 9797 NULL, NULL, NULL, (void *)SD_PATH_DIRECT); 9798 mutex_enter(SD_MUTEX(un)); 9799 } 9800 9801 if ((rval != SD_READY_VALID) || 9802 (!ISCD(un) && nblks <= 0)) { 9803 rval = un->un_f_has_removable_media ? ENXIO : EIO; 9804 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9805 "device not ready or invalid disk block value\n"); 9806 goto open_fail; 9807 } 9808 #if defined(__i386) || defined(__amd64) 9809 } else { 9810 uchar_t *cp; 9811 /* 9812 * x86 requires special nodelay handling, so that p0 is 9813 * always defined and accessible. 9814 * Invalidate geometry only if device is not already open.
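 *
 * The scan below walks un_ocmap.chkd[]; only if every byte is zero
 * (i.e. no opens of any type on any partition) is the label
 * invalidated.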
9815 */ 9816 cp = &un->un_ocmap.chkd[0]; 9817 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9818 if (*cp != (uchar_t)0) { 9819 break; 9820 } 9821 cp++; 9822 } 9823 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9824 mutex_exit(SD_MUTEX(un)); 9825 cmlb_invalidate(un->un_cmlbhandle, 9826 (void *)SD_PATH_DIRECT); 9827 mutex_enter(SD_MUTEX(un)); 9828 } 9829 9830 #endif 9831 } 9832 9833 if (otyp == OTYP_LYR) { 9834 un->un_ocmap.lyropen[part]++; 9835 } else { 9836 un->un_ocmap.regopen[otyp] |= partmask; 9837 } 9838 9839 /* Set up open and exclusive open flags */ 9840 if (flag & FEXCL) { 9841 un->un_exclopen |= (partmask); 9842 } 9843 9844 /* 9845 * If the lun is EFI labeled and lun capacity is greater than the 9846 * capacity contained in the label, log a sys-event to notify the 9847 * interested module. 9848 * To avoid an infinite loop of logging sys-event, we only log the 9849 * event when the lun is not opened in NDELAY mode. The event handler 9850 * should open the lun in NDELAY mode. 9851 */ 9852 if (!(flag & FNDELAY)) { 9853 mutex_exit(SD_MUTEX(un)); 9854 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap, 9855 (void*)SD_PATH_DIRECT) == 0) { 9856 mutex_enter(SD_MUTEX(un)); 9857 if (un->un_f_blockcount_is_valid && 9858 un->un_blockcount > label_cap) { 9859 mutex_exit(SD_MUTEX(un)); 9860 sd_log_lun_expansion_event(un, 9861 (nodelay ? KM_NOSLEEP : KM_SLEEP)); 9862 mutex_enter(SD_MUTEX(un)); 9863 } 9864 } else { 9865 mutex_enter(SD_MUTEX(un)); 9866 } 9867 } 9868 9869 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9870 "open of part %d type %d\n", part, otyp); 9871 9872 mutex_exit(SD_MUTEX(un)); 9873 if (!nodelay) { 9874 sd_pm_exit(un); 9875 } 9876 9877 sema_v(&un->un_semoclose); 9878 9879 mutex_enter(&sd_detach_mutex); 9880 un->un_opens_in_progress--; 9881 mutex_exit(&sd_detach_mutex); 9882 9883 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 9884 return (DDI_SUCCESS); 9885 9886 excl_open_fail: 9887 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 9888 rval = EBUSY; 9889 9890 open_fail: 9891 mutex_exit(SD_MUTEX(un)); 9892 9893 /* 9894 * On a failed open we must exit the pm management. 9895 */ 9896 if (!nodelay) { 9897 sd_pm_exit(un); 9898 } 9899 open_failed_with_pm: 9900 sema_v(&un->un_semoclose); 9901 9902 mutex_enter(&sd_detach_mutex); 9903 un->un_opens_in_progress--; 9904 if (otyp == OTYP_LYR) { 9905 un->un_layer_count--; 9906 } 9907 mutex_exit(&sd_detach_mutex); 9908 9909 return (rval); 9910 } 9911 9912 9913 /* 9914 * Function: sdclose 9915 * 9916 * Description: Driver's close(9e) entry point function. 9917 * 9918 * Arguments: dev - device number 9919 * flag - file status flag, informational only 9920 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9921 * cred_p - user credential pointer 9922 * 9923 * Return Code: ENXIO 9924 * 9925 * Context: Kernel thread context 9926 */ 9927 /* ARGSUSED */ 9928 static int 9929 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 9930 { 9931 struct sd_lun *un; 9932 uchar_t *cp; 9933 int part; 9934 int nodelay; 9935 int rval = 0; 9936 9937 /* Validate the open type */ 9938 if (otyp >= OTYPCNT) { 9939 return (ENXIO); 9940 } 9941 9942 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 9943 return (ENXIO); 9944 } 9945 9946 part = SDPART(dev); 9947 nodelay = flag & (FNDELAY | FNONBLOCK); 9948 9949 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9950 "sdclose: close of part %d type %d\n", part, otyp); 9951 9952 /* 9953 * We use a semaphore here in order to serialize 9954 * open and close requests on the device. 
9955 */ 9956 sema_p(&un->un_semoclose); 9957 9958 mutex_enter(SD_MUTEX(un)); 9959 9960 /* Don't proceed if power is being changed. */ 9961 while (un->un_state == SD_STATE_PM_CHANGING) { 9962 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9963 } 9964 9965 if (un->un_exclopen & (1 << part)) { 9966 un->un_exclopen &= ~(1 << part); 9967 } 9968 9969 /* Update the open partition map */ 9970 if (otyp == OTYP_LYR) { 9971 un->un_ocmap.lyropen[part] -= 1; 9972 } else { 9973 un->un_ocmap.regopen[otyp] &= ~(1 << part); 9974 } 9975 9976 cp = &un->un_ocmap.chkd[0]; 9977 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 9978 if (*cp != NULL) { 9979 break; 9980 } 9981 cp++; 9982 } 9983 9984 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 9985 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 9986 9987 /* 9988 * We avoid persistance upon the last close, and set 9989 * the throttle back to the maximum. 9990 */ 9991 un->un_throttle = un->un_saved_throttle; 9992 9993 if (un->un_state == SD_STATE_OFFLINE) { 9994 if (un->un_f_is_fibre == FALSE) { 9995 scsi_log(SD_DEVINFO(un), sd_label, 9996 CE_WARN, "offline\n"); 9997 } 9998 mutex_exit(SD_MUTEX(un)); 9999 cmlb_invalidate(un->un_cmlbhandle, 10000 (void *)SD_PATH_DIRECT); 10001 mutex_enter(SD_MUTEX(un)); 10002 10003 } else { 10004 /* 10005 * Flush any outstanding writes in NVRAM cache. 10006 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 10007 * cmd, it may not work for non-Pluto devices. 10008 * SYNCHRONIZE CACHE is not required for removables, 10009 * except DVD-RAM drives. 10010 * 10011 * Also note: because SYNCHRONIZE CACHE is currently 10012 * the only command issued here that requires the 10013 * drive be powered up, only do the power up before 10014 * sending the Sync Cache command. If additional 10015 * commands are added which require a powered up 10016 * drive, the following sequence may have to change. 10017 * 10018 * And finally, note that parallel SCSI on SPARC 10019 * only issues a Sync Cache to DVD-RAM, a newly 10020 * supported device. 10021 */ 10022 #if defined(__i386) || defined(__amd64) 10023 if ((un->un_f_sync_cache_supported && 10024 un->un_f_sync_cache_required) || 10025 un->un_f_dvdram_writable_device == TRUE) { 10026 #else 10027 if (un->un_f_dvdram_writable_device == TRUE) { 10028 #endif 10029 mutex_exit(SD_MUTEX(un)); 10030 if (sd_pm_entry(un) == DDI_SUCCESS) { 10031 rval = 10032 sd_send_scsi_SYNCHRONIZE_CACHE(un, 10033 NULL); 10034 /* ignore error if not supported */ 10035 if (rval == ENOTSUP) { 10036 rval = 0; 10037 } else if (rval != 0) { 10038 rval = EIO; 10039 } 10040 sd_pm_exit(un); 10041 } else { 10042 rval = EIO; 10043 } 10044 mutex_enter(SD_MUTEX(un)); 10045 } 10046 10047 /* 10048 * For devices which supports DOOR_LOCK, send an ALLOW 10049 * MEDIA REMOVAL command, but don't get upset if it 10050 * fails. 
We need to raise the power of the drive before 10051 * we can call sd_send_scsi_DOORLOCK(). 10052 */ 10053 if (un->un_f_doorlock_supported) { 10054 mutex_exit(SD_MUTEX(un)); 10055 if (sd_pm_entry(un) == DDI_SUCCESS) { 10056 sd_ssc_t *ssc; 10057 10058 ssc = sd_ssc_init(un); 10059 rval = sd_send_scsi_DOORLOCK(ssc, 10060 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 10061 if (rval != 0) 10062 sd_ssc_assessment(ssc, 10063 SD_FMT_IGNORE); 10064 sd_ssc_fini(ssc); 10065 10066 sd_pm_exit(un); 10067 if (ISCD(un) && (rval != 0) && 10068 (nodelay != 0)) { 10069 rval = ENXIO; 10070 } 10071 } else { 10072 rval = EIO; 10073 } 10074 mutex_enter(SD_MUTEX(un)); 10075 } 10076 10077 /* 10078 * If a device has removable media, invalidate all 10079 * parameters related to media, such as geometry, 10080 * blocksize, and blockcount. 10081 */ 10082 if (un->un_f_has_removable_media) { 10083 sr_ejected(un); 10084 } 10085 10086 /* 10087 * Destroy the cache (if it exists) which was 10088 * allocated for the write maps since this is 10089 * the last close for this media. 10090 */ 10091 if (un->un_wm_cache) { 10092 /* 10093 * Check if there are pending commands; 10094 * if there are, give a warning and 10095 * do not destroy the cache. 10096 */ 10097 if (un->un_ncmds_in_driver > 0) { 10098 scsi_log(SD_DEVINFO(un), 10099 sd_label, CE_WARN, 10100 "Unable to clean up memory " 10101 "because of pending I/O\n"); 10102 } else { 10103 kmem_cache_destroy( 10104 un->un_wm_cache); 10105 un->un_wm_cache = NULL; 10106 } 10107 } 10108 } 10109 } 10110 10111 mutex_exit(SD_MUTEX(un)); 10112 sema_v(&un->un_semoclose); 10113 10114 if (otyp == OTYP_LYR) { 10115 mutex_enter(&sd_detach_mutex); 10116 /* 10117 * The detach routine may run when the layer count 10118 * drops to zero. 10119 */ 10120 un->un_layer_count--; 10121 mutex_exit(&sd_detach_mutex); 10122 } 10123 10124 return (rval); 10125 } 10126 10127 10128 /* 10129 * Function: sd_ready_and_valid 10130 * 10131 * Description: Test if device is ready and has a valid geometry. 10132 * 10133 * Arguments: ssc - ssc contains un, the driver soft state (unit) 10134 * structure; part - the partition number to validate 10135 * 10136 * Return Code: SD_READY_VALID ready and valid label 10137 * SD_NOT_READY_VALID not ready, no label 10138 * SD_RESERVED_BY_OTHERS reservation conflict 10139 * 10140 * Context: Never called at interrupt context. 10141 */ 10142 10143 static int 10144 sd_ready_and_valid(sd_ssc_t *ssc, int part) 10145 { 10146 struct sd_errstats *stp; 10147 uint64_t capacity; 10148 uint_t lbasize; 10149 int rval = SD_READY_VALID; 10150 char name_str[48]; 10151 int is_valid; 10152 struct sd_lun *un; 10153 int status; 10154 10155 ASSERT(ssc != NULL); 10156 un = ssc->ssc_un; 10157 ASSERT(un != NULL); 10158 ASSERT(!mutex_owned(SD_MUTEX(un))); 10159 10160 mutex_enter(SD_MUTEX(un)); 10161 /* 10162 * If a device has removable media, we must check if media is 10163 * ready when checking if this device is ready and valid. 10164 */ 10165 if (un->un_f_has_removable_media) { 10166 mutex_exit(SD_MUTEX(un)); 10167 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10168 10169 if (status != 0) { 10170 rval = SD_NOT_READY_VALID; 10171 mutex_enter(SD_MUTEX(un)); 10172 10173 /* Ignore all failed status for removable media */ 10174 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10175 10176 goto done; 10177 } 10178 10179 is_valid = SD_IS_VALID_LABEL(un); 10180 mutex_enter(SD_MUTEX(un)); 10181 if (!is_valid || 10182 (un->un_f_blockcount_is_valid == FALSE) || 10183 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 10184 10185 /* capacity has to be read every open.
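 * The media may have been swapped since the last open, so any
 * cached capacity and block size cannot be trusted; they are
 * re-read with READ CAPACITY below.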
*/ 10186 mutex_exit(SD_MUTEX(un)); 10187 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity, 10188 &lbasize, SD_PATH_DIRECT); 10189 10190 if (status != 0) { 10191 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10192 10193 cmlb_invalidate(un->un_cmlbhandle, 10194 (void *)SD_PATH_DIRECT); 10195 mutex_enter(SD_MUTEX(un)); 10196 rval = SD_NOT_READY_VALID; 10197 10198 goto done; 10199 } else { 10200 mutex_enter(SD_MUTEX(un)); 10201 sd_update_block_info(un, lbasize, capacity); 10202 } 10203 } 10204 10205 /* 10206 * Check if the media in the device is writable or not. 10207 */ 10208 if (!is_valid && ISCD(un)) { 10209 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 10210 } 10211 10212 } else { 10213 /* 10214 * Do a test unit ready to clear any unit attention from non-cd 10215 * devices. 10216 */ 10217 mutex_exit(SD_MUTEX(un)); 10218 10219 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10220 if (status != 0) { 10221 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10222 } 10223 10224 mutex_enter(SD_MUTEX(un)); 10225 } 10226 10227 10228 /* 10229 * If this is a non 512 block device, allocate space for 10230 * the wmap cache. This is being done here since every time 10231 * a media is changed this routine will be called and the 10232 * block size is a function of media rather than device. 10233 */ 10234 if (un->un_f_non_devbsize_supported && NOT_DEVBSIZE(un)) { 10235 if (!(un->un_wm_cache)) { 10236 (void) snprintf(name_str, sizeof (name_str), 10237 "%s%d_cache", 10238 ddi_driver_name(SD_DEVINFO(un)), 10239 ddi_get_instance(SD_DEVINFO(un))); 10240 un->un_wm_cache = kmem_cache_create( 10241 name_str, sizeof (struct sd_w_map), 10242 8, sd_wm_cache_constructor, 10243 sd_wm_cache_destructor, NULL, 10244 (void *)un, NULL, 0); 10245 if (!(un->un_wm_cache)) { 10246 rval = ENOMEM; 10247 goto done; 10248 } 10249 } 10250 } 10251 10252 if (un->un_state == SD_STATE_NORMAL) { 10253 /* 10254 * If the target is not yet ready here (defined by a TUR 10255 * failure), invalidate the geometry and print an 'offline' 10256 * message. This is a legacy message, as the state of the 10257 * target is not actually changed to SD_STATE_OFFLINE. 10258 * 10259 * If the TUR fails for EACCES (Reservation Conflict), 10260 * SD_RESERVED_BY_OTHERS will be returned to indicate 10261 * reservation conflict. If the TUR fails for other 10262 * reasons, SD_NOT_READY_VALID will be returned. 
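 *
 * (The EACCES check below is how a reservation conflict reported
 * by the TEST UNIT READY surfaces in this routine.)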
10263 */ 10264 int err; 10265 10266 mutex_exit(SD_MUTEX(un)); 10267 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 10268 mutex_enter(SD_MUTEX(un)); 10269 10270 if (err != 0) { 10271 mutex_exit(SD_MUTEX(un)); 10272 cmlb_invalidate(un->un_cmlbhandle, 10273 (void *)SD_PATH_DIRECT); 10274 mutex_enter(SD_MUTEX(un)); 10275 if (err == EACCES) { 10276 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10277 "reservation conflict\n"); 10278 rval = SD_RESERVED_BY_OTHERS; 10279 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10280 } else { 10281 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10282 "drive offline\n"); 10283 rval = SD_NOT_READY_VALID; 10284 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 10285 } 10286 goto done; 10287 } 10288 } 10289 10290 if (un->un_f_format_in_progress == FALSE) { 10291 mutex_exit(SD_MUTEX(un)); 10292 10293 (void) cmlb_validate(un->un_cmlbhandle, 0, 10294 (void *)SD_PATH_DIRECT); 10295 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL, 10296 NULL, (void *) SD_PATH_DIRECT) != 0) { 10297 rval = SD_NOT_READY_VALID; 10298 mutex_enter(SD_MUTEX(un)); 10299 10300 goto done; 10301 } 10302 if (un->un_f_pkstats_enabled) { 10303 sd_set_pstats(un); 10304 SD_TRACE(SD_LOG_IO_PARTITION, un, 10305 "sd_ready_and_valid: un:0x%p pstats created and " 10306 "set\n", un); 10307 } 10308 mutex_enter(SD_MUTEX(un)); 10309 } 10310 10311 /* 10312 * If this device supports the DOOR_LOCK command, try to send 10313 * this command to PREVENT MEDIA REMOVAL, but don't get upset 10314 * if it fails. For a CD, however, it is an error. 10315 */ 10316 if (un->un_f_doorlock_supported) { 10317 mutex_exit(SD_MUTEX(un)); 10318 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 10319 SD_PATH_DIRECT); 10320 10321 if ((status != 0) && ISCD(un)) { 10322 rval = SD_NOT_READY_VALID; 10323 mutex_enter(SD_MUTEX(un)); 10324 10325 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10326 10327 goto done; 10328 } else if (status != 0) 10329 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 10330 mutex_enter(SD_MUTEX(un)); 10331 } 10332 10333 /* The state has changed, inform the media watch routines */ 10334 un->un_mediastate = DKIO_INSERTED; 10335 cv_broadcast(&un->un_state_cv); 10336 rval = SD_READY_VALID; 10337 10338 done: 10339 10340 /* 10341 * Initialize the capacity kstat value, if no media previously 10342 * (capacity kstat is 0) and a media has been inserted 10343 * (un_blockcount > 0). 10344 */ 10345 if (un->un_errstats != NULL) { 10346 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10347 if ((stp->sd_capacity.value.ui64 == 0) && 10348 (un->un_f_blockcount_is_valid == TRUE)) { 10349 stp->sd_capacity.value.ui64 = 10350 (uint64_t)((uint64_t)un->un_blockcount * 10351 un->un_sys_blocksize); 10352 } 10353 } 10354 10355 mutex_exit(SD_MUTEX(un)); 10356 return (rval); 10357 } 10358 10359 10360 /* 10361 * Function: sdmin 10362 * 10363 * Description: Routine to limit the size of a data transfer. Used in 10364 * conjunction with physio(9F). 10365 * 10366 * Arguments: bp - pointer to the indicated buf(9S) struct. 10367 * 10368 * Context: Kernel thread context. 10369 */ 10370 10371 static void 10372 sdmin(struct buf *bp) 10373 { 10374 struct sd_lun *un; 10375 int instance; 10376 10377 instance = SDUNIT(bp->b_edev); 10378 10379 un = ddi_get_soft_state(sd_state, instance); 10380 ASSERT(un != NULL); 10381 10382 /* 10383 * We depend on DMA partial or buf breakup to restrict 10384 * the I/O size if either of them is enabled.
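 *
 * sdmin() is handed to physio(9F)/aphysio(9F) as the minphys
 * routine by the read/write entry points that follow, giving the
 * driver a chance to clamp b_bcount to un_max_xfer_size on a
 * per-transfer basis.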
10385 */ 10386 if (un->un_partial_dma_supported || 10387 un->un_buf_breakup_supported) { 10388 return; 10389 } 10390 10391 if (bp->b_bcount > un->un_max_xfer_size) { 10392 bp->b_bcount = un->un_max_xfer_size; 10393 } 10394 } 10395 10396 10397 /* 10398 * Function: sdread 10399 * 10400 * Description: Driver's read(9e) entry point function. 10401 * 10402 * Arguments: dev - device number 10403 * uio - structure pointer describing where data is to be stored 10404 * in user's space 10405 * cred_p - user credential pointer 10406 * 10407 * Return Code: ENXIO 10408 * EIO 10409 * EINVAL 10410 * value returned by physio 10411 * 10412 * Context: Kernel thread context. 10413 */ 10414 /* ARGSUSED */ 10415 static int 10416 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 10417 { 10418 struct sd_lun *un = NULL; 10419 int secmask; 10420 int err = 0; 10421 sd_ssc_t *ssc; 10422 10423 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10424 return (ENXIO); 10425 } 10426 10427 ASSERT(!mutex_owned(SD_MUTEX(un))); 10428 10429 10430 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10431 mutex_enter(SD_MUTEX(un)); 10432 /* 10433 * Because the call to sd_ready_and_valid will issue I/O we 10434 * must wait here if either the device is suspended or 10435 * if it's power level is changing. 10436 */ 10437 while ((un->un_state == SD_STATE_SUSPENDED) || 10438 (un->un_state == SD_STATE_PM_CHANGING)) { 10439 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10440 } 10441 un->un_ncmds_in_driver++; 10442 mutex_exit(SD_MUTEX(un)); 10443 10444 /* Initialize sd_ssc_t for internal uscsi commands */ 10445 ssc = sd_ssc_init(un); 10446 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10447 err = EIO; 10448 } else { 10449 err = 0; 10450 } 10451 sd_ssc_fini(ssc); 10452 10453 mutex_enter(SD_MUTEX(un)); 10454 un->un_ncmds_in_driver--; 10455 ASSERT(un->un_ncmds_in_driver >= 0); 10456 mutex_exit(SD_MUTEX(un)); 10457 if (err != 0) 10458 return (err); 10459 } 10460 10461 /* 10462 * Read requests are restricted to multiples of the system block size. 10463 */ 10464 secmask = un->un_sys_blocksize - 1; 10465 10466 if (uio->uio_loffset & ((offset_t)(secmask))) { 10467 SD_ERROR(SD_LOG_READ_WRITE, un, 10468 "sdread: file offset not modulo %d\n", 10469 un->un_sys_blocksize); 10470 err = EINVAL; 10471 } else if (uio->uio_iov->iov_len & (secmask)) { 10472 SD_ERROR(SD_LOG_READ_WRITE, un, 10473 "sdread: transfer length not modulo %d\n", 10474 un->un_sys_blocksize); 10475 err = EINVAL; 10476 } else { 10477 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10478 } 10479 10480 return (err); 10481 } 10482 10483 10484 /* 10485 * Function: sdwrite 10486 * 10487 * Description: Driver's write(9e) entry point function. 10488 * 10489 * Arguments: dev - device number 10490 * uio - structure pointer describing where data is stored in 10491 * user's space 10492 * cred_p - user credential pointer 10493 * 10494 * Return Code: ENXIO 10495 * EIO 10496 * EINVAL 10497 * value returned by physio 10498 * 10499 * Context: Kernel thread context. 
10500 */ 10501 /* ARGSUSED */ 10502 static int 10503 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10504 { 10505 struct sd_lun *un = NULL; 10506 int secmask; 10507 int err = 0; 10508 sd_ssc_t *ssc; 10509 10510 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10511 return (ENXIO); 10512 } 10513 10514 ASSERT(!mutex_owned(SD_MUTEX(un))); 10515 10516 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10517 mutex_enter(SD_MUTEX(un)); 10518 /* 10519 * Because the call to sd_ready_and_valid will issue I/O, we 10520 * must wait here if either the device is suspended or 10521 * if its power level is changing. 10522 */ 10523 while ((un->un_state == SD_STATE_SUSPENDED) || 10524 (un->un_state == SD_STATE_PM_CHANGING)) { 10525 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10526 } 10527 un->un_ncmds_in_driver++; 10528 mutex_exit(SD_MUTEX(un)); 10529 10530 /* Initialize sd_ssc_t for internal uscsi commands */ 10531 ssc = sd_ssc_init(un); 10532 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10533 err = EIO; 10534 } else { 10535 err = 0; 10536 } 10537 sd_ssc_fini(ssc); 10538 10539 mutex_enter(SD_MUTEX(un)); 10540 un->un_ncmds_in_driver--; 10541 ASSERT(un->un_ncmds_in_driver >= 0); 10542 mutex_exit(SD_MUTEX(un)); 10543 if (err != 0) 10544 return (err); 10545 } 10546 10547 /* 10548 * Write requests are restricted to multiples of the system block size. 10549 */ 10550 secmask = un->un_sys_blocksize - 1; 10551 10552 if (uio->uio_loffset & ((offset_t)(secmask))) { 10553 SD_ERROR(SD_LOG_READ_WRITE, un, 10554 "sdwrite: file offset not modulo %d\n", 10555 un->un_sys_blocksize); 10556 err = EINVAL; 10557 } else if (uio->uio_iov->iov_len & (secmask)) { 10558 SD_ERROR(SD_LOG_READ_WRITE, un, 10559 "sdwrite: transfer length not modulo %d\n", 10560 un->un_sys_blocksize); 10561 err = EINVAL; 10562 } else { 10563 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10564 } 10565 10566 return (err); 10567 } 10568 10569 10570 /* 10571 * Function: sdaread 10572 * 10573 * Description: Driver's aread(9e) entry point function. 10574 * 10575 * Arguments: dev - device number 10576 * aio - structure pointer describing where data is to be stored 10577 * cred_p - user credential pointer 10578 * 10579 * Return Code: ENXIO 10580 * EIO 10581 * EINVAL 10582 * value returned by aphysio 10583 * 10584 * Context: Kernel thread context. 10585 */ 10586 /* ARGSUSED */ 10587 static int 10588 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10589 { 10590 struct sd_lun *un = NULL; 10591 struct uio *uio = aio->aio_uio; 10592 int secmask; 10593 int err = 0; 10594 sd_ssc_t *ssc; 10595 10596 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10597 return (ENXIO); 10598 } 10599 10600 ASSERT(!mutex_owned(SD_MUTEX(un))); 10601 10602 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10603 mutex_enter(SD_MUTEX(un)); 10604 /* 10605 * Because the call to sd_ready_and_valid will issue I/O, we 10606 * must wait here if either the device is suspended or 10607 * if its power level is changing.
10608 */ 10609 while ((un->un_state == SD_STATE_SUSPENDED) || 10610 (un->un_state == SD_STATE_PM_CHANGING)) { 10611 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10612 } 10613 un->un_ncmds_in_driver++; 10614 mutex_exit(SD_MUTEX(un)); 10615 10616 /* Initialize sd_ssc_t for internal uscsi commands */ 10617 ssc = sd_ssc_init(un); 10618 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10619 err = EIO; 10620 } else { 10621 err = 0; 10622 } 10623 sd_ssc_fini(ssc); 10624 10625 mutex_enter(SD_MUTEX(un)); 10626 un->un_ncmds_in_driver--; 10627 ASSERT(un->un_ncmds_in_driver >= 0); 10628 mutex_exit(SD_MUTEX(un)); 10629 if (err != 0) 10630 return (err); 10631 } 10632 10633 /* 10634 * Read requests are restricted to multiples of the system block size. 10635 */ 10636 secmask = un->un_sys_blocksize - 1; 10637 10638 if (uio->uio_loffset & ((offset_t)(secmask))) { 10639 SD_ERROR(SD_LOG_READ_WRITE, un, 10640 "sdaread: file offset not modulo %d\n", 10641 un->un_sys_blocksize); 10642 err = EINVAL; 10643 } else if (uio->uio_iov->iov_len & (secmask)) { 10644 SD_ERROR(SD_LOG_READ_WRITE, un, 10645 "sdaread: transfer length not modulo %d\n", 10646 un->un_sys_blocksize); 10647 err = EINVAL; 10648 } else { 10649 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 10650 } 10651 10652 return (err); 10653 } 10654 10655 10656 /* 10657 * Function: sdawrite 10658 * 10659 * Description: Driver's awrite(9e) entry point function. 10660 * 10661 * Arguments: dev - device number 10662 * aio - structure pointer describing where data is stored 10663 * cred_p - user credential pointer 10664 * 10665 * Return Code: ENXIO 10666 * EIO 10667 * EINVAL 10668 * value returned by aphysio 10669 * 10670 * Context: Kernel thread context. 10671 */ 10672 /* ARGSUSED */ 10673 static int 10674 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10675 { 10676 struct sd_lun *un = NULL; 10677 struct uio *uio = aio->aio_uio; 10678 int secmask; 10679 int err = 0; 10680 sd_ssc_t *ssc; 10681 10682 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10683 return (ENXIO); 10684 } 10685 10686 ASSERT(!mutex_owned(SD_MUTEX(un))); 10687 10688 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) { 10689 mutex_enter(SD_MUTEX(un)); 10690 /* 10691 * Because the call to sd_ready_and_valid will issue I/O, we 10692 * must wait here if either the device is suspended or 10693 * if its power level is changing. 10694 */ 10695 while ((un->un_state == SD_STATE_SUSPENDED) || 10696 (un->un_state == SD_STATE_PM_CHANGING)) { 10697 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10698 } 10699 un->un_ncmds_in_driver++; 10700 mutex_exit(SD_MUTEX(un)); 10701 10702 /* Initialize sd_ssc_t for internal uscsi commands */ 10703 ssc = sd_ssc_init(un); 10704 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) { 10705 err = EIO; 10706 } else { 10707 err = 0; 10708 } 10709 sd_ssc_fini(ssc); 10710 10711 mutex_enter(SD_MUTEX(un)); 10712 un->un_ncmds_in_driver--; 10713 ASSERT(un->un_ncmds_in_driver >= 0); 10714 mutex_exit(SD_MUTEX(un)); 10715 if (err != 0) 10716 return (err); 10717 } 10718 10719 /* 10720 * Write requests are restricted to multiples of the system block size.
10721 */ 10722 secmask = un->un_sys_blocksize - 1; 10723 10724 if (uio->uio_loffset & ((offset_t)(secmask))) { 10725 SD_ERROR(SD_LOG_READ_WRITE, un, 10726 "sdawrite: file offset not modulo %d\n", 10727 un->un_sys_blocksize); 10728 err = EINVAL; 10729 } else if (uio->uio_iov->iov_len & (secmask)) { 10730 SD_ERROR(SD_LOG_READ_WRITE, un, 10731 "sdawrite: transfer length not modulo %d\n", 10732 un->un_sys_blocksize); 10733 err = EINVAL; 10734 } else { 10735 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10736 } 10737 10738 return (err); 10739 } 10740 10741 10742 10743 10744 10745 /* 10746 * Driver IO processing follows the following sequence: 10747 * 10748 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10749 * | | ^ 10750 * v v | 10751 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10752 * | | | | 10753 * v | | | 10754 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10755 * | | ^ ^ 10756 * v v | | 10757 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10758 * | | | | 10759 * +---+ | +------------+ +-------+ 10760 * | | | | 10761 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10762 * | v | | 10763 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10764 * | | ^ | 10765 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10766 * | v | | 10767 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10768 * | | ^ | 10769 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10770 * | v | | 10771 * | sd_checksum_iostart() sd_checksum_iodone() | 10772 * | | ^ | 10773 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10774 * | v | | 10775 * | sd_pm_iostart() sd_pm_iodone() | 10776 * | | ^ | 10777 * | | | | 10778 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10779 * | ^ 10780 * v | 10781 * sd_core_iostart() | 10782 * | | 10783 * | +------>(*destroypkt)() 10784 * +-> sd_start_cmds() <-+ | | 10785 * | | | v 10786 * | | | scsi_destroy_pkt(9F) 10787 * | | | 10788 * +->(*initpkt)() +- sdintr() 10789 * | | | | 10790 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10791 * | +-> scsi_setup_cdb(9F) | 10792 * | | 10793 * +--> scsi_transport(9F) | 10794 * | | 10795 * +----> SCSA ---->+ 10796 * 10797 * 10798 * This code is based upon the following presumptions: 10799 * 10800 * - iostart and iodone functions operate on buf(9S) structures. These 10801 * functions perform the necessary operations on the buf(9S) and pass 10802 * them along to the next function in the chain by using the macros 10803 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10804 * (for iodone side functions). 10805 * 10806 * - The iostart side functions may sleep. The iodone side functions 10807 * are called under interrupt context and may NOT sleep. Therefore 10808 * iodone side functions also may not call iostart side functions. 10809 * (NOTE: iostart side functions should NOT sleep for memory, as 10810 * this could result in deadlock.) 10811 * 10812 * - An iostart side function may call its corresponding iodone side 10813 * function directly (if necessary). 10814 * 10815 * - In the event of an error, an iostart side function can return a buf(9S) 10816 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10817 * b_error in the usual way of course). 10818 * 10819 * - The taskq mechanism may be used by the iodone side functions to dispatch 10820 * requests to the iostart side functions. The iostart side functions in 10821 * this case would be called under the context of a taskq thread, so it's 10822 * OK for them to block/sleep/spin in this case. 
10823 * 10824 * - iostart side functions may allocate "shadow" buf(9S) structs and 10825 * pass them along to the next function in the chain. The corresponding 10826 * iodone side functions must coalesce the "shadow" bufs and return 10827 * the "original" buf to the next higher layer. 10828 * 10829 * - The b_private field of the buf(9S) struct holds a pointer to 10830 * an sd_xbuf struct, which contains information needed to 10831 * construct the scsi_pkt for the command. 10832 * 10833 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 10834 * layer must acquire & release the SD_MUTEX(un) as needed. 10835 */ 10836 10837 10838 /* 10839 * Create taskq for all targets in the system. This is created at 10840 * _init(9E) and destroyed at _fini(9E). 10841 * 10842 * Note: here we set the minalloc to a reasonably high number to ensure that 10843 * we will have an adequate supply of task entries available at interrupt time. 10844 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 10845 * sd_taskq_create(). Since we do not want to sleep for allocations at 10846 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 10847 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 10848 * requests at any one instant in time. 10849 */ 10850 #define SD_TASKQ_NUMTHREADS 8 10851 #define SD_TASKQ_MINALLOC 256 10852 #define SD_TASKQ_MAXALLOC 256 10853 10854 static taskq_t *sd_tq = NULL; 10855 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq)) 10856 10857 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 10858 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 10859 10860 /* 10861 * The following task queue is being created for the write part of 10862 * read-modify-write of non-512 block size devices. 10863 * Limit the number of threads to 1 for now. This number has been chosen 10864 * considering the fact that it currently applies only to DVD-RAM and MO 10865 * drives, for which performance is not the main criterion at this stage. 10866 * Note: it remains to be explored whether a single taskq could be used in future. 10867 */ 10868 #define SD_WMR_TASKQ_NUMTHREADS 1 10869 static taskq_t *sd_wmr_tq = NULL; 10870 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq)) 10871 10872 /* 10873 * Function: sd_taskq_create 10874 * 10875 * Description: Create taskq thread(s) and preallocate task entries 10876 * 10877 * Return Code: None. The created taskqs are stored in sd_tq and sd_wmr_tq. 10878 * 10879 * Context: Can sleep. Requires blockable context. 10880 * 10881 * Notes: - The taskq() facility currently is NOT part of the DDI. 10882 * (definitely NOT recommended for 3rd-party drivers!) :-) 10883 * - taskq_create() will block for memory; it will also panic 10884 * if it cannot create the requested number of threads. 10885 * - Currently taskq_create() creates threads that cannot be 10886 * swapped.
10887 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 10888 * supply of taskq entries at interrupt time (ie, so that we 10889 * do not have to sleep for memory) 10890 */ 10891 10892 static void 10893 sd_taskq_create(void) 10894 { 10895 char taskq_name[TASKQ_NAMELEN]; 10896 10897 ASSERT(sd_tq == NULL); 10898 ASSERT(sd_wmr_tq == NULL); 10899 10900 (void) snprintf(taskq_name, sizeof (taskq_name), 10901 "%s_drv_taskq", sd_label); 10902 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 10903 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10904 TASKQ_PREPOPULATE)); 10905 10906 (void) snprintf(taskq_name, sizeof (taskq_name), 10907 "%s_rmw_taskq", sd_label); 10908 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 10909 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 10910 TASKQ_PREPOPULATE)); 10911 } 10912 10913 10914 /* 10915 * Function: sd_taskq_delete 10916 * 10917 * Description: Complementary cleanup routine for sd_taskq_create(). 10918 * 10919 * Context: Kernel thread context. 10920 */ 10921 10922 static void 10923 sd_taskq_delete(void) 10924 { 10925 ASSERT(sd_tq != NULL); 10926 ASSERT(sd_wmr_tq != NULL); 10927 taskq_destroy(sd_tq); 10928 taskq_destroy(sd_wmr_tq); 10929 sd_tq = NULL; 10930 sd_wmr_tq = NULL; 10931 } 10932 10933 10934 /* 10935 * Function: sdstrategy 10936 * 10937 * Description: Driver's strategy (9E) entry point function. 10938 * 10939 * Arguments: bp - pointer to buf(9S) 10940 * 10941 * Return Code: Always returns zero 10942 * 10943 * Context: Kernel thread context. 10944 */ 10945 10946 static int 10947 sdstrategy(struct buf *bp) 10948 { 10949 struct sd_lun *un; 10950 10951 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 10952 if (un == NULL) { 10953 bioerror(bp, EIO); 10954 bp->b_resid = bp->b_bcount; 10955 biodone(bp); 10956 return (0); 10957 } 10958 /* As was done in the past, fail new cmds if state is dumping. */ 10959 if (un->un_state == SD_STATE_DUMPING) { 10960 bioerror(bp, ENXIO); 10961 bp->b_resid = bp->b_bcount; 10962 biodone(bp); 10963 return (0); 10964 } 10965 10966 ASSERT(!mutex_owned(SD_MUTEX(un))); 10967 10968 /* 10969 * Commands may sneak in while we release the mutex in 10970 * DDI_SUSPEND, so we should block new commands. However, old 10971 * commands that are still in the driver at this point should 10972 * still be allowed to drain. 10973 */ 10974 mutex_enter(SD_MUTEX(un)); 10975 /* 10976 * Must wait here if either the device is suspended or 10977 * if its power level is changing. 10978 */ 10979 while ((un->un_state == SD_STATE_SUSPENDED) || 10980 (un->un_state == SD_STATE_PM_CHANGING)) { 10981 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10982 } 10983 10984 un->un_ncmds_in_driver++; 10985 10986 /* 10987 * atapi: Since we are running the CD for now in PIO mode we need to 10988 * call bp_mapin here to avoid bp_mapin being called in interrupt 10989 * context from the HBA's init_pkt routine. 10990 */ 10991 if (un->un_f_cfg_is_atapi == TRUE) { 10992 mutex_exit(SD_MUTEX(un)); 10993 bp_mapin(bp); 10994 mutex_enter(SD_MUTEX(un)); 10995 } 10996 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 10997 un->un_ncmds_in_driver); 10998 10999 if (bp->b_flags & B_WRITE) 11000 un->un_f_sync_cache_required = TRUE; 11001 11002 mutex_exit(SD_MUTEX(un)); 11003 11004 /* 11005 * This will (eventually) allocate the sd_xbuf area and 11006 * call sd_xbuf_strategy(). We just want to return the 11007 * result of ddi_xbuf_qstrategy so that we have an 11008 * optimized tail call which saves us a stack frame.
11009 */ 11010 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 11011 } 11012 11013 11014 /* 11015 * Function: sd_xbuf_strategy 11016 * 11017 * Description: Function for initiating IO operations via the 11018 * ddi_xbuf_qstrategy() mechanism. 11019 * 11020 * Context: Kernel thread context. 11021 */ 11022 11023 static void 11024 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11025 { 11026 struct sd_lun *un = arg; 11027 11028 ASSERT(bp != NULL); 11029 ASSERT(xp != NULL); 11030 ASSERT(un != NULL); 11031 ASSERT(!mutex_owned(SD_MUTEX(un))); 11032 11033 /* 11034 * Initialize the fields in the xbuf and save a pointer to the 11035 * xbuf in bp->b_private. 11036 */ 11037 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11038 11039 /* Send the buf down the iostart chain */ 11040 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11041 } 11042 11043 11044 /* 11045 * Function: sd_xbuf_init 11046 * 11047 * Description: Prepare the given sd_xbuf struct for use. 11048 * 11049 * Arguments: un - ptr to softstate 11050 * bp - ptr to associated buf(9S) 11051 * xp - ptr to associated sd_xbuf 11052 * chain_type - IO chain type to use: 11053 * SD_CHAIN_NULL 11054 * SD_CHAIN_BUFIO 11055 * SD_CHAIN_USCSI 11056 * SD_CHAIN_DIRECT 11057 * SD_CHAIN_DIRECT_PRIORITY 11058 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11059 * initialization; may be NULL if none. 11060 * 11061 * Context: Kernel thread context 11062 */ 11063 11064 static void 11065 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11066 uchar_t chain_type, void *pktinfop) 11067 { 11068 int index; 11069 11070 ASSERT(un != NULL); 11071 ASSERT(bp != NULL); 11072 ASSERT(xp != NULL); 11073 11074 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11075 bp, chain_type); 11076 11077 xp->xb_un = un; 11078 xp->xb_pktp = NULL; 11079 xp->xb_pktinfo = pktinfop; 11080 xp->xb_private = bp->b_private; 11081 xp->xb_blkno = (daddr_t)bp->b_blkno; 11082 11083 /* 11084 * Set up the iostart and iodone chain indexes in the xbuf, based 11085 * upon the specified chain type to use. 11086 */ 11087 switch (chain_type) { 11088 case SD_CHAIN_NULL: 11089 /* 11090 * Fall through to just use the values for the buf type, even 11091 * though for the NULL chain these values will never be used. 11092 */ 11093 /* FALLTHRU */ 11094 case SD_CHAIN_BUFIO: 11095 index = un->un_buf_chain_type; 11096 break; 11097 case SD_CHAIN_USCSI: 11098 index = un->un_uscsi_chain_type; 11099 break; 11100 case SD_CHAIN_DIRECT: 11101 index = un->un_direct_chain_type; 11102 break; 11103 case SD_CHAIN_DIRECT_PRIORITY: 11104 index = un->un_priority_chain_type; 11105 break; 11106 default: 11107 /* We're really broken if we ever get here... */ 11108 panic("sd_xbuf_init: illegal chain type!"); 11109 /*NOTREACHED*/ 11110 } 11111 11112 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11113 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11114 11115 /* 11116 * It might be a bit easier to simply bzero the entire xbuf above, 11117 * but it turns out that since we init a fair number of members anyway, 11118 * we save a fair number of cycles by doing explicit assignment of zero.
11119 */ 11120 xp->xb_pkt_flags = 0; 11121 xp->xb_dma_resid = 0; 11122 xp->xb_retry_count = 0; 11123 xp->xb_victim_retry_count = 0; 11124 xp->xb_ua_retry_count = 0; 11125 xp->xb_nr_retry_count = 0; 11126 xp->xb_sense_bp = NULL; 11127 xp->xb_sense_status = 0; 11128 xp->xb_sense_state = 0; 11129 xp->xb_sense_resid = 0; 11130 xp->xb_ena = 0; 11131 11132 bp->b_private = xp; 11133 bp->b_flags &= ~(B_DONE | B_ERROR); 11134 bp->b_resid = 0; 11135 bp->av_forw = NULL; 11136 bp->av_back = NULL; 11137 bioerror(bp, 0); 11138 11139 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11140 } 11141 11142 11143 /* 11144 * Function: sd_uscsi_strategy 11145 * 11146 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11147 * 11148 * Arguments: bp - buf struct ptr 11149 * 11150 * Return Code: Always returns 0 11151 * 11152 * Context: Kernel thread context 11153 */ 11154 11155 static int 11156 sd_uscsi_strategy(struct buf *bp) 11157 { 11158 struct sd_lun *un; 11159 struct sd_uscsi_info *uip; 11160 struct sd_xbuf *xp; 11161 uchar_t chain_type; 11162 uchar_t cmd; 11163 11164 ASSERT(bp != NULL); 11165 11166 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11167 if (un == NULL) { 11168 bioerror(bp, EIO); 11169 bp->b_resid = bp->b_bcount; 11170 biodone(bp); 11171 return (0); 11172 } 11173 11174 ASSERT(!mutex_owned(SD_MUTEX(un))); 11175 11176 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11177 11178 /* 11179 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11180 */ 11181 ASSERT(bp->b_private != NULL); 11182 uip = (struct sd_uscsi_info *)bp->b_private; 11183 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0]; 11184 11185 mutex_enter(SD_MUTEX(un)); 11186 /* 11187 * atapi: Since we are running the CD for now in PIO mode we need to 11188 * call bp_mapin here to avoid bp_mapin being called in interrupt 11189 * context from the HBA's init_pkt routine. 11190 */ 11191 if (un->un_f_cfg_is_atapi == TRUE) { 11192 mutex_exit(SD_MUTEX(un)); 11193 bp_mapin(bp); 11194 mutex_enter(SD_MUTEX(un)); 11195 } 11196 un->un_ncmds_in_driver++; 11197 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11198 un->un_ncmds_in_driver); 11199 11200 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) && 11201 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1)) 11202 un->un_f_sync_cache_required = TRUE; 11203 11204 mutex_exit(SD_MUTEX(un)); 11205 11206 switch (uip->ui_flags) { 11207 case SD_PATH_DIRECT: 11208 chain_type = SD_CHAIN_DIRECT; 11209 break; 11210 case SD_PATH_DIRECT_PRIORITY: 11211 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11212 break; 11213 default: 11214 chain_type = SD_CHAIN_USCSI; 11215 break; 11216 } 11217 11218 /* 11219 * We may allocate an extra buf for external USCSI commands. If the 11220 * application asks for bigger than 20-byte sense data via USCSI, 11221 * the SCSA layer will allocate a 252-byte sense buffer for that command.
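 * To accommodate that, the sd_xbuf allocated below is over-sized
 * (presumably the extra MAX_SENSE_LENGTH - SENSE_LENGTH bytes extend
 * its embedded sense area), and sd_uscsi_iodone() later frees it
 * using the same size computation.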
11222 */ 11223 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen > 11224 SENSE_LENGTH) { 11225 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH + 11226 MAX_SENSE_LENGTH, KM_SLEEP); 11227 } else { 11228 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP); 11229 } 11230 11231 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 11232 11233 /* Use the index obtained within xbuf_init */ 11234 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 11235 11236 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 11237 11238 return (0); 11239 } 11240 11241 /* 11242 * Function: sd_send_scsi_cmd 11243 * 11244 * Description: Runs a USCSI command for the user (when called through 11245 * sdioctl), or for the driver 11246 * 11247 * Arguments: dev - the dev_t for the device 11248 * incmd - ptr to a valid uscsi_cmd struct 11249 * flag - bit flag, indicating open settings, 32/64 bit type 11250 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11251 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11252 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11253 * to use the USCSI "direct" chain and bypass the normal 11254 * command waitq. 11255 * 11256 * Return Code: 0 - successful completion of the given command 11257 * EIO - scsi_uscsi_handle_command() failed 11258 * ENXIO - soft state not found for specified dev 11259 * EINVAL 11260 * EFAULT - copyin/copyout error 11261 * return code of scsi_uscsi_handle_command(): 11262 * EIO 11263 * ENXIO 11264 * EACCES 11265 * 11266 * Context: Waits for command to complete. Can sleep. 11267 */ 11268 11269 static int 11270 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag, 11271 enum uio_seg dataspace, int path_flag) 11272 { 11273 struct sd_lun *un; 11274 sd_ssc_t *ssc; 11275 int rval; 11276 11277 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 11278 if (un == NULL) { 11279 return (ENXIO); 11280 } 11281 11282 /* 11283 * Use sd_ssc_send to handle the uscsi cmd 11284 */ 11285 ssc = sd_ssc_init(un); 11286 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag); 11287 sd_ssc_fini(ssc); 11288 11289 return (rval); 11290 } 11291 11292 /* 11293 * Function: sd_ssc_init 11294 * 11295 * Description: Users of the uscsi path call this function to initialize the 11296 * necessary fields, such as the uscsi_cmd and sd_uscsi_info structs. 11297 * 11298 * The return value of sd_send_scsi_cmd will be treated as a 11299 * fault in various conditions. Even if it is not zero, some 11300 * callers may ignore the return value. That is to say, we 11301 * cannot make an accurate assessment in sdintr, since a 11302 * command failing in sdintr does not mean the caller of 11303 * sd_send_scsi_cmd will treat it as a real failure. 11304 * 11305 * To avoid printing too many error logs for a failed uscsi 11306 * packet that the caller may not treat as a failure, sd 11307 * keeps silent when handling all uscsi commands. 11308 * 11309 * During detach->attach and attach-open, for some types of 11310 * problems, the driver should be providing information about 11311 * the problem encountered. These commands use USCSI_SILENT, which 11312 * suppresses all driver information. The result is that no 11313 * information about the problem is available. Being 11314 * completely silent during this time is inappropriate. The 11315 * driver needs a more selective filter than USCSI_SILENT, so 11316 * that information related to faults is provided.
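 *
 * As a concrete sketch of the intended usage (this hypothetical
 * caller and its uscsi_cmd "ucmd" are illustrative only; the general
 * calling sequence is spelled out below):
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *	status = sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE,
 *	    SD_PATH_DIRECT);
 *	if (status != 0)
 *		sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
 *	else
 *		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
 *	sd_ssc_fini(ssc);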
11317 * 11318 * To make an accurate assessment, the caller of 11319 * sd_send_scsi_USCSI_CMD should take ownership and 11320 * gather the necessary information to print error messages. 11321 * 11322 * If we want to print the necessary info for a uscsi command, we need to 11323 * keep the uscsi_cmd and sd_uscsi_info until we can make the 11324 * assessment. We use sd_ssc_init to alloc the necessary 11325 * structs for sending a uscsi command, and we are also 11326 * responsible for freeing the memory by calling 11327 * sd_ssc_fini. 11328 * 11329 * The calling sequence will look like: 11330 * sd_ssc_init-> 11331 * 11332 * ... 11333 * 11334 * sd_send_scsi_USCSI_CMD-> 11335 * sd_ssc_send-> - - - sdintr 11336 * ... 11337 * 11338 * if we think the return value should be treated as a 11339 * failure, we make the assessment here and print out the 11340 * necessary information by retrieving the uscsi_cmd and sd_uscsi_info 11341 * 11342 * ... 11343 * 11344 * sd_ssc_fini 11345 * 11346 * 11347 * Arguments: un - pointer to driver soft state (unit) structure for this 11348 * target. 11349 * 11350 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct; it contains 11351 * uscsi_cmd and sd_uscsi_info. 11352 * NULL - if memory cannot be allocated for the sd_ssc_t struct 11353 * 11354 * Context: Kernel Thread. 11355 */ 11356 static sd_ssc_t * 11357 sd_ssc_init(struct sd_lun *un) 11358 { 11359 sd_ssc_t *ssc; 11360 struct uscsi_cmd *ucmdp; 11361 struct sd_uscsi_info *uip; 11362 11363 ASSERT(un != NULL); 11364 ASSERT(!mutex_owned(SD_MUTEX(un))); 11365 11366 /* 11367 * Allocate sd_ssc_t structure 11368 */ 11369 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP); 11370 11371 /* 11372 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine 11373 */ 11374 ucmdp = scsi_uscsi_alloc(); 11375 11376 /* 11377 * Allocate sd_uscsi_info structure 11378 */ 11379 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11380 11381 ssc->ssc_uscsi_cmd = ucmdp; 11382 ssc->ssc_uscsi_info = uip; 11383 ssc->ssc_un = un; 11384 11385 return (ssc); 11386 } 11387 11388 /* 11389 * Function: sd_ssc_fini 11390 * 11391 * Description: Free the sd_ssc_t struct and everything hanging off it. 11392 * 11393 * Arguments: ssc - struct pointer of sd_ssc_t. 11394 */ 11395 static void 11396 sd_ssc_fini(sd_ssc_t *ssc) 11397 { 11398 scsi_uscsi_free(ssc->ssc_uscsi_cmd); 11399 11400 if (ssc->ssc_uscsi_info != NULL) { 11401 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info)); 11402 ssc->ssc_uscsi_info = NULL; 11403 } 11404 11405 kmem_free(ssc, sizeof (sd_ssc_t)); 11406 ssc = NULL; 11407 } 11408 11409 /* 11410 * Function: sd_ssc_send 11411 * 11412 * Description: Runs a USCSI command for the user when called through sdioctl, 11413 * or for the driver. 11414 * 11415 * Arguments: ssc - the sd_ssc_t struct that brings the uscsi_cmd and 11416 * sd_uscsi_info in. 11417 * incmd - ptr to a valid uscsi_cmd struct 11418 * flag - bit flag, indicating open settings, 32/64 bit type 11419 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11420 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11421 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11422 * to use the USCSI "direct" chain and bypass the normal 11423 * command waitq.
11424 * 11425 * Return Code: 0 - successful completion of the given command 11426 * EIO - scsi_uscsi_handle_command() failed 11427 * ENXIO - soft state not found for specified dev 11428 * EINVAL 11429 * EFAULT - copyin/copyout error 11430 * return code of scsi_uscsi_handle_command(): 11431 * EIO 11432 * ENXIO 11433 * EACCES 11434 * 11435 * Context: Kernel Thread; 11436 * Waits for command to complete. Can sleep. 11437 */ 11438 static int 11439 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag, 11440 enum uio_seg dataspace, int path_flag) 11441 { 11442 struct sd_uscsi_info *uip; 11443 struct uscsi_cmd *uscmd; 11444 struct sd_lun *un; 11445 dev_t dev; 11446 11447 int format = 0; 11448 int rval; 11449 11450 ASSERT(ssc != NULL); 11451 un = ssc->ssc_un; 11452 ASSERT(un != NULL); 11453 uscmd = ssc->ssc_uscsi_cmd; 11454 ASSERT(uscmd != NULL); 11455 ASSERT(!mutex_owned(SD_MUTEX(un))); 11456 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) { 11457 /* 11458 * If we enter here, it indicates that the previous uscsi 11459 * command has not been processed by sd_ssc_assessment. 11460 * This violates our rules of FMA telemetry processing. 11461 * We should print out this message and the last undisposed 11462 * uscsi command. 11463 */ 11464 if (uscmd->uscsi_cdb != NULL) { 11465 SD_INFO(SD_LOG_SDTEST, un, 11466 "sd_ssc_send is missing a matching " 11467 "sd_ssc_assessment when running command 0x%x.\n", 11468 uscmd->uscsi_cdb[0]); 11469 } 11470 /* 11471 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be 11472 * the initial status. 11473 */ 11474 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11475 } 11476 11477 /* 11478 * We need to make sure this sd_ssc_send is followed by a 11479 * sd_ssc_assessment to avoid losing FMA telemetry. 11480 */ 11481 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT; 11482 11483 #ifdef SDDEBUG 11484 switch (dataspace) { 11485 case UIO_USERSPACE: 11486 SD_TRACE(SD_LOG_IO, un, 11487 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un); 11488 break; 11489 case UIO_SYSSPACE: 11490 SD_TRACE(SD_LOG_IO, un, 11491 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un); 11492 break; 11493 default: 11494 SD_TRACE(SD_LOG_IO, un, 11495 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un); 11496 break; 11497 } 11498 #endif 11499 11500 rval = scsi_uscsi_copyin((intptr_t)incmd, flag, 11501 SD_ADDRESS(un), &uscmd); 11502 if (rval != 0) { 11503 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send: " 11504 "scsi_uscsi_copyin failed\n"); 11505 return (rval); 11506 } 11507 11508 if ((uscmd->uscsi_cdb != NULL) && 11509 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) { 11510 mutex_enter(SD_MUTEX(un)); 11511 un->un_f_format_in_progress = TRUE; 11512 mutex_exit(SD_MUTEX(un)); 11513 format = 1; 11514 } 11515 11516 /* 11517 * Allocate an sd_uscsi_info struct and fill it with the info 11518 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11519 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11520 * since we allocate the buf here in this function, we do not 11521 * need to preserve the prior contents of b_private. 11522 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11523 */ 11524 uip = ssc->ssc_uscsi_info; 11525 uip->ui_flags = path_flag; 11526 uip->ui_cmdp = uscmd; 11527 11528 /* 11529 * Commands sent with priority are intended for error recovery 11530 * situations, and do not have retries performed.
11531 */ 11532 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11533 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11534 } 11535 uscmd->uscsi_flags &= ~USCSI_NOINTR; 11536 11537 dev = SD_GET_DEV(un); 11538 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd, 11539 sd_uscsi_strategy, NULL, uip); 11540 11541 /* 11542 * Mark ssc_flags right after handle_cmd to make sure 11543 * the uscsi command has been sent 11544 */ 11545 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED; 11546 11547 #ifdef SDDEBUG 11548 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11549 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11550 uscmd->uscsi_status, uscmd->uscsi_resid); 11551 if (uscmd->uscsi_bufaddr != NULL) { 11552 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: " 11553 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11554 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11555 if (dataspace == UIO_SYSSPACE) { 11556 SD_DUMP_MEMORY(un, SD_LOG_IO, 11557 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11558 uscmd->uscsi_buflen, SD_LOG_HEX); 11559 } 11560 } 11561 #endif 11562 11563 if (format == 1) { 11564 mutex_enter(SD_MUTEX(un)); 11565 un->un_f_format_in_progress = FALSE; 11566 mutex_exit(SD_MUTEX(un)); 11567 } 11568 11569 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd); 11570 11571 return (rval); 11572 } 11573 11574 /* 11575 * Function: sd_ssc_print 11576 * 11577 * Description: Print information available to the console. 11578 * 11579 * Arguments: ssc - the sd_ssc_t struct that brings the uscsi_cmd and 11580 * sd_uscsi_info in. 11581 * sd_severity - log level. 11582 * Context: Kernel thread or interrupt context. 11583 */ 11584 static void 11585 sd_ssc_print(sd_ssc_t *ssc, int sd_severity) 11586 { 11587 struct uscsi_cmd *ucmdp; 11588 struct scsi_device *devp; 11589 dev_info_t *devinfo; 11590 uchar_t *sensep; 11591 int senlen; 11592 union scsi_cdb *cdbp; 11593 uchar_t com; 11594 extern struct scsi_key_strings scsi_cmds[]; 11595 11596 ASSERT(ssc != NULL); 11597 ASSERT(ssc->ssc_un != NULL); 11598 11599 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT) 11600 return; 11601 ucmdp = ssc->ssc_uscsi_cmd; 11602 devp = SD_SCSI_DEVP(ssc->ssc_un); 11603 devinfo = SD_DEVINFO(ssc->ssc_un); 11604 ASSERT(ucmdp != NULL); 11605 ASSERT(devp != NULL); 11606 ASSERT(devinfo != NULL); 11607 sensep = (uint8_t *)ucmdp->uscsi_rqbuf; 11608 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid; 11609 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb; 11610 11611 /* In certain cases (like DOORLOCK), the cdb could be NULL. */ 11612 if (cdbp == NULL) 11613 return; 11614 /* We don't print a log if no sense data is available. */ 11615 if (senlen == 0) 11616 sensep = NULL; 11617 com = cdbp->scc_cmd; 11618 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com, 11619 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL); 11620 } 11621 11622 /* 11623 * Function: sd_ssc_assessment 11624 * 11625 * Description: We use this function to make an assessment at the point 11626 * where the SD driver may encounter a potential error. 11627 * 11628 * Arguments: ssc - the sd_ssc_t struct that brings the uscsi_cmd and 11629 * sd_uscsi_info in. 11630 * tp_assess - a hint of strategy for ereport posting. 11631 * Possible values of tp_assess include: 11632 * SD_FMT_IGNORE - we don't post any ereport because we're 11633 * sure that it is ok to ignore the underlying problems. 11634 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now 11635 * but it might not be correct to ignore the underlying hardware 11636 * error.
11637 * SD_FMT_STATUS_CHECK - we will post an ereport with the 11638 * payload driver-assessment of value "fail" or 11639 * "fatal" (depending on what information we have here). This 11640 * assessment value is usually set when the SD driver thinks a 11641 * potential error has occurred (typically, when the return value 11642 * of the SCSI command is EIO). 11643 * SD_FMT_STANDARD - we will post an ereport with the payload 11644 * driver-assessment of value "info". This assessment value is 11645 * set when the SCSI command returned successfully and with 11646 * sense data sent back. 11647 * 11648 * Context: Kernel thread. 11649 */ 11650 static void 11651 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess) 11652 { 11653 int senlen = 0; 11654 struct uscsi_cmd *ucmdp = NULL; 11655 struct sd_lun *un; 11656 11657 ASSERT(ssc != NULL); 11658 un = ssc->ssc_un; 11659 ASSERT(un != NULL); 11660 ucmdp = ssc->ssc_uscsi_cmd; 11661 ASSERT(ucmdp != NULL); 11662 11663 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) { 11664 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT; 11665 } else { 11666 /* 11667 * If we enter here, it indicates that we have a wrong 11668 * calling sequence of sd_ssc_send and sd_ssc_assessment, 11669 * both of which should be called in a pair to avoid 11670 * loss of FMA telemetry. 11671 */ 11672 if (ucmdp->uscsi_cdb != NULL) { 11673 SD_INFO(SD_LOG_SDTEST, un, 11674 "sd_ssc_assessment is missing a " 11675 "matching sd_ssc_send when running 0x%x, " 11676 "or there are superfluous sd_ssc_assessment calls " 11677 "for the same sd_ssc_send.\n", 11678 ucmdp->uscsi_cdb[0]); 11679 } 11680 /* 11681 * Set the ssc_flags to the initial value to avoid passing 11682 * down dirty flags to the following sd_ssc_send function. 11683 */ 11684 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11685 return; 11686 } 11687 11688 /* 11689 * Only handle an issued command which is waiting for assessment. 11690 * A command which is not issued will not have 11691 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here. 11692 */ 11693 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) { 11694 sd_ssc_print(ssc, SCSI_ERR_INFO); 11695 return; 11696 } else { 11697 /* 11698 * For an issued command, we should clear this flag so 11699 * that the sd_ssc_t structure can be reused for 11700 * multiple uscsi commands. 11701 */ 11702 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED; 11703 } 11704 11705 /* 11706 * We will not deal with non-retryable (flag USCSI_DIAGNOSE set) 11707 * commands here, and we should clear the ssc_flags before returning. 11708 */ 11709 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) { 11710 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11711 return; 11712 } 11713 11714 switch (tp_assess) { 11715 case SD_FMT_IGNORE: 11716 case SD_FMT_IGNORE_COMPROMISE: 11717 break; 11718 case SD_FMT_STATUS_CHECK: 11719 /* 11720 * For a failed command (including a succeeded command 11721 * with invalid data sent back). 11722 */ 11723 sd_ssc_post(ssc, SD_FM_DRV_FATAL); 11724 break; 11725 case SD_FMT_STANDARD: 11726 /* 11727 * Always for succeeded commands, possibly with sense 11728 * data sent back. 11729 * Limitation: 11730 * We can only handle a succeeded command with sense 11731 * data sent back when auto-request-sense is enabled.
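 * The amount of valid sense data is computed just below as the
 * requested sense length (uscsi_rqlen) minus the untransferred
 * residual (uscsi_rqresid); a positive result means the target
 * actually returned sense bytes.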
11732 */ 11733 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen - 11734 ssc->ssc_uscsi_cmd->uscsi_rqresid; 11735 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) && 11736 (un->un_f_arq_enabled == TRUE) && 11737 senlen > 0 && 11738 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) { 11739 sd_ssc_post(ssc, SD_FM_DRV_NOTICE); 11740 } 11741 break; 11742 default: 11743 /* 11744 * We should not see any other type of assessment. 11745 */ 11746 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 11747 "sd_ssc_assessment got wrong " 11748 "sd_type_assessment %d.\n", tp_assess); 11749 break; 11750 } 11751 /* 11752 * Clear the ssc_flags before returning. 11753 */ 11754 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11755 } 11756 11757 /* 11758 * Function: sd_ssc_post 11759 * 11760 * Description: 1. Read the driver property to get the fm-scsi-log flag. 11761 * 2. Print a log if fm_log_capable is non-zero. 11762 * 3. Call sd_ssc_ereport_post to post an ereport if possible. 11763 * 11764 * Context: May be called from kernel thread or interrupt context. 11765 */ 11766 static void 11767 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess) 11768 { 11769 struct sd_lun *un; 11770 int sd_severity; 11771 11772 ASSERT(ssc != NULL); 11773 un = ssc->ssc_un; 11774 ASSERT(un != NULL); 11775 11776 /* 11777 * We may enter here from sd_ssc_assessment (for a USCSI command) or 11778 * directly from sdintr context. 11779 * We don't handle non-disk drives (CD-ROM, removable media). 11780 * Clear the ssc_flags before returning in case we've set 11781 * SSC_FLAGS_INVALID_XXX, which should be skipped for a non-disk 11782 * device. 11783 */ 11784 if (ISCD(un) || un->un_f_has_removable_media) { 11785 ssc->ssc_flags = SSC_FLAGS_UNKNOWN; 11786 return; 11787 } 11788 11789 switch (sd_assess) { 11790 case SD_FM_DRV_FATAL: 11791 sd_severity = SCSI_ERR_FATAL; 11792 break; 11793 case SD_FM_DRV_RECOVERY: 11794 sd_severity = SCSI_ERR_RECOVERED; 11795 break; 11796 case SD_FM_DRV_RETRY: 11797 sd_severity = SCSI_ERR_RETRYABLE; 11798 break; 11799 case SD_FM_DRV_NOTICE: 11800 sd_severity = SCSI_ERR_INFO; 11801 break; 11802 default: 11803 sd_severity = SCSI_ERR_UNKNOWN; 11804 } 11805 /* print log */ 11806 sd_ssc_print(ssc, sd_severity); 11807 11808 /* always post ereport */ 11809 sd_ssc_ereport_post(ssc, sd_assess); 11810 } 11811 11812 /* 11813 * Function: sd_ssc_set_info 11814 * 11815 * Description: Mark ssc_flags and set ssc_info, which would be the 11816 * payload of a uderr ereport. This function will cause 11817 * sd_ssc_ereport_post to post a uderr ereport only. 11818 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA (USCSI), 11819 * the function will also call SD_ERROR or scsi_log for a 11820 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device. 11821 * 11822 * Arguments: ssc - the sd_ssc_t struct that brings the uscsi_cmd and 11823 * sd_uscsi_info in. 11824 * ssc_flags - indicates the sub-category of a uderr. 11825 * comp - this argument is meaningful only when 11826 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible 11827 * values include: 11828 * > 0, SD_ERROR is used with comp as the driver logging 11829 * component; 11830 * = 0, scsi-log is used to log error telemetries; 11831 * < 0, no log available for this telemetry. 11832 * 11833 * Context: Kernel thread or interrupt context 11834 */ 11835 static void 11836 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...)
11837 { 11838 va_list ap; 11839 11840 ASSERT(ssc != NULL); 11841 ASSERT(ssc->ssc_un != NULL); 11842 11843 ssc->ssc_flags |= ssc_flags; 11844 va_start(ap, fmt); 11845 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap); 11846 va_end(ap); 11847 11848 /* 11849 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command 11850 * with invalid data sent back. For a non-uscsi command, the 11851 * following code will be bypassed. 11852 */ 11853 if (ssc_flags & SSC_FLAGS_INVALID_DATA) { 11854 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) { 11855 /* 11856 * If the error belongs to a certain component and we 11857 * do not want it to show up on the console, we 11858 * will use SD_ERROR; otherwise scsi_log is 11859 * preferred. 11860 */ 11861 if (comp > 0) { 11862 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info); 11863 } else if (comp == 0) { 11864 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label, 11865 CE_WARN, ssc->ssc_info); 11866 } 11867 } 11868 } 11869 } 11870 11871 /* 11872 * Function: sd_buf_iodone 11873 * 11874 * Description: Frees the sd_xbuf & returns the buf to its originator. 11875 * 11876 * Context: May be called from interrupt context. 11877 */ 11878 /* ARGSUSED */ 11879 static void 11880 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 11881 { 11882 struct sd_xbuf *xp; 11883 11884 ASSERT(un != NULL); 11885 ASSERT(bp != NULL); 11886 ASSERT(!mutex_owned(SD_MUTEX(un))); 11887 11888 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 11889 11890 xp = SD_GET_XBUF(bp); 11891 ASSERT(xp != NULL); 11892 11893 /* xbuf is gone after this */ 11894 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) { 11895 mutex_enter(SD_MUTEX(un)); 11896 11897 /* 11898 * Grab the time when the cmd completed. 11899 * This is used for determining if the system has been 11900 * idle long enough to appear idle to the PM framework. 11901 * This lowers the overhead, and therefore improves 11902 * performance per I/O operation. 11903 */ 11904 un->un_pm_idle_time = ddi_get_time(); 11905 11906 un->un_ncmds_in_driver--; 11907 ASSERT(un->un_ncmds_in_driver >= 0); 11908 SD_INFO(SD_LOG_IO, un, 11909 "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 11910 un->un_ncmds_in_driver); 11911 11912 mutex_exit(SD_MUTEX(un)); 11913 } 11914 11915 biodone(bp); /* bp is gone after this */ 11916 11917 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 11918 } 11919 11920 11921 /* 11922 * Function: sd_uscsi_iodone 11923 * 11924 * Description: Frees the sd_xbuf & returns the buf to its originator. 11925 * 11926 * Context: May be called from interrupt context. 11927 */ 11928 /* ARGSUSED */ 11929 static void 11930 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11931 { 11932 struct sd_xbuf *xp; 11933 11934 ASSERT(un != NULL); 11935 ASSERT(bp != NULL); 11936 11937 xp = SD_GET_XBUF(bp); 11938 ASSERT(xp != NULL); 11939 ASSERT(!mutex_owned(SD_MUTEX(un))); 11940 11941 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 11942 11943 bp->b_private = xp->xb_private; 11944 11945 mutex_enter(SD_MUTEX(un)); 11946 11947 /* 11948 * Grab the time when the cmd completed. 11949 * This is used for determining if the system has been 11950 * idle long enough to appear idle to the PM framework. 11951 * This lowers the overhead, and therefore improves 11952 * performance per I/O operation.
11953 */ 11954 un->un_pm_idle_time = ddi_get_time(); 11955 11956 un->un_ncmds_in_driver--; 11957 ASSERT(un->un_ncmds_in_driver >= 0); 11958 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 11959 un->un_ncmds_in_driver); 11960 11961 mutex_exit(SD_MUTEX(un)); 11962 11963 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen > 11964 SENSE_LENGTH) { 11965 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH + 11966 MAX_SENSE_LENGTH); 11967 } else { 11968 kmem_free(xp, sizeof (struct sd_xbuf)); 11969 } 11970 11971 biodone(bp); 11972 11973 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 11974 } 11975 11976 11977 /* 11978 * Function: sd_mapblockaddr_iostart 11979 * 11980 * Description: Verify request lies within the partition limits for 11981 * the indicated minor device. Issue "overrun" buf if 11982 * request would exceed partition range. Converts 11983 * partition-relative block address to absolute. 11984 * 11985 * Context: Can sleep 11986 * 11987 * Issues: This follows what the old code did, in terms of accessing 11988 * some of the partition info in the unit struct without holding 11989 * the mutex. This is a general issue: if the partition info 11990 * can be altered while IO is in progress... as soon as we send 11991 * a buf, its partitioning can be invalid before it gets to the 11992 * device. Probably the right fix is to move partitioning out 11993 * of the driver entirely. 11994 */ 11995 11996 static void 11997 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 11998 { 11999 diskaddr_t nblocks; /* #blocks in the given partition */ 12000 daddr_t blocknum; /* Block number specified by the buf */ 12001 size_t requested_nblocks; 12002 size_t available_nblocks; 12003 int partition; 12004 diskaddr_t partition_offset; 12005 struct sd_xbuf *xp; 12006 12007 ASSERT(un != NULL); 12008 ASSERT(bp != NULL); 12009 ASSERT(!mutex_owned(SD_MUTEX(un))); 12010 12011 SD_TRACE(SD_LOG_IO_PARTITION, un, 12012 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 12013 12014 xp = SD_GET_XBUF(bp); 12015 ASSERT(xp != NULL); 12016 12017 /* 12018 * If the geometry is not indicated as valid, attempt to access 12019 * the unit & verify the geometry/label. This can be the case for 12020 * removable-media devices, or if the device was opened in 12021 * NDELAY/NONBLOCK mode. 12022 */ 12023 partition = SDPART(bp->b_edev); 12024 12025 if (!SD_IS_VALID_LABEL(un)) { 12026 sd_ssc_t *ssc; 12027 /* 12028 * Initialize sd_ssc_t for internal uscsi commands. 12029 * To avoid a potential performance issue, we 12030 * allocate memory only if the label is invalid. 12031 */ 12032 ssc = sd_ssc_init(un); 12033 12034 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) { 12035 /* 12036 * For removable devices it is possible to start an 12037 * I/O without media by opening the device in nodelay 12038 * mode. Also for writable CDs there can be many 12039 * scenarios where there is no geometry yet but the volume 12040 * manager is trying to issue a read() just because 12041 * it can see a TOC on the CD. So do not print a message 12042 * for removables.
12043 */ 12044 if (!un->un_f_has_removable_media) { 12045 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 12046 "i/o to invalid geometry\n"); 12047 } 12048 bioerror(bp, EIO); 12049 bp->b_resid = bp->b_bcount; 12050 SD_BEGIN_IODONE(index, un, bp); 12051 12052 sd_ssc_fini(ssc); 12053 return; 12054 } 12055 sd_ssc_fini(ssc); 12056 } 12057 12058 nblocks = 0; 12059 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 12060 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT); 12061 12062 /* 12063 * blocknum is the starting block number of the request. At this 12064 * point it is still relative to the start of the minor device. 12065 */ 12066 blocknum = xp->xb_blkno; 12067 12068 /* 12069 * Legacy: If the starting block number is one past the last block 12070 * in the partition, do not set B_ERROR in the buf. 12071 */ 12072 if (blocknum == nblocks) { 12073 goto error_exit; 12074 } 12075 12076 /* 12077 * Confirm that the first block of the request lies within the 12078 * partition limits. Also the requested number of bytes must be 12079 * a multiple of the system block size. 12080 */ 12081 if ((blocknum < 0) || (blocknum >= nblocks) || 12082 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 12083 bp->b_flags |= B_ERROR; 12084 goto error_exit; 12085 } 12086 12087 /* 12088 * If the requested # blocks exceeds the available # blocks, that 12089 * is an overrun of the partition. 12090 */ 12091 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 12092 available_nblocks = (size_t)(nblocks - blocknum); 12093 ASSERT(nblocks >= blocknum); 12094 12095 if (requested_nblocks > available_nblocks) { 12096 /* 12097 * Allocate an "overrun" buf to allow the request to proceed 12098 * for the amount of space available in the partition. The 12099 * amount not transferred will be added into the b_resid 12100 * when the operation is complete. The overrun buf 12101 * replaces the original buf here, and the original buf 12102 * is saved inside the overrun buf, for later use. 12103 */ 12104 size_t resid = SD_SYSBLOCKS2BYTES(un, 12105 (offset_t)(requested_nblocks - available_nblocks)); 12106 size_t count = bp->b_bcount - resid; 12107 /* 12108 * Note: count is an unsigned entity, thus it'll NEVER 12109 * be less than 0 so ASSERT the original values are 12110 * correct. 12111 */ 12112 ASSERT(bp->b_bcount >= resid); 12113 12114 bp = sd_bioclone_alloc(bp, count, blocknum, 12115 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 12116 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12117 ASSERT(xp != NULL); 12118 } 12119 12120 /* At this point there should be no residual for this buf. */ 12121 ASSERT(bp->b_resid == 0); 12122 12123 /* Convert the block number to an absolute address. */ 12124 xp->xb_blkno += partition_offset; 12125 12126 SD_NEXT_IOSTART(index, un, bp); 12127 12128 SD_TRACE(SD_LOG_IO_PARTITION, un, 12129 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12130 12131 return; 12132 12133 error_exit: 12134 bp->b_resid = bp->b_bcount; 12135 SD_BEGIN_IODONE(index, un, bp); 12136 SD_TRACE(SD_LOG_IO_PARTITION, un, 12137 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12138 } 12139 12140 12141 /* 12142 * Function: sd_mapblockaddr_iodone 12143 * 12144 * Description: Completion-side processing for partition management. 12145 * 12146 * Context: May be called under interrupt context 12147 */ 12148 12149 static void 12150 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12151 { 12152 /* int partition; */ /* Not used, see below.
*/ 12153 ASSERT(un != NULL); 12154 ASSERT(bp != NULL); 12155 ASSERT(!mutex_owned(SD_MUTEX(un))); 12156 12157 SD_TRACE(SD_LOG_IO_PARTITION, un, 12158 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12159 12160 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12161 /* 12162 * We have an "overrun" buf to deal with... 12163 */ 12164 struct sd_xbuf *xp; 12165 struct buf *obp; /* ptr to the original buf */ 12166 12167 xp = SD_GET_XBUF(bp); 12168 ASSERT(xp != NULL); 12169 12170 /* Retrieve the pointer to the original buf */ 12171 obp = (struct buf *)xp->xb_private; 12172 ASSERT(obp != NULL); 12173 12174 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12175 bioerror(obp, bp->b_error); 12176 12177 sd_bioclone_free(bp); 12178 12179 /* 12180 * Get back the original buf. 12181 * Note that since the restoration of xb_blkno below 12182 * was removed, the sd_xbuf is not needed. 12183 */ 12184 bp = obp; 12185 /* 12186 * xp = SD_GET_XBUF(bp); 12187 * ASSERT(xp != NULL); 12188 */ 12189 } 12190 12191 /* 12192 * Convert xp->xb_blkno back to a minor-device relative value. 12193 * Note: this has been commented out, as it is not needed in the 12194 * current implementation of the driver (ie, this function 12195 * is at the top of the layering chains, so the info would be 12196 * discarded) and it is in the "hot" IO path. 12197 * 12198 * partition = getminor(bp->b_edev) & SDPART_MASK; 12199 * xp->xb_blkno -= un->un_offset[partition]; 12200 */ 12201 12202 SD_NEXT_IODONE(index, un, bp); 12203 12204 SD_TRACE(SD_LOG_IO_PARTITION, un, 12205 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12206 } 12207 12208 12209 /* 12210 * Function: sd_mapblocksize_iostart 12211 * 12212 * Description: Convert between system block size (un->un_sys_blocksize) 12213 * and target block size (un->un_tgt_blocksize). 12214 * 12215 * Context: Can sleep to allocate resources. 12216 * 12217 * Assumptions: A higher layer has already performed any partition validation, 12218 * and converted the xp->xb_blkno to an absolute value relative 12219 * to the start of the device. 12220 * 12221 * It is also assumed that the higher layer has implemented 12222 * an "overrun" mechanism for the case where the request would 12223 * read/write beyond the end of a partition. In this case we 12224 * assume (and ASSERT) that bp->b_resid == 0. 12225 * 12226 * Note: The implementation for this routine assumes the target 12227 * block size remains constant between allocation and transport. 12228 */ 12229 12230 static void 12231 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 12232 { 12233 struct sd_mapblocksize_info *bsp; 12234 struct sd_xbuf *xp; 12235 offset_t first_byte; 12236 daddr_t start_block, end_block; 12237 daddr_t request_bytes; 12238 ushort_t is_aligned = FALSE; 12239 12240 ASSERT(un != NULL); 12241 ASSERT(bp != NULL); 12242 ASSERT(!mutex_owned(SD_MUTEX(un))); 12243 ASSERT(bp->b_resid == 0); 12244 12245 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12246 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 12247 12248 /* 12249 * For a non-writable CD, a write request is an error 12250 */ 12251 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 12252 (un->un_f_mmc_writable_media == FALSE)) { 12253 bioerror(bp, EIO); 12254 bp->b_resid = bp->b_bcount; 12255 SD_BEGIN_IODONE(index, un, bp); 12256 return; 12257 } 12258 12259 /* 12260 * We do not need a shadow buf if the device is using 12261 * un->un_sys_blocksize as its block size or if bcount == 0.
12262 * In this case there is no layer-private data block allocated. 12263 */ 12264 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12265 (bp->b_bcount == 0)) { 12266 goto done; 12267 } 12268 12269 #if defined(__i386) || defined(__amd64) 12270 /* We do not support non-block-aligned transfers for ROD devices */ 12271 ASSERT(!ISROD(un)); 12272 #endif 12273 12274 xp = SD_GET_XBUF(bp); 12275 ASSERT(xp != NULL); 12276 12277 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12278 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 12279 un->un_tgt_blocksize, un->un_sys_blocksize); 12280 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12281 "request start block:0x%x\n", xp->xb_blkno); 12282 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12283 "request len:0x%x\n", bp->b_bcount); 12284 12285 /* 12286 * Allocate the layer-private data area for the mapblocksize layer. 12287 * Layers are allowed to use the xb_private member of the sd_xbuf 12288 * struct to store the pointer to their layer-private data block, but 12289 * each layer also has the responsibility of restoring the prior 12290 * contents of xb_private before returning the buf/xbuf to the 12291 * higher layer that sent it. 12292 * 12293 * Here we save the prior contents of xp->xb_private into the 12294 * bsp->mbs_oprivate field of our layer-private data area. This value 12295 * is restored by sd_mapblocksize_iodone() just prior to freeing up 12296 * the layer-private area and returning the buf/xbuf to the layer 12297 * that sent it. 12298 * 12299 * Note that here we use kmem_zalloc for the allocation as there are 12300 * parts of the mapblocksize code that expect certain fields to be 12301 * zero unless explicitly set to a required value. 12302 */ 12303 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12304 bsp->mbs_oprivate = xp->xb_private; 12305 xp->xb_private = bsp; 12306 12307 /* 12308 * This treats the data on the disk (target) as an array of bytes. 12309 * first_byte is the byte offset, from the beginning of the device, 12310 * to the location of the request. This is converted from a 12311 * un->un_sys_blocksize block address to a byte offset, and then back 12312 * to a block address based upon a un->un_tgt_blocksize block size. 12313 * 12314 * xp->xb_blkno should be absolute upon entry into this function, 12315 * but it is based upon partitions that use the "system" 12316 * block size. It must be adjusted to reflect the block size of 12317 * the target. 12318 * 12319 * Note that end_block is actually the block that follows the last 12320 * block of the request, but that's what is needed for the computation. 12321 */ 12322 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12323 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 12324 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 12325 un->un_tgt_blocksize; 12326 12327 /* request_bytes is rounded up to a multiple of the target block size */ 12328 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 12329 12330 /* 12331 * See if the starting address of the request and the request 12332 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 12333 * then we do not need to allocate a shadow buf to handle the request. 12334 */ 12335 if (((first_byte % un->un_tgt_blocksize) == 0) && 12336 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 12337 is_aligned = TRUE; 12338 } 12339
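	/*
	 * As a worked example (a sketch assuming a 512-byte system block
	 * size and a 2048-byte target block size): a request for system
	 * block 3 of length 512 bytes gives first_byte = 1536, so
	 * start_block = 0, end_block = 1 and request_bytes = 2048. Since
	 * first_byte is not a multiple of un->un_tgt_blocksize, the request
	 * is unaligned and a shadow buf (and, for a write, a
	 * read-modify-write) is required below.
	 */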
12340 if ((bp->b_flags & B_READ) == 0) { 12341 /* 12342 * Lock the range for a write operation. An aligned request is 12343 * considered a simple write; otherwise the request must be a 12344 * read-modify-write. 12345 */ 12346 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 12347 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 12348 } 12349 12350 /* 12351 * Alloc a shadow buf if the request is not aligned. Also, this is 12352 * where the READ command is generated for a read-modify-write. (The 12353 * write phase is deferred until after the read completes.) 12354 */ 12355 if (is_aligned == FALSE) { 12356 12357 struct sd_mapblocksize_info *shadow_bsp; 12358 struct sd_xbuf *shadow_xp; 12359 struct buf *shadow_bp; 12360 12361 /* 12362 * Allocate the shadow buf and its associated xbuf. Note that 12363 * after this call the xb_blkno value in both the original 12364 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 12365 * same: absolute, relative to the start of the device, and 12366 * adjusted for the target block size. The b_blkno in the 12367 * shadow buf will also be set to this value. We should never 12368 * change b_blkno in the original bp however. 12369 * 12370 * Note also that the shadow buf will always need to be a 12371 * READ command, regardless of whether the incoming command 12372 * is a READ or a WRITE. 12373 */ 12374 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 12375 xp->xb_blkno, 12376 (int (*)(struct buf *)) sd_mapblocksize_iodone); 12377 12378 shadow_xp = SD_GET_XBUF(shadow_bp); 12379 12380 /* 12381 * Allocate the layer-private data for the shadow buf. 12382 * (No need to preserve xb_private in the shadow xbuf.) 12383 */ 12384 shadow_xp->xb_private = shadow_bsp = 12385 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12386 12387 /* 12388 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 12389 * to figure out where the start of the user data is (based upon 12390 * the system block size) in the data returned by the READ 12391 * command (which will be based upon the target blocksize). Note 12392 * that this is only really used if the request is unaligned. 12393 */ 12394 bsp->mbs_copy_offset = (ssize_t)(first_byte - 12395 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 12396 ASSERT((bsp->mbs_copy_offset >= 0) && 12397 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 12398 12399 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 12400 12401 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 12402 12403 /* Transfer the wmap (if any) to the shadow buf */ 12404 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 12405 bsp->mbs_wmp = NULL; 12406 12407 /* 12408 * The shadow buf goes on from here in place of the 12409 * original buf. 12410 */ 12411 shadow_bsp->mbs_orig_bp = bp; 12412 bp = shadow_bp; 12413 } 12414 12415 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12416 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 12417 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12418 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 12419 request_bytes); 12420 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12421 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 12422 12423 done: 12424 SD_NEXT_IOSTART(index, un, bp); 12425 12426 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12427 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 12428 } 12429 12430 12431 /* 12432 * Function: sd_mapblocksize_iodone 12433 * 12434 * Description: Completion side processing for block-size mapping.
12435 * 12436 * Context: May be called under interrupt context 12437 */ 12438 12439 static void 12440 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 12441 { 12442 struct sd_mapblocksize_info *bsp; 12443 struct sd_xbuf *xp; 12444 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 12445 struct buf *orig_bp; /* ptr to the original buf */ 12446 offset_t shadow_end; 12447 offset_t request_end; 12448 offset_t shadow_start; 12449 ssize_t copy_offset; 12450 size_t copy_length; 12451 size_t shortfall; 12452 uint_t is_write; /* TRUE if this bp is a WRITE */ 12453 uint_t has_wmap; /* TRUE if this bp has a wmap */ 12454 12455 ASSERT(un != NULL); 12456 ASSERT(bp != NULL); 12457 12458 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12459 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 12460 12461 /* 12462 * There is no shadow buf or layer-private data if the target is 12463 * using un->un_sys_blocksize as its block size or if bcount == 0. 12464 */ 12465 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12466 (bp->b_bcount == 0)) { 12467 goto exit; 12468 } 12469 12470 xp = SD_GET_XBUF(bp); 12471 ASSERT(xp != NULL); 12472 12473 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 12474 bsp = xp->xb_private; 12475 12476 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 12477 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 12478 12479 if (is_write) { 12480 /* 12481 * For a WRITE request we must free up the block range that 12482 * we have locked up. This holds regardless of whether this is 12483 * an aligned write request or a read-modify-write request. 12484 */ 12485 sd_range_unlock(un, bsp->mbs_wmp); 12486 bsp->mbs_wmp = NULL; 12487 } 12488 12489 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 12490 /* 12491 * An aligned read or write command will have no shadow buf; 12492 * there is not much else to do with it. 12493 */ 12494 goto done; 12495 } 12496 12497 orig_bp = bsp->mbs_orig_bp; 12498 ASSERT(orig_bp != NULL); 12499 orig_xp = SD_GET_XBUF(orig_bp); 12500 ASSERT(orig_xp != NULL); 12501 ASSERT(!mutex_owned(SD_MUTEX(un))); 12502 12503 if (!is_write && has_wmap) { 12504 /* 12505 * A READ with a wmap means this is the READ phase of a 12506 * read-modify-write. If an error occurred on the READ then 12507 * we do not proceed with the WRITE phase or copy any data. 12508 * Just release the write maps and return with an error. 12509 */ 12510 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 12511 orig_bp->b_resid = orig_bp->b_bcount; 12512 bioerror(orig_bp, bp->b_error); 12513 sd_range_unlock(un, bsp->mbs_wmp); 12514 goto freebuf_done; 12515 } 12516 } 12517 12518 /* 12519 * Here is where we set up to copy the data from the shadow buf 12520 * into the space associated with the original buf. 12521 * 12522 * To deal with the conversion between block sizes, these 12523 * computations treat the data as an array of bytes, with the 12524 * first byte (byte 0) corresponding to the first byte in the 12525 * first block on the disk. 12526 */ 12527 12528 /* 12529 * shadow_start and shadow_len indicate the location and size of 12530 * the data returned with the shadow IO request. 12531 */ 12532 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12533 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 12534 12535 /* 12536 * copy_offset gives the offset (in bytes) from the start of the first 12537 * block of the READ request to the beginning of the data.
We retrieve 12538 * this value from the layer-private data of the shadow xbuf, where it 12539 * was saved by sd_mapblocksize_iostart(). copy_length gives the amount of 12540 * data to be copied (in bytes). 12541 */ 12542 copy_offset = bsp->mbs_copy_offset; 12543 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 12544 copy_length = orig_bp->b_bcount; 12545 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 12546 12547 /* 12548 * Set up the resid and error fields of orig_bp as appropriate. 12549 */ 12550 if (shadow_end >= request_end) { 12551 /* We got all the requested data; set resid to zero */ 12552 orig_bp->b_resid = 0; 12553 } else { 12554 /* 12555 * We failed to get enough data to fully satisfy the original 12556 * request. Just copy back whatever data we got and set 12557 * up the residual and error code as required. 12558 * 12559 * 'shortfall' is the amount by which the data received with the 12560 * shadow buf has "fallen short" of the requested amount. 12561 */ 12562 shortfall = (size_t)(request_end - shadow_end); 12563 12564 if (shortfall > orig_bp->b_bcount) { 12565 /* 12566 * We did not get enough data to even partially 12567 * fulfill the original request. The residual is 12568 * equal to the amount requested. 12569 */ 12570 orig_bp->b_resid = orig_bp->b_bcount; 12571 } else { 12572 /* 12573 * We did not get all the data that we requested 12574 * from the device, but we will try to return what 12575 * portion we did get. 12576 */ 12577 orig_bp->b_resid = shortfall; 12578 } 12579 ASSERT(copy_length >= orig_bp->b_resid); 12580 copy_length -= orig_bp->b_resid; 12581 } 12582 12583 /* Propagate the error code from the shadow buf to the original buf */ 12584 bioerror(orig_bp, bp->b_error); 12585 12586 if (is_write) { 12587 goto freebuf_done; /* No data copying for a WRITE */ 12588 } 12589 12590 if (has_wmap) { 12591 /* 12592 * This is a READ command from the READ phase of a 12593 * read-modify-write request. We have to copy the data given 12594 * by the user OVER the data returned by the READ command, 12595 * then convert the command from a READ to a WRITE and send 12596 * it back to the target. 12597 */ 12598 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 12599 copy_length); 12600 12601 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 12602 12603 /* 12604 * Dispatch the WRITE command to the taskq thread, which 12605 * will in turn send the command to the target. When the 12606 * WRITE command completes, we (sd_mapblocksize_iodone()) 12607 * will get called again as part of the iodone chain 12608 * processing for it. Note that we will still be dealing 12609 * with the shadow buf at that point. 12610 */ 12611 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 12612 KM_NOSLEEP) != 0) { 12613 /* 12614 * Dispatch was successful so we are done. Return 12615 * without going any higher up the iodone chain. Do 12616 * not free up any layer-private data until after the 12617 * WRITE completes. 12618 */ 12619 return; 12620 } 12621 12622 /* 12623 * Dispatch of the WRITE command failed; set up the error 12624 * condition and send this IO back up the iodone chain. 12625 */ 12626 bioerror(orig_bp, EIO); 12627 orig_bp->b_resid = orig_bp->b_bcount; 12628 12629 } else { 12630 /* 12631 * This is a regular READ request (ie, not a RMW). Copy the 12632 * data from the shadow buf into the original buf. The
The 12633 * copy_offset compensates for any "misalignment" between the 12634 * shadow buf (with its un->un_tgt_blocksize blocks) and the 12635 * original buf (with its un->un_sys_blocksize blocks). 12636 */ 12637 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 12638 copy_length); 12639 } 12640 12641 freebuf_done: 12642 12643 /* 12644 * At this point we still have both the shadow buf AND the original 12645 * buf to deal with, as well as the layer-private data area in each. 12646 * Local variables are as follows: 12647 * 12648 * bp -- points to shadow buf 12649 * xp -- points to xbuf of shadow buf 12650 * bsp -- points to layer-private data area of shadow buf 12651 * orig_bp -- points to original buf 12652 * 12653 * First free the shadow buf and its associated xbuf, then free the 12654 * layer-private data area from the shadow buf. There is no need to 12655 * restore xb_private in the shadow xbuf. 12656 */ 12657 sd_shadow_buf_free(bp); 12658 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12659 12660 /* 12661 * Now update the local variables to point to the original buf, xbuf, 12662 * and layer-private area. 12663 */ 12664 bp = orig_bp; 12665 xp = SD_GET_XBUF(bp); 12666 ASSERT(xp != NULL); 12667 ASSERT(xp == orig_xp); 12668 bsp = xp->xb_private; 12669 ASSERT(bsp != NULL); 12670 12671 done: 12672 /* 12673 * Restore xb_private to whatever it was set to by the next higher 12674 * layer in the chain, then free the layer-private data area. 12675 */ 12676 xp->xb_private = bsp->mbs_oprivate; 12677 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12678 12679 exit: 12680 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 12681 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 12682 12683 SD_NEXT_IODONE(index, un, bp); 12684 } 12685 12686 12687 /* 12688 * Function: sd_checksum_iostart 12689 * 12690 * Description: A stub function for a layer that's currently not used. 12691 * For now just a placeholder. 12692 * 12693 * Context: Kernel thread context 12694 */ 12695 12696 static void 12697 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 12698 { 12699 ASSERT(un != NULL); 12700 ASSERT(bp != NULL); 12701 ASSERT(!mutex_owned(SD_MUTEX(un))); 12702 SD_NEXT_IOSTART(index, un, bp); 12703 } 12704 12705 12706 /* 12707 * Function: sd_checksum_iodone 12708 * 12709 * Description: A stub function for a layer that's currently not used. 12710 * For now just a placeholder. 12711 * 12712 * Context: May be called under interrupt context 12713 */ 12714 12715 static void 12716 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 12717 { 12718 ASSERT(un != NULL); 12719 ASSERT(bp != NULL); 12720 ASSERT(!mutex_owned(SD_MUTEX(un))); 12721 SD_NEXT_IODONE(index, un, bp); 12722 } 12723 12724 12725 /* 12726 * Function: sd_checksum_uscsi_iostart 12727 * 12728 * Description: A stub function for a layer that's currently not used. 12729 * For now just a placeholder. 12730 * 12731 * Context: Kernel thread context 12732 */ 12733 12734 static void 12735 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 12736 { 12737 ASSERT(un != NULL); 12738 ASSERT(bp != NULL); 12739 ASSERT(!mutex_owned(SD_MUTEX(un))); 12740 SD_NEXT_IOSTART(index, un, bp); 12741 } 12742 12743 12744 /* 12745 * Function: sd_checksum_uscsi_iodone 12746 * 12747 * Description: A stub function for a layer that's currently not used. 12748 * For now just a placeholder. 
12749 *
12750 * Context: May be called under interrupt context
12751 */
12752
12753 static void
12754 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
12755 {
12756 ASSERT(un != NULL);
12757 ASSERT(bp != NULL);
12758 ASSERT(!mutex_owned(SD_MUTEX(un)));
12759 SD_NEXT_IODONE(index, un, bp);
12760 }
12761
12762
12763 /*
12764 * Function: sd_pm_iostart
12765 *
12766 * Description: iostart-side routine for power management.
12767 *
12768 * Context: Kernel thread context
12769 */
12770
12771 static void
12772 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
12773 {
12774 ASSERT(un != NULL);
12775 ASSERT(bp != NULL);
12776 ASSERT(!mutex_owned(SD_MUTEX(un)));
12777 ASSERT(!mutex_owned(&un->un_pm_mutex));
12778
12779 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");
12780
12781 if (sd_pm_entry(un) != DDI_SUCCESS) {
12782 /*
12783 * Set up to return the failed buf back up the 'iodone'
12784 * side of the calling chain.
12785 */
12786 bioerror(bp, EIO);
12787 bp->b_resid = bp->b_bcount;
12788
12789 SD_BEGIN_IODONE(index, un, bp);
12790
12791 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
12792 return;
12793 }
12794
12795 SD_NEXT_IOSTART(index, un, bp);
12796
12797 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
12798 }
12799
12800
12801 /*
12802 * Function: sd_pm_iodone
12803 *
12804 * Description: iodone-side routine for power management.
12805 *
12806 * Context: may be called from interrupt context
12807 */
12808
12809 static void
12810 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
12811 {
12812 ASSERT(un != NULL);
12813 ASSERT(bp != NULL);
12814 ASSERT(!mutex_owned(&un->un_pm_mutex));
12815
12816 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");
12817
12818 /*
12819 * After attach the following flag is only read, so don't
12820 * take the penalty of acquiring a mutex for it.
12821 */
12822 if (un->un_f_pm_is_enabled == TRUE) {
12823 sd_pm_exit(un);
12824 }
12825
12826 SD_NEXT_IODONE(index, un, bp);
12827
12828 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
12829 }
12830
12831
12832 /*
12833 * Function: sd_core_iostart
12834 *
12835 * Description: Primary driver function for enqueuing buf(9S) structs from
12836 * the system and initiating IO to the target device
12837 *
12838 * Context: Kernel thread context. Can sleep.
12839 *
12840 * Assumptions: - The given xp->xb_blkno is absolute
12841 * (ie, relative to the start of the device).
12842 * - The IO is to be done using the native blocksize of
12843 * the device, as specified in un->un_tgt_blocksize.
12844 */
12845 /* ARGSUSED */
12846 static void
12847 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
12848 {
12849 struct sd_xbuf *xp;
12850
12851 ASSERT(un != NULL);
12852 ASSERT(bp != NULL);
12853 ASSERT(!mutex_owned(SD_MUTEX(un)));
12854 ASSERT(bp->b_resid == 0);
12855
12856 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);
12857
12858 xp = SD_GET_XBUF(bp);
12859 ASSERT(xp != NULL);
12860
12861 mutex_enter(SD_MUTEX(un));
12862
12863 /*
12864 * If we are currently in the failfast state, fail any new IO
12865 * that has B_FAILFAST set, then return.
12866 */
12867 if ((bp->b_flags & B_FAILFAST) &&
12868 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
12869 mutex_exit(SD_MUTEX(un));
12870 bioerror(bp, EIO);
12871 bp->b_resid = bp->b_bcount;
12872 SD_BEGIN_IODONE(index, un, bp);
12873 return;
12874 }
12875
12876 if (SD_IS_DIRECT_PRIORITY(xp)) {
12877 /*
12878 * Priority command -- transport it immediately.
12879 * 12880 * Note: We may want to assert that USCSI_DIAGNOSE is set, 12881 * because all direct priority commands should be associated 12882 * with error recovery actions which we don't want to retry. 12883 */ 12884 sd_start_cmds(un, bp); 12885 } else { 12886 /* 12887 * Normal command -- add it to the wait queue, then start 12888 * transporting commands from the wait queue. 12889 */ 12890 sd_add_buf_to_waitq(un, bp); 12891 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 12892 sd_start_cmds(un, NULL); 12893 } 12894 12895 mutex_exit(SD_MUTEX(un)); 12896 12897 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 12898 } 12899 12900 12901 /* 12902 * Function: sd_init_cdb_limits 12903 * 12904 * Description: This is to handle scsi_pkt initialization differences 12905 * between the driver platforms. 12906 * 12907 * Legacy behaviors: 12908 * 12909 * If the block number or the sector count exceeds the 12910 * capabilities of a Group 0 command, shift over to a 12911 * Group 1 command. We don't blindly use Group 1 12912 * commands because a) some drives (CDC Wren IVs) get a 12913 * bit confused, and b) there is probably a fair amount 12914 * of speed difference for a target to receive and decode 12915 * a 10 byte command instead of a 6 byte command. 12916 * 12917 * The xfer time difference of 6 vs 10 byte CDBs is 12918 * still significant so this code is still worthwhile. 12919 * 10 byte CDBs are very inefficient with the fas HBA driver 12920 * and older disks. Each CDB byte took 1 usec with some 12921 * popular disks. 12922 * 12923 * Context: Must be called at attach time 12924 */ 12925 12926 static void 12927 sd_init_cdb_limits(struct sd_lun *un) 12928 { 12929 int hba_cdb_limit; 12930 12931 /* 12932 * Use CDB_GROUP1 commands for most devices except for 12933 * parallel SCSI fixed drives in which case we get better 12934 * performance using CDB_GROUP0 commands (where applicable). 12935 */ 12936 un->un_mincdb = SD_CDB_GROUP1; 12937 #if !defined(__fibre) 12938 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 12939 !un->un_f_has_removable_media) { 12940 un->un_mincdb = SD_CDB_GROUP0; 12941 } 12942 #endif 12943 12944 /* 12945 * Try to read the max-cdb-length supported by HBA. 12946 */ 12947 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1); 12948 if (0 >= un->un_max_hba_cdb) { 12949 un->un_max_hba_cdb = CDB_GROUP4; 12950 hba_cdb_limit = SD_CDB_GROUP4; 12951 } else if (0 < un->un_max_hba_cdb && 12952 un->un_max_hba_cdb < CDB_GROUP1) { 12953 hba_cdb_limit = SD_CDB_GROUP0; 12954 } else if (CDB_GROUP1 <= un->un_max_hba_cdb && 12955 un->un_max_hba_cdb < CDB_GROUP5) { 12956 hba_cdb_limit = SD_CDB_GROUP1; 12957 } else if (CDB_GROUP5 <= un->un_max_hba_cdb && 12958 un->un_max_hba_cdb < CDB_GROUP4) { 12959 hba_cdb_limit = SD_CDB_GROUP5; 12960 } else { 12961 hba_cdb_limit = SD_CDB_GROUP4; 12962 } 12963 12964 /* 12965 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 12966 * commands for fixed disks unless we are building for a 32 bit 12967 * kernel. 12968 */ 12969 #ifdef _LP64 12970 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 12971 min(hba_cdb_limit, SD_CDB_GROUP4); 12972 #else 12973 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 : 12974 min(hba_cdb_limit, SD_CDB_GROUP1); 12975 #endif 12976 12977 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 12978 ? sizeof (struct scsi_arq_status) : 1); 12979 un->un_cmd_timeout = (ushort_t)sd_io_time; 12980 un->un_uscsi_timeout = ((ISCD(un)) ? 
2 : 1) * un->un_cmd_timeout;
12981 }
12982
12983
12984 /*
12985 * Function: sd_initpkt_for_buf
12986 *
12987 * Description: Allocate and initialize for transport a scsi_pkt struct,
12988 * based upon the info specified in the given buf struct.
12989 *
12990 * Assumes the xb_blkno in the request is absolute (ie,
12991 * relative to the start of the device, NOT the partition).
12992 * Also assumes that the request is using the native block
12993 * size of the device (as returned by the READ CAPACITY
12994 * command).
12995 *
12996 * Return Code: SD_PKT_ALLOC_SUCCESS
12997 * SD_PKT_ALLOC_FAILURE
12998 * SD_PKT_ALLOC_FAILURE_NO_DMA
12999 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13000 *
13001 * Context: Kernel thread and may be called from software interrupt context
13002 * as part of a sdrunout callback. This function may not block or
13003 * call routines that block
13004 */
13005
13006 static int
13007 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
13008 {
13009 struct sd_xbuf *xp;
13010 struct scsi_pkt *pktp = NULL;
13011 struct sd_lun *un;
13012 size_t blockcount;
13013 daddr_t startblock;
13014 int rval;
13015 int cmd_flags;
13016
13017 ASSERT(bp != NULL);
13018 ASSERT(pktpp != NULL);
13019 xp = SD_GET_XBUF(bp);
13020 ASSERT(xp != NULL);
13021 un = SD_GET_UN(bp);
13022 ASSERT(un != NULL);
13023 ASSERT(mutex_owned(SD_MUTEX(un)));
13024 ASSERT(bp->b_resid == 0);
13025
13026 SD_TRACE(SD_LOG_IO_CORE, un,
13027 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);
13028
13029 mutex_exit(SD_MUTEX(un));
13030
13031 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13032 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
13033 /*
13034 * Already have a scsi_pkt -- just need DMA resources.
13035 * We must recompute the CDB in case the mapping returns
13036 * a nonzero pkt_resid.
13037 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
13038 * that is being retried, the unmap/remap of the DMA resources
13039 * will result in the entire transfer starting over again
13040 * from the very first block.
13041 */
13042 ASSERT(xp->xb_pktp != NULL);
13043 pktp = xp->xb_pktp;
13044 } else {
13045 pktp = NULL;
13046 }
13047 #endif /* __i386 || __amd64 */
13048
13049 startblock = xp->xb_blkno; /* Absolute block num. */
13050 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
13051
13052 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);
13053
13054 /*
13055 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
13056 * call scsi_init_pkt, and build the CDB.
13057 */
13058 rval = sd_setup_rw_pkt(un, &pktp, bp,
13059 cmd_flags, sdrunout, (caddr_t)un,
13060 startblock, blockcount);
13061
13062 if (rval == 0) {
13063 /*
13064 * Success.
13065 *
13066 * If partial DMA is being used and required for this transfer,
13067 * set it up here.
13068 */
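/*
 * Illustrative scenario (the sizes are hypothetical): for a 1 MB
 * request on an HBA that binds only 256 KB at a time under
 * PKT_DMA_PARTIAL, scsi_init_pkt() comes back with pkt_resid ==
 * 768 KB. That remainder is parked in xp->xb_dma_resid below and
 * pkt_resid is cleared; the untransferred portion is then picked
 * up by later transports built via sd_setup_next_rw_pkt(), until
 * xb_dma_resid drains to zero.
 */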
13069 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
13070 (pktp->pkt_resid != 0)) {
13071
13072 /*
13073 * Save the CDB length and pkt_resid for the
13074 * next xfer
13075 */
13076 xp->xb_dma_resid = pktp->pkt_resid;
13077
13078 /* rezero resid */
13079 pktp->pkt_resid = 0;
13080
13081 } else {
13082 xp->xb_dma_resid = 0;
13083 }
13084
13085 pktp->pkt_flags = un->un_tagflags;
13086 pktp->pkt_time = un->un_cmd_timeout;
13087 pktp->pkt_comp = sdintr;
13088
13089 pktp->pkt_private = bp;
13090 *pktpp = pktp;
13091
13092 SD_TRACE(SD_LOG_IO_CORE, un,
13093 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);
13094
13095 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13096 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
13097 #endif
13098
13099 mutex_enter(SD_MUTEX(un));
13100 return (SD_PKT_ALLOC_SUCCESS);
13101
13102 }
13103
13104 /*
13105 * SD_PKT_ALLOC_FAILURE is the only expected failure code
13106 * from sd_setup_rw_pkt.
13107 */
13108 ASSERT(rval == SD_PKT_ALLOC_FAILURE);
13109
13110 if (rval == SD_PKT_ALLOC_FAILURE) {
13111 *pktpp = NULL;
13112 /*
13113 * Set the driver state to RWAIT to indicate the driver
13114 * is waiting on resource allocations. The driver will not
13115 * suspend, pm_suspend, or detach while the state is RWAIT.
13116 */
13117 mutex_enter(SD_MUTEX(un));
13118 New_state(un, SD_STATE_RWAIT);
13119
13120 SD_ERROR(SD_LOG_IO_CORE, un,
13121 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);
13122
13123 if ((bp->b_flags & B_ERROR) != 0) {
13124 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
13125 }
13126 return (SD_PKT_ALLOC_FAILURE);
13127 } else {
13128 /*
13129 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13130 *
13131 * This should never happen. Maybe someone messed with the
13132 * kernel's minphys?
13133 */
13134 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13135 "Request rejected: too large for CDB: "
13136 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount);
13137 SD_ERROR(SD_LOG_IO_CORE, un,
13138 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
13139 mutex_enter(SD_MUTEX(un));
13140 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13141
13142 }
13143 }
13144
13145
13146 /*
13147 * Function: sd_destroypkt_for_buf
13148 *
13149 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
13150 *
13151 * Context: Kernel thread or interrupt context
13152 */
13153
13154 static void
13155 sd_destroypkt_for_buf(struct buf *bp)
13156 {
13157 ASSERT(bp != NULL);
13158 ASSERT(SD_GET_UN(bp) != NULL);
13159
13160 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13161 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);
13162
13163 ASSERT(SD_GET_PKTP(bp) != NULL);
13164 scsi_destroy_pkt(SD_GET_PKTP(bp));
13165
13166 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13167 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
13168 }
13169
13170 /*
13171 * Function: sd_setup_rw_pkt
13172 *
13173 * Description: Determines appropriate CDB group for the requested LBA
13174 * and transfer length, calls scsi_init_pkt, and builds
13175 * the CDB. Do not use for partial DMA transfers except
13176 * for the initial transfer since the CDB size must
13177 * remain constant.
13178 *
13179 * Context: Kernel thread and may be called from software interrupt
13180 * context as part of a sdrunout callback. This function may not
13181 * block or call routines that block
13182 */
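/*
 * For reference, the standard SCSI READ/WRITE limits that sd_cdbtab
 * is assumed to encode (a sketch, not a quote of the table): Group 0
 * carries a 21-bit LBA and up to 256 blocks; Group 1 a 32-bit LBA
 * and a 16-bit count; Group 5 a 32-bit LBA and a 32-bit count;
 * Group 4 a 64-bit LBA and a 32-bit count. So, for example, a
 * request at lba 0x200000 for 16 blocks cannot use Group 0 (the LBA
 * needs 22 bits) and would match Group 1 in the loop below.
 */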
13183
13184
13185 int
13186 sd_setup_rw_pkt(struct sd_lun *un,
13187 struct scsi_pkt **pktpp, struct buf *bp, int flags,
13188 int (*callback)(caddr_t), caddr_t callback_arg,
13189 diskaddr_t lba, uint32_t blockcount)
13190 {
13191 struct scsi_pkt *return_pktp;
13192 union scsi_cdb *cdbp;
13193 struct sd_cdbinfo *cp = NULL;
13194 int i;
13195
13196 /*
13197 * See which size CDB to use, based upon the request.
13198 */
13199 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {
13200
13201 /*
13202 * Check lba and block count against sd_cdbtab limits.
13203 * In the partial DMA case, we have to use the same size
13204 * CDB for all the transfers. Check lba + blockcount
13205 * against the max LBA so we know that segment of the
13206 * transfer can use the CDB we select.
13207 */
13208 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
13209 (blockcount <= sd_cdbtab[i].sc_maxlen)) {
13210
13211 /*
13212 * The command will fit into the CDB type
13213 * specified by sd_cdbtab[i].
13214 */
13215 cp = sd_cdbtab + i;
13216
13217 /*
13218 * Call scsi_init_pkt so we can fill in the
13219 * CDB.
13220 */
13221 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
13222 bp, cp->sc_grpcode, un->un_status_len, 0,
13223 flags, callback, callback_arg);
13224
13225 if (return_pktp != NULL) {
13226
13227 /*
13228 * Return new value of pkt
13229 */
13230 *pktpp = return_pktp;
13231
13232 /*
13233 * To be safe, zero the CDB ensuring there is
13234 * no leftover data from a previous command.
13235 */
13236 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);
13237
13238 /*
13239 * Handle partial DMA mapping
13240 */
13241 if (return_pktp->pkt_resid != 0) {
13242
13243 /*
13244 * Not going to xfer as many blocks as
13245 * originally expected
13246 */
13247 blockcount -=
13248 SD_BYTES2TGTBLOCKS(un,
13249 return_pktp->pkt_resid);
13250 }
13251
13252 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;
13253
13254 /*
13255 * Set command byte based on the CDB
13256 * type we matched.
13257 */
13258 cdbp->scc_cmd = cp->sc_grpmask |
13259 ((bp->b_flags & B_READ) ?
13260 SCMD_READ : SCMD_WRITE);
13261
13262 SD_FILL_SCSI1_LUN(un, return_pktp);
13263
13264 /*
13265 * Fill in LBA and length
13266 */
13267 ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
13268 (cp->sc_grpcode == CDB_GROUP4) ||
13269 (cp->sc_grpcode == CDB_GROUP0) ||
13270 (cp->sc_grpcode == CDB_GROUP5));
13271
13272 if (cp->sc_grpcode == CDB_GROUP1) {
13273 FORMG1ADDR(cdbp, lba);
13274 FORMG1COUNT(cdbp, blockcount);
13275 return (0);
13276 } else if (cp->sc_grpcode == CDB_GROUP4) {
13277 FORMG4LONGADDR(cdbp, lba);
13278 FORMG4COUNT(cdbp, blockcount);
13279 return (0);
13280 } else if (cp->sc_grpcode == CDB_GROUP0) {
13281 FORMG0ADDR(cdbp, lba);
13282 FORMG0COUNT(cdbp, blockcount);
13283 return (0);
13284 } else if (cp->sc_grpcode == CDB_GROUP5) {
13285 FORMG5ADDR(cdbp, lba);
13286 FORMG5COUNT(cdbp, blockcount);
13287 return (0);
13288 }
13289
13290 /*
13291 * It should be impossible to not match one
13292 * of the CDB types above, so we should never
13293 * reach this point. Set the CDB command byte
13294 * to test-unit-ready to avoid writing
13295 * to somewhere we don't intend.
13296 */
13297 cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
13298 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13299 } else {
13300 /*
13301 * Couldn't get scsi_pkt
13302 */
13303 return (SD_PKT_ALLOC_FAILURE);
13304 }
13305 }
13306 }
13307
13308 /*
13309 * None of the available CDB types were suitable.
This really 13310 * should never happen: on a 64 bit system we support 13311 * READ16/WRITE16 which will hold an entire 64 bit disk address 13312 * and on a 32 bit system we will refuse to bind to a device 13313 * larger than 2TB so addresses will never be larger than 32 bits. 13314 */ 13315 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13316 } 13317 13318 /* 13319 * Function: sd_setup_next_rw_pkt 13320 * 13321 * Description: Setup packet for partial DMA transfers, except for the 13322 * initial transfer. sd_setup_rw_pkt should be used for 13323 * the initial transfer. 13324 * 13325 * Context: Kernel thread and may be called from interrupt context. 13326 */ 13327 13328 int 13329 sd_setup_next_rw_pkt(struct sd_lun *un, 13330 struct scsi_pkt *pktp, struct buf *bp, 13331 diskaddr_t lba, uint32_t blockcount) 13332 { 13333 uchar_t com; 13334 union scsi_cdb *cdbp; 13335 uchar_t cdb_group_id; 13336 13337 ASSERT(pktp != NULL); 13338 ASSERT(pktp->pkt_cdbp != NULL); 13339 13340 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 13341 com = cdbp->scc_cmd; 13342 cdb_group_id = CDB_GROUPID(com); 13343 13344 ASSERT((cdb_group_id == CDB_GROUPID_0) || 13345 (cdb_group_id == CDB_GROUPID_1) || 13346 (cdb_group_id == CDB_GROUPID_4) || 13347 (cdb_group_id == CDB_GROUPID_5)); 13348 13349 /* 13350 * Move pkt to the next portion of the xfer. 13351 * func is NULL_FUNC so we do not have to release 13352 * the disk mutex here. 13353 */ 13354 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 13355 NULL_FUNC, NULL) == pktp) { 13356 /* Success. Handle partial DMA */ 13357 if (pktp->pkt_resid != 0) { 13358 blockcount -= 13359 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 13360 } 13361 13362 cdbp->scc_cmd = com; 13363 SD_FILL_SCSI1_LUN(un, pktp); 13364 if (cdb_group_id == CDB_GROUPID_1) { 13365 FORMG1ADDR(cdbp, lba); 13366 FORMG1COUNT(cdbp, blockcount); 13367 return (0); 13368 } else if (cdb_group_id == CDB_GROUPID_4) { 13369 FORMG4LONGADDR(cdbp, lba); 13370 FORMG4COUNT(cdbp, blockcount); 13371 return (0); 13372 } else if (cdb_group_id == CDB_GROUPID_0) { 13373 FORMG0ADDR(cdbp, lba); 13374 FORMG0COUNT(cdbp, blockcount); 13375 return (0); 13376 } else if (cdb_group_id == CDB_GROUPID_5) { 13377 FORMG5ADDR(cdbp, lba); 13378 FORMG5COUNT(cdbp, blockcount); 13379 return (0); 13380 } 13381 13382 /* Unreachable */ 13383 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13384 } 13385 13386 /* 13387 * Error setting up next portion of cmd transfer. 13388 * Something is definitely very wrong and this 13389 * should not happen. 13390 */ 13391 return (SD_PKT_ALLOC_FAILURE); 13392 } 13393 13394 /* 13395 * Function: sd_initpkt_for_uscsi 13396 * 13397 * Description: Allocate and initialize for transport a scsi_pkt struct, 13398 * based upon the info specified in the given uscsi_cmd struct. 13399 * 13400 * Return Code: SD_PKT_ALLOC_SUCCESS 13401 * SD_PKT_ALLOC_FAILURE 13402 * SD_PKT_ALLOC_FAILURE_NO_DMA 13403 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13404 * 13405 * Context: Kernel thread and may be called from software interrupt context 13406 * as part of a sdrunout callback. 
This function may not block or
13407 * call routines that block
13408 */
13409
13410 static int
13411 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
13412 {
13413 struct uscsi_cmd *uscmd;
13414 struct sd_xbuf *xp;
13415 struct scsi_pkt *pktp;
13416 struct sd_lun *un;
13417 uint32_t flags = 0;
13418
13419 ASSERT(bp != NULL);
13420 ASSERT(pktpp != NULL);
13421 xp = SD_GET_XBUF(bp);
13422 ASSERT(xp != NULL);
13423 un = SD_GET_UN(bp);
13424 ASSERT(un != NULL);
13425 ASSERT(mutex_owned(SD_MUTEX(un)));
13426
13427 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
13428 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
13429 ASSERT(uscmd != NULL);
13430
13431 SD_TRACE(SD_LOG_IO_CORE, un,
13432 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);
13433
13434 /*
13435 * Allocate the scsi_pkt for the command.
13436 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
13437 * during scsi_init_pkt time and will continue to use the
13438 * same path as long as the same scsi_pkt is used without
13439 * intervening scsi_dmafree(). Since a uscsi command does
13440 * not call scsi_dmafree() before retrying a failed command,
13441 * it is necessary to make sure the PKT_DMA_PARTIAL flag is
13442 * NOT set, so that scsi_vhci can use another available path
13443 * for the retry. Besides, a uscsi command does not allow DMA
13444 * breakup, so there is no need to set PKT_DMA_PARTIAL anyway.
13445 */
13446 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
13447 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
13448 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
13449 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
13450 - sizeof (struct scsi_extended_sense)), 0,
13451 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
13452 sdrunout, (caddr_t)un);
13453 } else {
13454 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
13455 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
13456 sizeof (struct scsi_arq_status), 0,
13457 (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
13458 sdrunout, (caddr_t)un);
13459 }
13460
13461 if (pktp == NULL) {
13462 *pktpp = NULL;
13463 /*
13464 * Set the driver state to RWAIT to indicate the driver
13465 * is waiting on resource allocations. The driver will not
13466 * suspend, pm_suspend, or detach while the state is RWAIT.
13467 */
13468 New_state(un, SD_STATE_RWAIT);
13469
13470 SD_ERROR(SD_LOG_IO_CORE, un,
13471 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);
13472
13473 if ((bp->b_flags & B_ERROR) != 0) {
13474 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
13475 }
13476 return (SD_PKT_ALLOC_FAILURE);
13477 }
13478
13479 /*
13480 * We do not do DMA breakup for USCSI commands, so return failure
13481 * here if all the needed DMA resources were not allocated.
13482 */
13483 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
13484 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
13485 scsi_destroy_pkt(pktp);
13486 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
13487 "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
13488 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
13489 }
13490
13491 /* Init the cdb from the given uscsi struct */
13492 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
13493 uscmd->uscsi_cdb[0], 0, 0, 0);
13494
13495 SD_FILL_SCSI1_LUN(un, pktp);
13496
13497 /*
13498 * Set up the optional USCSI flags. See the uscsi (7I) man page
13499 * for listing of the supported flags.
13500 */
13501
13502 if (uscmd->uscsi_flags & USCSI_SILENT) {
13503 flags |= FLAG_SILENT;
13504 }
13505
13506 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
13507 flags |= FLAG_DIAGNOSE;
13508 }
13509
13510 if (uscmd->uscsi_flags & USCSI_ISOLATE) {
13511 flags |= FLAG_ISOLATE;
13512 }
13513
13514 if (un->un_f_is_fibre == FALSE) {
13515 if (uscmd->uscsi_flags & USCSI_RENEGOT) {
13516 flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
13517 }
13518 }
13519
13520 /*
13521 * Set the pkt flags here so we save time later.
13522 * Note: These flags are NOT in the uscsi man page!!!
13523 */
13524 if (uscmd->uscsi_flags & USCSI_HEAD) {
13525 flags |= FLAG_HEAD;
13526 }
13527
13528 if (uscmd->uscsi_flags & USCSI_NOINTR) {
13529 flags |= FLAG_NOINTR;
13530 }
13531
13532 /*
13533 * For tagged queueing, things get a bit complicated.
13534 * Check first for head of queue and last for ordered queue.
13535 * If neither head nor order, use the default driver tag flags.
13536 */
13537 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
13538 if (uscmd->uscsi_flags & USCSI_HTAG) {
13539 flags |= FLAG_HTAG;
13540 } else if (uscmd->uscsi_flags & USCSI_OTAG) {
13541 flags |= FLAG_OTAG;
13542 } else {
13543 flags |= un->un_tagflags & FLAG_TAGMASK;
13544 }
13545 }
13546
13547 if (uscmd->uscsi_flags & USCSI_NODISCON) {
13548 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
13549 }
13550
13551 pktp->pkt_flags = flags;
13552
13553 /* Transfer uscsi information to scsi_pkt */
13554 (void) scsi_uscsi_pktinit(uscmd, pktp);
13555
13556 /* Copy the caller's CDB into the pkt... */
13557 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);
13558
13559 if (uscmd->uscsi_timeout == 0) {
13560 pktp->pkt_time = un->un_uscsi_timeout;
13561 } else {
13562 pktp->pkt_time = uscmd->uscsi_timeout;
13563 }
13564
13565 /* need it later to identify USCSI request in sdintr */
13566 xp->xb_pkt_flags |= SD_XB_USCSICMD;
13567
13568 xp->xb_sense_resid = uscmd->uscsi_rqresid;
13569
13570 pktp->pkt_private = bp;
13571 pktp->pkt_comp = sdintr;
13572 *pktpp = pktp;
13573
13574 SD_TRACE(SD_LOG_IO_CORE, un,
13575 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);
13576
13577 return (SD_PKT_ALLOC_SUCCESS);
13578 }
13579
13580
13581 /*
13582 * Function: sd_destroypkt_for_uscsi
13583 *
13584 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
13585 * IOs. Also saves relevant info into the associated uscsi_cmd
13586 * struct.
13587 *
13588 * Context: May be called under interrupt context
13589 */
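/*
 * For context, a minimal sketch of the user-level contract being
 * completed here (illustrative only; not code from this driver).
 * A caller typically issues the command through the USCSIOCMD
 * ioctl with auto request sense enabled, then examines the fields
 * that sd_destroypkt_for_uscsi() fills in:
 *
 *	struct uscsi_cmd ucmd;
 *	char cdb[CDB_GROUP0], sense[SENSE_LENGTH];
 *
 *	bzero(&ucmd, sizeof (ucmd));
 *	ucmd.uscsi_cdb = cdb;
 *	ucmd.uscsi_cdblen = CDB_GROUP0;
 *	ucmd.uscsi_rqbuf = sense;
 *	ucmd.uscsi_rqlen = sizeof (sense);
 *	ucmd.uscsi_flags = USCSI_READ | USCSI_RQENABLE;
 *	if (ioctl(fd, USCSIOCMD, &ucmd) != 0)
 *		(void) fprintf(stderr, "status 0x%x rqstatus 0x%x\n",
 *		    ucmd.uscsi_status, ucmd.uscsi_rqstatus);
 */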
13590
13591 static void
13592 sd_destroypkt_for_uscsi(struct buf *bp)
13593 {
13594 struct uscsi_cmd *uscmd;
13595 struct sd_xbuf *xp;
13596 struct scsi_pkt *pktp;
13597 struct sd_lun *un;
13598 struct sd_uscsi_info *suip;
13599
13600 ASSERT(bp != NULL);
13601 xp = SD_GET_XBUF(bp);
13602 ASSERT(xp != NULL);
13603 un = SD_GET_UN(bp);
13604 ASSERT(un != NULL);
13605 ASSERT(!mutex_owned(SD_MUTEX(un)));
13606 pktp = SD_GET_PKTP(bp);
13607 ASSERT(pktp != NULL);
13608
13609 SD_TRACE(SD_LOG_IO_CORE, un,
13610 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);
13611
13612 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
13613 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
13614 ASSERT(uscmd != NULL);
13615
13616 /* Save the status and the residual into the uscsi_cmd struct */
13617 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
13618 uscmd->uscsi_resid = bp->b_resid;
13619
13620 /* Transfer scsi_pkt information to uscsi */
13621 (void) scsi_uscsi_pktfini(pktp, uscmd);
13622
13623 /*
13624 * If enabled, copy any saved sense data into the area specified
13625 * by the uscsi command.
13626 */
13627 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
13628 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
13629 /*
13630 * Note: uscmd->uscsi_rqbuf should always point to a buffer
13631 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
13632 */
13633 uscmd->uscsi_rqstatus = xp->xb_sense_status;
13634 uscmd->uscsi_rqresid = xp->xb_sense_resid;
13635 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
13636 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
13637 MAX_SENSE_LENGTH);
13638 } else {
13639 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
13640 SENSE_LENGTH);
13641 }
13642 }
13643 /*
13644 * The following assignments are for SCSI FMA.
13645 */
13646 ASSERT(xp->xb_private != NULL);
13647 suip = (struct sd_uscsi_info *)xp->xb_private;
13648 suip->ui_pkt_reason = pktp->pkt_reason;
13649 suip->ui_pkt_state = pktp->pkt_state;
13650 suip->ui_pkt_statistics = pktp->pkt_statistics;
13651 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
13652
13653 /* We are done with the scsi_pkt; free it now */
13654 ASSERT(SD_GET_PKTP(bp) != NULL);
13655 scsi_destroy_pkt(SD_GET_PKTP(bp));
13656
13657 SD_TRACE(SD_LOG_IO_CORE, un,
13658 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
13659 }
13660
13661
13662 /*
13663 * Function: sd_bioclone_alloc
13664 *
13665 * Description: Allocate a buf(9S) and init it as per the given buf
13666 * and the various arguments. The associated sd_xbuf
13667 * struct is (nearly) duplicated. The struct buf *bp
13668 * argument is saved in new_xp->xb_private.
13669 *
13670 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
13671 * datalen - size of data area for the shadow bp
13672 * blkno - starting LBA
13673 * func - function pointer for b_iodone in the shadow buf. (May
13674 * be NULL if none.)
13675 *
13676 * Return Code: Pointer to the allocated buf(9S) struct
13677 *
13678 * Context: Can sleep.
13679 */
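/*
 * Typical usage (a hedged sketch; see the actual call sites for
 * specifics): a layer that must issue IO on behalf of bp with a
 * different size or starting block clones it and chains its own
 * iodone routine, along the lines of:
 *
 *	new_bp = sd_bioclone_alloc(bp, count, blkno,
 *	    (int (*)(struct buf *))sd_mapblockaddr_iodone);
 *
 * and later recovers the original bp from the shadow xbuf's
 * xb_private in that iodone routine, before releasing the clone
 * with sd_bioclone_free().
 */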
13680
13681 static struct buf *
13682 sd_bioclone_alloc(struct buf *bp, size_t datalen,
13683 daddr_t blkno, int (*func)(struct buf *))
13684 {
13685 struct sd_lun *un;
13686 struct sd_xbuf *xp;
13687 struct sd_xbuf *new_xp;
13688 struct buf *new_bp;
13689
13690 ASSERT(bp != NULL);
13691 xp = SD_GET_XBUF(bp);
13692 ASSERT(xp != NULL);
13693 un = SD_GET_UN(bp);
13694 ASSERT(un != NULL);
13695 ASSERT(!mutex_owned(SD_MUTEX(un)));
13696
13697 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
13698 NULL, KM_SLEEP);
13699
13700 new_bp->b_lblkno = blkno;
13701
13702 /*
13703 * Allocate an xbuf for the shadow bp and copy the contents of the
13704 * original xbuf into it.
13705 */
13706 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
13707 bcopy(xp, new_xp, sizeof (struct sd_xbuf));
13708
13709 /*
13710 * The given bp is automatically saved in the xb_private member
13711 * of the new xbuf. Callers are allowed to depend on this.
13712 */
13713 new_xp->xb_private = bp;
13714
13715 new_bp->b_private = new_xp;
13716
13717 return (new_bp);
13718 }
13719
13720 /*
13721 * Function: sd_shadow_buf_alloc
13722 *
13723 * Description: Allocate a buf(9S) and init it as per the given buf
13724 * and the various arguments. The associated sd_xbuf
13725 * struct is (nearly) duplicated. The struct buf *bp
13726 * argument is saved in new_xp->xb_private.
13727 *
13728 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
13729 * datalen - size of data area for the shadow bp
13730 * bflags - B_READ or B_WRITE (pseudo flag)
13731 * blkno - starting LBA
13732 * func - function pointer for b_iodone in the shadow buf. (May
13733 * be NULL if none.)
13734 *
13735 * Return Code: Pointer to the allocated buf(9S) struct
13736 *
13737 * Context: Can sleep.
13738 */
13739
13740 static struct buf *
13741 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
13742 daddr_t blkno, int (*func)(struct buf *))
13743 {
13744 struct sd_lun *un;
13745 struct sd_xbuf *xp;
13746 struct sd_xbuf *new_xp;
13747 struct buf *new_bp;
13748
13749 ASSERT(bp != NULL);
13750 xp = SD_GET_XBUF(bp);
13751 ASSERT(xp != NULL);
13752 un = SD_GET_UN(bp);
13753 ASSERT(un != NULL);
13754 ASSERT(!mutex_owned(SD_MUTEX(un)));
13755
13756 if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
13757 bp_mapin(bp);
13758 }
13759
13760 bflags &= (B_READ | B_WRITE);
13761 #if defined(__i386) || defined(__amd64)
13762 new_bp = getrbuf(KM_SLEEP);
13763 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
13764 new_bp->b_bcount = datalen;
13765 new_bp->b_flags = bflags |
13766 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
13767 #else
13768 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
13769 datalen, bflags, SLEEP_FUNC, NULL);
13770 #endif
13771 new_bp->av_forw = NULL;
13772 new_bp->av_back = NULL;
13773 new_bp->b_dev = bp->b_dev;
13774 new_bp->b_blkno = blkno;
13775 new_bp->b_iodone = func;
13776 new_bp->b_edev = bp->b_edev;
13777 new_bp->b_resid = 0;
13778
13779 /* We need to preserve the B_FAILFAST flag */
13780 if (bp->b_flags & B_FAILFAST) {
13781 new_bp->b_flags |= B_FAILFAST;
13782 }
13783
13784 /*
13785 * Allocate an xbuf for the shadow bp and copy the contents of the
13786 * original xbuf into it.
13787 */
13788 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
13789 bcopy(xp, new_xp, sizeof (struct sd_xbuf));
13790
13791 /* Need later to copy data between the shadow buf & original buf! */
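/*
 * The PKT_CONSISTENT flag is parked in xb_pkt_flags here so that,
 * when sd_initpkt_for_buf() later ORs xb_pkt_flags into the
 * scsi_init_pkt(9F) flags, the shadow buf gets a consistent DMA
 * mapping. That appears to be the point of the note above: the
 * driver itself bcopy()s data in and out of this buffer, so its
 * contents must stay coherent with what the device transfers.
 */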
13792 new_xp->xb_pkt_flags |= PKT_CONSISTENT;
13793
13794 /*
13795 * The given bp is automatically saved in the xb_private member
13796 * of the new xbuf. Callers are allowed to depend on this.
13797 */
13798 new_xp->xb_private = bp;
13799
13800 new_bp->b_private = new_xp;
13801
13802 return (new_bp);
13803 }
13804
13805 /*
13806 * Function: sd_bioclone_free
13807 *
13808 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
13809 * in the larger than partition operation.
13810 *
13811 * Context: May be called under interrupt context
13812 */
13813
13814 static void
13815 sd_bioclone_free(struct buf *bp)
13816 {
13817 struct sd_xbuf *xp;
13818
13819 ASSERT(bp != NULL);
13820 xp = SD_GET_XBUF(bp);
13821 ASSERT(xp != NULL);
13822
13823 /*
13824 * Call bp_mapout() before freeing the buf, in case a lower
13825 * layer or HBA had done a bp_mapin(). We must do this here
13826 * as we are the "originator" of the shadow buf.
13827 */
13828 bp_mapout(bp);
13829
13830 /*
13831 * Null out b_iodone before freeing the bp, to ensure that the driver
13832 * never gets confused by a stale value in this field. (Just a little
13833 * extra defensiveness here.)
13834 */
13835 bp->b_iodone = NULL;
13836
13837 freerbuf(bp);
13838
13839 kmem_free(xp, sizeof (struct sd_xbuf));
13840 }
13841
13842 /*
13843 * Function: sd_shadow_buf_free
13844 *
13845 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
13846 *
13847 * Context: May be called under interrupt context
13848 */
13849
13850 static void
13851 sd_shadow_buf_free(struct buf *bp)
13852 {
13853 struct sd_xbuf *xp;
13854
13855 ASSERT(bp != NULL);
13856 xp = SD_GET_XBUF(bp);
13857 ASSERT(xp != NULL);
13858
13859 #if defined(__sparc)
13860 /*
13861 * Call bp_mapout() before freeing the buf, in case a lower
13862 * layer or HBA had done a bp_mapin(). We must do this here
13863 * as we are the "originator" of the shadow buf.
13864 */
13865 bp_mapout(bp);
13866 #endif
13867
13868 /*
13869 * Null out b_iodone before freeing the bp, to ensure that the driver
13870 * never gets confused by a stale value in this field. (Just a little
13871 * extra defensiveness here.)
13872 */
13873 bp->b_iodone = NULL;
13874
13875 #if defined(__i386) || defined(__amd64)
13876 kmem_free(bp->b_un.b_addr, bp->b_bcount);
13877 freerbuf(bp);
13878 #else
13879 scsi_free_consistent_buf(bp);
13880 #endif
13881
13882 kmem_free(xp, sizeof (struct sd_xbuf));
13883 }
13884
13885
13886 /*
13887 * Function: sd_print_transport_rejected_message
13888 *
13889 * Description: This implements the ludicrously complex rules for printing
13890 * a "transport rejected" message. This is to address the
13891 * specific problem of having a flood of this error message
13892 * produced when a failover occurs.
13893 *
13894 * Context: Any.
13895 */
13896
13897 static void
13898 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
13899 int code)
13900 {
13901 ASSERT(un != NULL);
13902 ASSERT(mutex_owned(SD_MUTEX(un)));
13903 ASSERT(xp != NULL);
13904
13905 /*
13906 * Print the "transport rejected" message under the following
13907 * conditions:
13908 *
13909 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
13910 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
13911 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
13912 * printed the FIRST time a TRAN_FATAL_ERROR is returned from
13913 * scsi_transport(9F) (which indicates that the target might have
13914 * gone off-line).
This uses the un->un_tran_fatal_count
13915 * counter, which is incremented whenever a TRAN_FATAL_ERROR is
13916 * received, and reset to zero whenever a TRAN_ACCEPT is returned
13917 * from scsi_transport().
13918 *
13919 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
13920 * the preceding cases in order for the message to be printed.
13921 */
13922 if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) &&
13923 (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) {
13924 if ((sd_level_mask & SD_LOGMASK_DIAG) ||
13925 (code != TRAN_FATAL_ERROR) ||
13926 (un->un_tran_fatal_count == 1)) {
13927 switch (code) {
13928 case TRAN_BADPKT:
13929 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13930 "transport rejected bad packet\n");
13931 break;
13932 case TRAN_FATAL_ERROR:
13933 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13934 "transport rejected fatal error\n");
13935 break;
13936 default:
13937 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13938 "transport rejected (%d)\n", code);
13939 break;
13940 }
13941 }
13942 }
13943 }
13944
13945
13946 /*
13947 * Function: sd_add_buf_to_waitq
13948 *
13949 * Description: Add the given buf(9S) struct to the wait queue for the
13950 * instance. If sorting is enabled, then the buf is added
13951 * to the queue via an elevator sort algorithm (a la
13952 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key.
13953 * If sorting is not enabled, then the buf is just added
13954 * to the end of the wait queue.
13955 *
13956 * Return Code: void
13957 *
13958 * Context: Does not sleep/block, therefore technically can be called
13959 * from any context. However if sorting is enabled then the
13960 * execution time is indeterminate, and may take long if
13961 * the wait queue grows large.
13962 */
13963
13964 static void
13965 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
13966 {
13967 struct buf *ap;
13968
13969 ASSERT(bp != NULL);
13970 ASSERT(un != NULL);
13971 ASSERT(mutex_owned(SD_MUTEX(un)));
13972
13973 /* If the queue is empty, add the buf as the only entry & return. */
13974 if (un->un_waitq_headp == NULL) {
13975 ASSERT(un->un_waitq_tailp == NULL);
13976 un->un_waitq_headp = un->un_waitq_tailp = bp;
13977 bp->av_forw = NULL;
13978 return;
13979 }
13980
13981 ASSERT(un->un_waitq_tailp != NULL);
13982
13983 /*
13984 * If sorting is disabled, just add the buf to the tail end of
13985 * the wait queue and return.
13986 */
13987 if (un->un_f_disksort_disabled) {
13988 un->un_waitq_tailp->av_forw = bp;
13989 un->un_waitq_tailp = bp;
13990 bp->av_forw = NULL;
13991 return;
13992 }
13993
13994 /*
13995 * Sort through the list of requests currently on the wait queue
13996 * and add the new buf request at the appropriate position.
13997 *
13998 * The un->un_waitq_headp is an activity chain pointer on which
13999 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
14000 * first queue holds those requests which are positioned after
14001 * the current SD_GET_BLKNO() (in the first request); the second holds
14002 * requests which came in after their SD_GET_BLKNO() number was passed.
14003 * Thus we implement a one way scan, retracting after reaching
14004 * the end of the drive to the first request on the second
14005 * queue, at which time it becomes the first queue.
14006 * A one-way scan is natural because of the way UNIX read-ahead
14007 * blocks are allocated.
14008 *
14009 * If we lie after the first request, then we must locate the
14010 * second request list and add ourselves to it.
14011 */
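/*
 * Worked example (hypothetical block numbers): suppose the waitq
 * holds requests at blocks 10, 30, 50, 5, 8 -- the first list is
 * the ascending run [10, 30, 50] and the second is [5, 8]. A new
 * request at block 40 is at/after the head (10), so it sorts into
 * the first list between 30 and 50. A new request at block 7 is
 * below the head, so we scan to the inversion at 50 -> 5 and place
 * it between 5 and 8 in the second list.
 */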
14012 ap = un->un_waitq_headp;
14013 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
14014 while (ap->av_forw != NULL) {
14015 /*
14016 * Look for an "inversion" in the (normally
14017 * ascending) block numbers. This indicates
14018 * the start of the second request list.
14019 */
14020 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
14021 /*
14022 * Search the second request list for the
14023 * first request at a larger block number.
14024 * We go before that; however if there is
14025 * no such request, we go at the end.
14026 */
14027 do {
14028 if (SD_GET_BLKNO(bp) <
14029 SD_GET_BLKNO(ap->av_forw)) {
14030 goto insert;
14031 }
14032 ap = ap->av_forw;
14033 } while (ap->av_forw != NULL);
14034 goto insert; /* after last */
14035 }
14036 ap = ap->av_forw;
14037 }
14038
14039 /*
14040 * No inversions... we will go after the last, and
14041 * be the first request in the second request list.
14042 */
14043 goto insert;
14044 }
14045
14046 /*
14047 * Request is at/after the current request...
14048 * sort in the first request list.
14049 */
14050 while (ap->av_forw != NULL) {
14051 /*
14052 * We want to go after the current request (1) if
14053 * there is an inversion after it (i.e. it is the end
14054 * of the first request list), or (2) if the next
14055 * request is a larger block no. than our request.
14056 */
14057 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
14058 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
14059 goto insert;
14060 }
14061 ap = ap->av_forw;
14062 }
14063
14064 /*
14065 * Neither a second list nor a larger request, therefore
14066 * we go at the end of the first list (which is the same
14067 * as the end of the whole shebang).
14068 */
14069 insert:
14070 bp->av_forw = ap->av_forw;
14071 ap->av_forw = bp;
14072
14073 /*
14074 * If we inserted onto the tail end of the waitq, make sure the
14075 * tail pointer is updated.
14076 */
14077 if (ap == un->un_waitq_tailp) {
14078 un->un_waitq_tailp = bp;
14079 }
14080 }
14081
14082
14083 /*
14084 * Function: sd_start_cmds
14085 *
14086 * Description: Remove and transport cmds from the driver queues.
14087 *
14088 * Arguments: un - pointer to the unit (soft state) struct for the target.
14089 *
14090 * immed_bp - ptr to a buf to be transported immediately. Only
14091 * the immed_bp is transported; bufs on the waitq are not
14092 * processed and the un_retry_bp is not checked. If immed_bp is
14093 * NULL, then normal queue processing is performed.
14094 *
14095 * Context: May be called from kernel thread context, interrupt context,
14096 * or runout callback context. This function may not block or
14097 * call routines that block.
14098 */
14099
14100 static void
14101 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
14102 {
14103 struct sd_xbuf *xp;
14104 struct buf *bp;
14105 void (*statp)(kstat_io_t *);
14106 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14107 void (*saved_statp)(kstat_io_t *);
14108 #endif
14109 int rval;
14110 struct sd_fm_internal *sfip = NULL;
14111
14112 ASSERT(un != NULL);
14113 ASSERT(mutex_owned(SD_MUTEX(un)));
14114 ASSERT(un->un_ncmds_in_transport >= 0);
14115 ASSERT(un->un_throttle >= 0);
14116
14117 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");
14118
14119 do {
14120 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14121 saved_statp = NULL;
14122 #endif
14123
14124 /*
14125 * If we are syncing or dumping, fail the command to
14126 * avoid recursively calling back into scsi_transport().
14127 * The dump I/O itself uses a separate code path so this
14128 * only prevents non-dump I/O from being sent while dumping.
14129 * File system sync takes place before dumping begins.
14130 * During panic, filesystem I/O is allowed provided
14131 * un_in_callback is <= 1. This is to prevent recursion
14132 * such as sd_start_cmds -> scsi_transport -> sdintr ->
14133 * sd_start_cmds and so on. See panic.c for more information
14134 * about the states the system can be in during panic.
14135 */
14136 if ((un->un_state == SD_STATE_DUMPING) ||
14137 (ddi_in_panic() && (un->un_in_callback > 1))) {
14138 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14139 "sd_start_cmds: panicking\n");
14140 goto exit;
14141 }
14142
14143 if ((bp = immed_bp) != NULL) {
14144 /*
14145 * We have a bp that must be transported immediately.
14146 * It's OK to transport the immed_bp here without doing
14147 * the throttle limit check because the immed_bp is
14148 * always used in a retry/recovery case. This means
14149 * that we know we are not at the throttle limit by
14150 * virtue of the fact that to get here we must have
14151 * already gotten a command back via sdintr(). This also
14152 * relies on (1) the command on un_retry_bp preventing
14153 * further commands from the waitq from being issued;
14154 * and (2) the code in sd_retry_command checking the
14155 * throttle limit before issuing a delayed or immediate
14156 * retry. This holds even if the throttle limit is
14157 * currently ratcheted down from its maximum value.
14158 */
14159 statp = kstat_runq_enter;
14160 if (bp == un->un_retry_bp) {
14161 ASSERT((un->un_retry_statp == NULL) ||
14162 (un->un_retry_statp == kstat_waitq_enter) ||
14163 (un->un_retry_statp ==
14164 kstat_runq_back_to_waitq));
14165 /*
14166 * If the waitq kstat was incremented when
14167 * sd_set_retry_bp() queued this bp for a retry,
14168 * then we must set up statp so that the waitq
14169 * count will get decremented correctly below.
14170 * Also we must clear un->un_retry_statp to
14171 * ensure that we do not act on a stale value
14172 * in this field.
14173 */
14174 if ((un->un_retry_statp == kstat_waitq_enter) ||
14175 (un->un_retry_statp ==
14176 kstat_runq_back_to_waitq)) {
14177 statp = kstat_waitq_to_runq;
14178 }
14179 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14180 saved_statp = un->un_retry_statp;
14181 #endif
14182 un->un_retry_statp = NULL;
14183
14184 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14185 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
14186 "un_throttle:%d un_ncmds_in_transport:%d\n",
14187 un, un->un_retry_bp, un->un_throttle,
14188 un->un_ncmds_in_transport);
14189 } else {
14190 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
14191 "processing priority bp:0x%p\n", bp);
14192 }
14193
14194 } else if ((bp = un->un_waitq_headp) != NULL) {
14195 /*
14196 * A command on the waitq is ready to go, but do not
14197 * send it if:
14198 *
14199 * (1) the throttle limit has been reached, or
14200 * (2) a retry is pending, or
14201 * (3) a START_STOP_UNIT callback is pending, or
14202 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
14203 * command is pending.
14204 *
14205 * For all of these conditions, IO processing will
14206 * restart after the condition is cleared.
14207 */
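/*
 * Illustrative scenario (hypothetical numbers): with un_throttle
 * at 64 and 64 commands already in transport, a waitq command is
 * held here until sdintr() completes one and restarts queue
 * processing. An immed_bp retry, by contrast, bypasses these
 * checks entirely (see the immed_bp branch above); that is safe
 * because getting here with an immed_bp implies a command has
 * already come back via sdintr().
 */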
14208 if (un->un_ncmds_in_transport >= un->un_throttle) {
14209 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14210 "sd_start_cmds: exiting, "
14211 "throttle limit reached!\n");
14212 goto exit;
14213 }
14214 if (un->un_retry_bp != NULL) {
14215 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14216 "sd_start_cmds: exiting, retry pending!\n");
14217 goto exit;
14218 }
14219 if (un->un_startstop_timeid != NULL) {
14220 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14221 "sd_start_cmds: exiting, "
14222 "START_STOP pending!\n");
14223 goto exit;
14224 }
14225 if (un->un_direct_priority_timeid != NULL) {
14226 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14227 "sd_start_cmds: exiting, "
14228 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
14229 goto exit;
14230 }
14231
14232 /* Dequeue the command */
14233 un->un_waitq_headp = bp->av_forw;
14234 if (un->un_waitq_headp == NULL) {
14235 un->un_waitq_tailp = NULL;
14236 }
14237 bp->av_forw = NULL;
14238 statp = kstat_waitq_to_runq;
14239 SD_TRACE(SD_LOG_IO_CORE, un,
14240 "sd_start_cmds: processing waitq bp:0x%p\n", bp);
14241
14242 } else {
14243 /* No work to do so bail out now */
14244 SD_TRACE(SD_LOG_IO_CORE, un,
14245 "sd_start_cmds: no more work, exiting!\n");
14246 goto exit;
14247 }
14248
14249 /*
14250 * Reset the state to normal. This is the mechanism by which
14251 * the state transitions from either SD_STATE_RWAIT or
14252 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
14253 * If state is SD_STATE_PM_CHANGING then this command is
14254 * part of the device power control and the state must
14255 * not be put back to normal. Doing so would
14256 * allow new commands to proceed when they shouldn't,
14257 * as the device may be going off.
14258 */
14259 if ((un->un_state != SD_STATE_SUSPENDED) &&
14260 (un->un_state != SD_STATE_PM_CHANGING)) {
14261 New_state(un, SD_STATE_NORMAL);
14262 }
14263
14264 xp = SD_GET_XBUF(bp);
14265 ASSERT(xp != NULL);
14266
14267 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14268 /*
14269 * Allocate the scsi_pkt if we need one, or attach DMA
14270 * resources if we have a scsi_pkt that needs them. The
14271 * latter should only occur for commands that are being
14272 * retried.
14273 */
14274 if ((xp->xb_pktp == NULL) ||
14275 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
14276 #else
14277 if (xp->xb_pktp == NULL) {
14278 #endif
14279 /*
14280 * There is no scsi_pkt allocated for this buf. Call
14281 * the initpkt function to allocate & init one.
14282 *
14283 * The scsi_init_pkt runout callback functionality is
14284 * implemented as follows:
14285 *
14286 * 1) The initpkt function always calls
14287 * scsi_init_pkt(9F) with sdrunout specified as the
14288 * callback routine.
14289 * 2) A successful packet allocation is initialized and
14290 * the I/O is transported.
14291 * 3) The I/O associated with an allocation resource
14292 * failure is left on its queue to be retried via
14293 * runout or the next I/O.
14294 * 4) The I/O associated with a DMA error is removed
14295 * from the queue and failed with EIO. Processing of
14296 * the transport queues is also halted to be
14297 * restarted via runout or the next I/O.
14298 * 5) The I/O associated with a CDB size or packet
14299 * size error is removed from the queue and failed
14300 * with EIO. Processing of the transport queues is
14301 * continued.
14302 *
14303 * Note: there is no interface for canceling a runout
14304 * callback.
To prevent the driver from detaching or 14305 * suspending while a runout is pending the driver 14306 * state is set to SD_STATE_RWAIT 14307 * 14308 * Note: using the scsi_init_pkt callback facility can 14309 * result in an I/O request persisting at the head of 14310 * the list which cannot be satisfied even after 14311 * multiple retries. In the future the driver may 14312 * implement some kind of maximum runout count before 14313 * failing an I/O. 14314 * 14315 * Note: the use of funcp below may seem superfluous, 14316 * but it helps warlock figure out the correct 14317 * initpkt function calls (see [s]sd.wlcmd). 14318 */ 14319 struct scsi_pkt *pktp; 14320 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14321 14322 ASSERT(bp != un->un_rqs_bp); 14323 14324 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14325 switch ((*funcp)(bp, &pktp)) { 14326 case SD_PKT_ALLOC_SUCCESS: 14327 xp->xb_pktp = pktp; 14328 SD_TRACE(SD_LOG_IO_CORE, un, 14329 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14330 pktp); 14331 goto got_pkt; 14332 14333 case SD_PKT_ALLOC_FAILURE: 14334 /* 14335 * Temporary (hopefully) resource depletion. 14336 * Since retries and RQS commands always have a 14337 * scsi_pkt allocated, these cases should never 14338 * get here. So the only cases this needs to 14339 * handle is a bp from the waitq (which we put 14340 * back onto the waitq for sdrunout), or a bp 14341 * sent as an immed_bp (which we just fail). 14342 */ 14343 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14344 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14345 14346 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14347 14348 if (bp == immed_bp) { 14349 /* 14350 * If SD_XB_DMA_FREED is clear, then 14351 * this is a failure to allocate a 14352 * scsi_pkt, and we must fail the 14353 * command. 14354 */ 14355 if ((xp->xb_pkt_flags & 14356 SD_XB_DMA_FREED) == 0) { 14357 break; 14358 } 14359 14360 /* 14361 * If this immediate command is NOT our 14362 * un_retry_bp, then we must fail it. 14363 */ 14364 if (bp != un->un_retry_bp) { 14365 break; 14366 } 14367 14368 /* 14369 * We get here if this cmd is our 14370 * un_retry_bp that was DMAFREED, but 14371 * scsi_init_pkt() failed to reallocate 14372 * DMA resources when we attempted to 14373 * retry it. This can happen when an 14374 * mpxio failover is in progress, but 14375 * we don't want to just fail the 14376 * command in this case. 14377 * 14378 * Use timeout(9F) to restart it after 14379 * a 100ms delay. We don't want to 14380 * let sdrunout() restart it, because 14381 * sdrunout() is just supposed to start 14382 * commands that are sitting on the 14383 * wait queue. The un_retry_bp stays 14384 * set until the command completes, but 14385 * sdrunout can be called many times 14386 * before that happens. Since sdrunout 14387 * cannot tell if the un_retry_bp is 14388 * already in the transport, it could 14389 * end up calling scsi_transport() for 14390 * the un_retry_bp multiple times. 14391 * 14392 * Also: don't schedule the callback 14393 * if some other callback is already 14394 * pending. 14395 */ 14396 if (un->un_retry_statp == NULL) { 14397 /* 14398 * restore the kstat pointer to 14399 * keep kstat counts coherent 14400 * when we do retry the command. 
14401 */ 14402 un->un_retry_statp = 14403 saved_statp; 14404 } 14405 14406 if ((un->un_startstop_timeid == NULL) && 14407 (un->un_retry_timeid == NULL) && 14408 (un->un_direct_priority_timeid == 14409 NULL)) { 14410 14411 un->un_retry_timeid = 14412 timeout( 14413 sd_start_retry_command, 14414 un, SD_RESTART_TIMEOUT); 14415 } 14416 goto exit; 14417 } 14418 14419 #else 14420 if (bp == immed_bp) { 14421 break; /* Just fail the command */ 14422 } 14423 #endif 14424 14425 /* Add the buf back to the head of the waitq */ 14426 bp->av_forw = un->un_waitq_headp; 14427 un->un_waitq_headp = bp; 14428 if (un->un_waitq_tailp == NULL) { 14429 un->un_waitq_tailp = bp; 14430 } 14431 goto exit; 14432 14433 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14434 /* 14435 * HBA DMA resource failure. Fail the command 14436 * and continue processing of the queues. 14437 */ 14438 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14439 "sd_start_cmds: " 14440 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14441 break; 14442 14443 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14444 /* 14445 * Note:x86: Partial DMA mapping not supported 14446 * for USCSI commands, and all the needed DMA 14447 * resources were not allocated. 14448 */ 14449 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14450 "sd_start_cmds: " 14451 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14452 break; 14453 14454 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14455 /* 14456 * Note:x86: Request cannot fit into CDB based 14457 * on lba and len. 14458 */ 14459 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14460 "sd_start_cmds: " 14461 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14462 break; 14463 14464 default: 14465 /* Should NEVER get here! */ 14466 panic("scsi_initpkt error"); 14467 /*NOTREACHED*/ 14468 } 14469 14470 /* 14471 * Fatal error in allocating a scsi_pkt for this buf. 14472 * Update kstats & return the buf with an error code. 14473 * We must use sd_return_failed_command_no_restart() to 14474 * avoid a recursive call back into sd_start_cmds(). 14475 * However this also means that we must keep processing 14476 * the waitq here in order to avoid stalling. 14477 */ 14478 if (statp == kstat_waitq_to_runq) { 14479 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14480 } 14481 sd_return_failed_command_no_restart(un, bp, EIO); 14482 if (bp == immed_bp) { 14483 /* immed_bp is gone by now, so clear this */ 14484 immed_bp = NULL; 14485 } 14486 continue; 14487 } 14488 got_pkt: 14489 if (bp == immed_bp) { 14490 /* goto the head of the class.... */ 14491 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14492 } 14493 14494 un->un_ncmds_in_transport++; 14495 SD_UPDATE_KSTATS(un, statp, bp); 14496 14497 /* 14498 * Call scsi_transport() to send the command to the target. 14499 * According to SCSA architecture, we must drop the mutex here 14500 * before calling scsi_transport() in order to avoid deadlock. 14501 * Note that the scsi_pkt's completion routine can be executed 14502 * (from interrupt context) even before the call to 14503 * scsi_transport() returns. 
14504 */ 14505 SD_TRACE(SD_LOG_IO_CORE, un, 14506 "sd_start_cmds: calling scsi_transport()\n"); 14507 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14508 14509 mutex_exit(SD_MUTEX(un)); 14510 rval = scsi_transport(xp->xb_pktp); 14511 mutex_enter(SD_MUTEX(un)); 14512 14513 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14514 "sd_start_cmds: scsi_transport() returned %d\n", rval); 14515 14516 switch (rval) { 14517 case TRAN_ACCEPT: 14518 /* Clear this with every pkt accepted by the HBA */ 14519 un->un_tran_fatal_count = 0; 14520 break; /* Success; try the next cmd (if any) */ 14521 14522 case TRAN_BUSY: 14523 un->un_ncmds_in_transport--; 14524 ASSERT(un->un_ncmds_in_transport >= 0); 14525 14526 /* 14527 * Don't retry request sense, the sense data 14528 * is lost when another request is sent. 14529 * Free up the rqs buf and retry 14530 * the original failed cmd. Update kstat. 14531 */ 14532 if (bp == un->un_rqs_bp) { 14533 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14534 bp = sd_mark_rqs_idle(un, xp); 14535 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 14536 NULL, NULL, EIO, un->un_busy_timeout / 500, 14537 kstat_waitq_enter); 14538 goto exit; 14539 } 14540 14541 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14542 /* 14543 * Free the DMA resources for the scsi_pkt. This will 14544 * allow mpxio to select another path the next time 14545 * we call scsi_transport() with this scsi_pkt. 14546 * See sdintr() for the rationalization behind this. 14547 */ 14548 if ((un->un_f_is_fibre == TRUE) && 14549 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14550 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 14551 scsi_dmafree(xp->xb_pktp); 14552 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14553 } 14554 #endif 14555 14556 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 14557 /* 14558 * Commands that are SD_PATH_DIRECT_PRIORITY 14559 * are for error recovery situations. These do 14560 * not use the normal command waitq, so if they 14561 * get a TRAN_BUSY we cannot put them back onto 14562 * the waitq for later retry. One possible 14563 * problem is that there could already be some 14564 * other command on un_retry_bp that is waiting 14565 * for this one to complete, so we would be 14566 * deadlocked if we put this command back onto 14567 * the waitq for later retry (since un_retry_bp 14568 * must complete before the driver gets back to 14569 * commands on the waitq). 14570 * 14571 * To avoid deadlock we must schedule a callback 14572 * that will restart this command after a set 14573 * interval. This should keep retrying for as 14574 * long as the underlying transport keeps 14575 * returning TRAN_BUSY (just like for other 14576 * commands). Use the same timeout interval as 14577 * for the ordinary TRAN_BUSY retry. 14578 */ 14579 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14580 "sd_start_cmds: scsi_transport() returned " 14581 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 14582 14583 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14584 un->un_direct_priority_timeid = 14585 timeout(sd_start_direct_priority_command, 14586 bp, un->un_busy_timeout / 500); 14587 14588 goto exit; 14589 } 14590 14591 /* 14592 * For TRAN_BUSY, we want to reduce the throttle value, 14593 * unless we are retrying a command. 14594 */ 14595 if (bp != un->un_retry_bp) { 14596 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 14597 } 14598 14599 /* 14600 * Set up the bp to be tried again 10 ms later. 14601 * Note:x86: Is there a timeout value in the sd_lun 14602 * for this condition? 
14603 */ 14604 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, 14605 kstat_runq_back_to_waitq); 14606 goto exit; 14607 14608 case TRAN_FATAL_ERROR: 14609 un->un_tran_fatal_count++; 14610 /* FALLTHRU */ 14611 14612 case TRAN_BADPKT: 14613 default: 14614 un->un_ncmds_in_transport--; 14615 ASSERT(un->un_ncmds_in_transport >= 0); 14616 14617 /* 14618 * If this is our REQUEST SENSE command with a 14619 * transport error, we must get back the pointers 14620 * to the original buf, and mark the REQUEST 14621 * SENSE command as "available". 14622 */ 14623 if (bp == un->un_rqs_bp) { 14624 bp = sd_mark_rqs_idle(un, xp); 14625 xp = SD_GET_XBUF(bp); 14626 } else { 14627 /* 14628 * Legacy behavior: do not update transport 14629 * error count for request sense commands. 14630 */ 14631 SD_UPDATE_ERRSTATS(un, sd_transerrs); 14632 } 14633 14634 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14635 sd_print_transport_rejected_message(un, xp, rval); 14636 14637 /* 14638 * This command will be terminated by SD driver due 14639 * to a fatal transport error. We should post 14640 * ereport.io.scsi.cmd.disk.tran with driver-assessment 14641 * of "fail" for any command to indicate this 14642 * situation. 14643 */ 14644 if (xp->xb_ena > 0) { 14645 ASSERT(un->un_fm_private != NULL); 14646 sfip = un->un_fm_private; 14647 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT; 14648 sd_ssc_extract_info(&sfip->fm_ssc, un, 14649 xp->xb_pktp, bp, xp); 14650 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 14651 } 14652 14653 /* 14654 * We must use sd_return_failed_command_no_restart() to 14655 * avoid a recursive call back into sd_start_cmds(). 14656 * However this also means that we must keep processing 14657 * the waitq here in order to avoid stalling. 14658 */ 14659 sd_return_failed_command_no_restart(un, bp, EIO); 14660 14661 /* 14662 * Notify any threads waiting in sd_ddi_suspend() that 14663 * a command completion has occurred. 14664 */ 14665 if (un->un_state == SD_STATE_SUSPENDED) { 14666 cv_broadcast(&un->un_disk_busy_cv); 14667 } 14668 14669 if (bp == immed_bp) { 14670 /* immed_bp is gone by now, so clear this */ 14671 immed_bp = NULL; 14672 } 14673 break; 14674 } 14675 14676 } while (immed_bp == NULL); 14677 14678 exit: 14679 ASSERT(mutex_owned(SD_MUTEX(un))); 14680 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 14681 } 14682 14683 14684 /* 14685 * Function: sd_return_command 14686 * 14687 * Description: Returns a command to its originator (with or without an 14688 * error). Also starts commands waiting to be transported 14689 * to the target. 14690 * 14691 * Context: May be called from interrupt, kernel, or timeout context 14692 */ 14693 14694 static void 14695 sd_return_command(struct sd_lun *un, struct buf *bp) 14696 { 14697 struct sd_xbuf *xp; 14698 struct scsi_pkt *pktp; 14699 struct sd_fm_internal *sfip; 14700 14701 ASSERT(bp != NULL); 14702 ASSERT(un != NULL); 14703 ASSERT(mutex_owned(SD_MUTEX(un))); 14704 ASSERT(bp != un->un_rqs_bp); 14705 xp = SD_GET_XBUF(bp); 14706 ASSERT(xp != NULL); 14707 14708 pktp = SD_GET_PKTP(bp); 14709 sfip = (struct sd_fm_internal *)un->un_fm_private; 14710 ASSERT(sfip != NULL); 14711 14712 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 14713 14714 /* 14715 * Note: check for the "sdrestart failed" case. 
14716 */ 14717 if ((un->un_partial_dma_supported == 1) && 14718 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 14719 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 14720 (xp->xb_pktp->pkt_resid == 0)) { 14721 14722 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 14723 /* 14724 * Successfully set up next portion of cmd 14725 * transfer, try sending it 14726 */ 14727 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14728 NULL, NULL, 0, (clock_t)0, NULL); 14729 sd_start_cmds(un, NULL); 14730 return; /* Note:x86: need a return here? */ 14731 } 14732 } 14733 14734 /* 14735 * If this is the failfast bp, clear it from un_failfast_bp. This 14736 * can happen if upon being re-tried the failfast bp either 14737 * succeeded or encountered another error (possibly even a different 14738 * error than the one that precipitated the failfast state, but in 14739 * that case it would have had to exhaust retries as well). Regardless, 14740 * this should not occur whenever the instance is in the active 14741 * failfast state. 14742 */ 14743 if (bp == un->un_failfast_bp) { 14744 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14745 un->un_failfast_bp = NULL; 14746 } 14747 14748 /* 14749 * Clear the failfast state upon successful completion of ANY cmd. 14750 */ 14751 if (bp->b_error == 0) { 14752 un->un_failfast_state = SD_FAILFAST_INACTIVE; 14753 /* 14754 * If this is a successful command, but used to be retried, 14755 * we will take it as a recovered command and post an 14756 * ereport with driver-assessment of "recovered". 14757 */ 14758 if (xp->xb_ena > 0) { 14759 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 14760 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY); 14761 } 14762 } else { 14763 /* 14764 * If this is a failed non-USCSI command we will post an 14765 * ereport with driver-assessment set accordingly("fail" or 14766 * "fatal"). 14767 */ 14768 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 14769 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 14770 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL); 14771 } 14772 } 14773 14774 /* 14775 * This is used if the command was retried one or more times. Show that 14776 * we are done with it, and allow processing of the waitq to resume. 14777 */ 14778 if (bp == un->un_retry_bp) { 14779 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14780 "sd_return_command: un:0x%p: " 14781 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14782 un->un_retry_bp = NULL; 14783 un->un_retry_statp = NULL; 14784 } 14785 14786 SD_UPDATE_RDWR_STATS(un, bp); 14787 SD_UPDATE_PARTITION_STATS(un, bp); 14788 14789 switch (un->un_state) { 14790 case SD_STATE_SUSPENDED: 14791 /* 14792 * Notify any threads waiting in sd_ddi_suspend() that 14793 * a command completion has occurred. 14794 */ 14795 cv_broadcast(&un->un_disk_busy_cv); 14796 break; 14797 default: 14798 sd_start_cmds(un, NULL); 14799 break; 14800 } 14801 14802 /* Return this command up the iodone chain to its originator. */ 14803 mutex_exit(SD_MUTEX(un)); 14804 14805 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14806 xp->xb_pktp = NULL; 14807 14808 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14809 14810 ASSERT(!mutex_owned(SD_MUTEX(un))); 14811 mutex_enter(SD_MUTEX(un)); 14812 14813 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 14814 } 14815 14816 14817 /* 14818 * Function: sd_return_failed_command 14819 * 14820 * Description: Command completion when an error occurred. 
14821 * 14822 * Context: May be called from interrupt context 14823 */ 14824 14825 static void 14826 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 14827 { 14828 ASSERT(bp != NULL); 14829 ASSERT(un != NULL); 14830 ASSERT(mutex_owned(SD_MUTEX(un))); 14831 14832 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14833 "sd_return_failed_command: entry\n"); 14834 14835 /* 14836 * b_resid could already be nonzero due to a partial data 14837 * transfer, so do not change it here. 14838 */ 14839 SD_BIOERROR(bp, errcode); 14840 14841 sd_return_command(un, bp); 14842 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14843 "sd_return_failed_command: exit\n"); 14844 } 14845 14846 14847 /* 14848 * Function: sd_return_failed_command_no_restart 14849 * 14850 * Description: Same as sd_return_failed_command, but ensures that no 14851 * call back into sd_start_cmds will be issued. 14852 * 14853 * Context: May be called from interrupt context 14854 */ 14855 14856 static void 14857 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 14858 int errcode) 14859 { 14860 struct sd_xbuf *xp; 14861 14862 ASSERT(bp != NULL); 14863 ASSERT(un != NULL); 14864 ASSERT(mutex_owned(SD_MUTEX(un))); 14865 xp = SD_GET_XBUF(bp); 14866 ASSERT(xp != NULL); 14867 ASSERT(errcode != 0); 14868 14869 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14870 "sd_return_failed_command_no_restart: entry\n"); 14871 14872 /* 14873 * b_resid could already be nonzero due to a partial data 14874 * transfer, so do not change it here. 14875 */ 14876 SD_BIOERROR(bp, errcode); 14877 14878 /* 14879 * If this is the failfast bp, clear it. This can happen if the 14880 * failfast bp encounterd a fatal error when we attempted to 14881 * re-try it (such as a scsi_transport(9F) failure). However 14882 * we should NOT be in an active failfast state if the failfast 14883 * bp is not NULL. 14884 */ 14885 if (bp == un->un_failfast_bp) { 14886 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14887 un->un_failfast_bp = NULL; 14888 } 14889 14890 if (bp == un->un_retry_bp) { 14891 /* 14892 * This command was retried one or more times. Show that we are 14893 * done with it, and allow processing of the waitq to resume. 14894 */ 14895 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14896 "sd_return_failed_command_no_restart: " 14897 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14898 un->un_retry_bp = NULL; 14899 un->un_retry_statp = NULL; 14900 } 14901 14902 SD_UPDATE_RDWR_STATS(un, bp); 14903 SD_UPDATE_PARTITION_STATS(un, bp); 14904 14905 mutex_exit(SD_MUTEX(un)); 14906 14907 if (xp->xb_pktp != NULL) { 14908 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14909 xp->xb_pktp = NULL; 14910 } 14911 14912 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14913 14914 mutex_enter(SD_MUTEX(un)); 14915 14916 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14917 "sd_return_failed_command_no_restart: exit\n"); 14918 } 14919 14920 14921 /* 14922 * Function: sd_retry_command 14923 * 14924 * Description: queue up a command for retry, or (optionally) fail it 14925 * if retry counts are exhausted. 14926 * 14927 * Arguments: un - Pointer to the sd_lun struct for the target. 14928 * 14929 * bp - Pointer to the buf for the command to be retried. 14930 * 14931 * retry_check_flag - Flag to see which (if any) of the retry 14932 * counts should be decremented/checked. If the indicated 14933 * retry count is exhausted, then the command will not be 14934 * retried; it will be failed instead. 
This should use a 14935 * value equal to one of the following: 14936 * 14937 * SD_RETRIES_NOCHECK 14938 * SD_RESD_RETRIES_STANDARD 14939 * SD_RETRIES_VICTIM 14940 * 14941 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 14942 * if the check should be made to see of FLAG_ISOLATE is set 14943 * in the pkt. If FLAG_ISOLATE is set, then the command is 14944 * not retried, it is simply failed. 14945 * 14946 * user_funcp - Ptr to function to call before dispatching the 14947 * command. May be NULL if no action needs to be performed. 14948 * (Primarily intended for printing messages.) 14949 * 14950 * user_arg - Optional argument to be passed along to 14951 * the user_funcp call. 14952 * 14953 * failure_code - errno return code to set in the bp if the 14954 * command is going to be failed. 14955 * 14956 * retry_delay - Retry delay interval in (clock_t) units. May 14957 * be zero which indicates that the retry should be retried 14958 * immediately (ie, without an intervening delay). 14959 * 14960 * statp - Ptr to kstat function to be updated if the command 14961 * is queued for a delayed retry. May be NULL if no kstat 14962 * update is desired. 14963 * 14964 * Context: May be called from interrupt context. 14965 */ 14966 14967 static void 14968 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 14969 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 14970 code), void *user_arg, int failure_code, clock_t retry_delay, 14971 void (*statp)(kstat_io_t *)) 14972 { 14973 struct sd_xbuf *xp; 14974 struct scsi_pkt *pktp; 14975 struct sd_fm_internal *sfip; 14976 14977 ASSERT(un != NULL); 14978 ASSERT(mutex_owned(SD_MUTEX(un))); 14979 ASSERT(bp != NULL); 14980 xp = SD_GET_XBUF(bp); 14981 ASSERT(xp != NULL); 14982 pktp = SD_GET_PKTP(bp); 14983 ASSERT(pktp != NULL); 14984 14985 sfip = (struct sd_fm_internal *)un->un_fm_private; 14986 ASSERT(sfip != NULL); 14987 14988 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14989 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 14990 14991 /* 14992 * If we are syncing or dumping, fail the command to avoid 14993 * recursively calling back into scsi_transport(). 14994 */ 14995 if (ddi_in_panic()) { 14996 goto fail_command_no_log; 14997 } 14998 14999 /* 15000 * We should never be be retrying a command with FLAG_DIAGNOSE set, so 15001 * log an error and fail the command. 15002 */ 15003 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15004 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 15005 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 15006 sd_dump_memory(un, SD_LOG_IO, "CDB", 15007 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 15008 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 15009 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 15010 goto fail_command; 15011 } 15012 15013 /* 15014 * If we are suspended, then put the command onto head of the 15015 * wait queue since we don't want to start more commands, and 15016 * clear the un_retry_bp. Next time when we are resumed, will 15017 * handle the command in the wait queue. 
15018 */ 15019 switch (un->un_state) { 15020 case SD_STATE_SUSPENDED: 15021 case SD_STATE_DUMPING: 15022 bp->av_forw = un->un_waitq_headp; 15023 un->un_waitq_headp = bp; 15024 if (un->un_waitq_tailp == NULL) { 15025 un->un_waitq_tailp = bp; 15026 } 15027 if (bp == un->un_retry_bp) { 15028 un->un_retry_bp = NULL; 15029 un->un_retry_statp = NULL; 15030 } 15031 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 15032 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 15033 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 15034 return; 15035 default: 15036 break; 15037 } 15038 15039 /* 15040 * If the caller wants us to check FLAG_ISOLATE, then see if that 15041 * is set; if it is then we do not want to retry the command. 15042 * Normally, FLAG_ISOLATE is only used with USCSI cmds. 15043 */ 15044 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 15045 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 15046 goto fail_command; 15047 } 15048 } 15049 15050 15051 /* 15052 * If SD_RETRIES_FAILFAST is set, it indicates that either a 15053 * command timeout or a selection timeout has occurred. This means 15054 * that we were unable to establish an kind of communication with 15055 * the target, and subsequent retries and/or commands are likely 15056 * to encounter similar results and take a long time to complete. 15057 * 15058 * If this is a failfast error condition, we need to update the 15059 * failfast state, even if this bp does not have B_FAILFAST set. 15060 */ 15061 if (retry_check_flag & SD_RETRIES_FAILFAST) { 15062 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 15063 ASSERT(un->un_failfast_bp == NULL); 15064 /* 15065 * If we are already in the active failfast state, and 15066 * another failfast error condition has been detected, 15067 * then fail this command if it has B_FAILFAST set. 15068 * If B_FAILFAST is clear, then maintain the legacy 15069 * behavior of retrying heroically, even tho this will 15070 * take a lot more time to fail the command. 15071 */ 15072 if (bp->b_flags & B_FAILFAST) { 15073 goto fail_command; 15074 } 15075 } else { 15076 /* 15077 * We're not in the active failfast state, but we 15078 * have a failfast error condition, so we must begin 15079 * transition to the next state. We do this regardless 15080 * of whether or not this bp has B_FAILFAST set. 15081 */ 15082 if (un->un_failfast_bp == NULL) { 15083 /* 15084 * This is the first bp to meet a failfast 15085 * condition so save it on un_failfast_bp & 15086 * do normal retry processing. Do not enter 15087 * active failfast state yet. This marks 15088 * entry into the "failfast pending" state. 15089 */ 15090 un->un_failfast_bp = bp; 15091 15092 } else if (un->un_failfast_bp == bp) { 15093 /* 15094 * This is the second time *this* bp has 15095 * encountered a failfast error condition, 15096 * so enter active failfast state & flush 15097 * queues as appropriate. 15098 */ 15099 un->un_failfast_state = SD_FAILFAST_ACTIVE; 15100 un->un_failfast_bp = NULL; 15101 sd_failfast_flushq(un); 15102 15103 /* 15104 * Fail this bp now if B_FAILFAST set; 15105 * otherwise continue with retries. (It would 15106 * be pretty ironic if this bp succeeded on a 15107 * subsequent retry after we just flushed all 15108 * the queues). 
15109 */ 15110 if (bp->b_flags & B_FAILFAST) { 15111 goto fail_command; 15112 } 15113 15114 #if !defined(lint) && !defined(__lint) 15115 } else { 15116 /* 15117 * If neither of the preceeding conditionals 15118 * was true, it means that there is some 15119 * *other* bp that has met an inital failfast 15120 * condition and is currently either being 15121 * retried or is waiting to be retried. In 15122 * that case we should perform normal retry 15123 * processing on *this* bp, since there is a 15124 * chance that the current failfast condition 15125 * is transient and recoverable. If that does 15126 * not turn out to be the case, then retries 15127 * will be cleared when the wait queue is 15128 * flushed anyway. 15129 */ 15130 #endif 15131 } 15132 } 15133 } else { 15134 /* 15135 * SD_RETRIES_FAILFAST is clear, which indicates that we 15136 * likely were able to at least establish some level of 15137 * communication with the target and subsequent commands 15138 * and/or retries are likely to get through to the target, 15139 * In this case we want to be aggressive about clearing 15140 * the failfast state. Note that this does not affect 15141 * the "failfast pending" condition. 15142 */ 15143 un->un_failfast_state = SD_FAILFAST_INACTIVE; 15144 } 15145 15146 15147 /* 15148 * Check the specified retry count to see if we can still do 15149 * any retries with this pkt before we should fail it. 15150 */ 15151 switch (retry_check_flag & SD_RETRIES_MASK) { 15152 case SD_RETRIES_VICTIM: 15153 /* 15154 * Check the victim retry count. If exhausted, then fall 15155 * thru & check against the standard retry count. 15156 */ 15157 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 15158 /* Increment count & proceed with the retry */ 15159 xp->xb_victim_retry_count++; 15160 break; 15161 } 15162 /* Victim retries exhausted, fall back to std. retries... */ 15163 /* FALLTHRU */ 15164 15165 case SD_RETRIES_STANDARD: 15166 if (xp->xb_retry_count >= un->un_retry_count) { 15167 /* Retries exhausted, fail the command */ 15168 SD_TRACE(SD_LOG_IO_CORE, un, 15169 "sd_retry_command: retries exhausted!\n"); 15170 /* 15171 * update b_resid for failed SCMD_READ & SCMD_WRITE 15172 * commands with nonzero pkt_resid. 15173 */ 15174 if ((pktp->pkt_reason == CMD_CMPLT) && 15175 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 15176 (pktp->pkt_resid != 0)) { 15177 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 15178 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 15179 SD_UPDATE_B_RESID(bp, pktp); 15180 } 15181 } 15182 goto fail_command; 15183 } 15184 xp->xb_retry_count++; 15185 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15186 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15187 break; 15188 15189 case SD_RETRIES_UA: 15190 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 15191 /* Retries exhausted, fail the command */ 15192 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15193 "Unit Attention retries exhausted. 
" 15194 "Check the target.\n"); 15195 goto fail_command; 15196 } 15197 xp->xb_ua_retry_count++; 15198 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15199 "sd_retry_command: retry count:%d\n", 15200 xp->xb_ua_retry_count); 15201 break; 15202 15203 case SD_RETRIES_BUSY: 15204 if (xp->xb_retry_count >= un->un_busy_retry_count) { 15205 /* Retries exhausted, fail the command */ 15206 SD_TRACE(SD_LOG_IO_CORE, un, 15207 "sd_retry_command: retries exhausted!\n"); 15208 goto fail_command; 15209 } 15210 xp->xb_retry_count++; 15211 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15212 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15213 break; 15214 15215 case SD_RETRIES_NOCHECK: 15216 default: 15217 /* No retry count to check. Just proceed with the retry */ 15218 break; 15219 } 15220 15221 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15222 15223 /* 15224 * If this is a non-USCSI command being retried 15225 * during execution last time, we should post an ereport with 15226 * driver-assessment of the value "retry". 15227 * For partial DMA, request sense and STATUS_QFULL, there are no 15228 * hardware errors, we bypass ereport posting. 15229 */ 15230 if (failure_code != 0) { 15231 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 15232 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp); 15233 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY); 15234 } 15235 } 15236 15237 /* 15238 * If we were given a zero timeout, we must attempt to retry the 15239 * command immediately (ie, without a delay). 15240 */ 15241 if (retry_delay == 0) { 15242 /* 15243 * Check some limiting conditions to see if we can actually 15244 * do the immediate retry. If we cannot, then we must 15245 * fall back to queueing up a delayed retry. 15246 */ 15247 if (un->un_ncmds_in_transport >= un->un_throttle) { 15248 /* 15249 * We are at the throttle limit for the target, 15250 * fall back to delayed retry. 15251 */ 15252 retry_delay = un->un_busy_timeout; 15253 statp = kstat_waitq_enter; 15254 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15255 "sd_retry_command: immed. retry hit " 15256 "throttle!\n"); 15257 } else { 15258 /* 15259 * We're clear to proceed with the immediate retry. 15260 * First call the user-provided function (if any) 15261 */ 15262 if (user_funcp != NULL) { 15263 (*user_funcp)(un, bp, user_arg, 15264 SD_IMMEDIATE_RETRY_ISSUED); 15265 #ifdef __lock_lint 15266 sd_print_incomplete_msg(un, bp, user_arg, 15267 SD_IMMEDIATE_RETRY_ISSUED); 15268 sd_print_cmd_incomplete_msg(un, bp, user_arg, 15269 SD_IMMEDIATE_RETRY_ISSUED); 15270 sd_print_sense_failed_msg(un, bp, user_arg, 15271 SD_IMMEDIATE_RETRY_ISSUED); 15272 #endif 15273 } 15274 15275 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15276 "sd_retry_command: issuing immediate retry\n"); 15277 15278 /* 15279 * Call sd_start_cmds() to transport the command to 15280 * the target. 15281 */ 15282 sd_start_cmds(un, bp); 15283 15284 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15285 "sd_retry_command exit\n"); 15286 return; 15287 } 15288 } 15289 15290 /* 15291 * Set up to retry the command after a delay. 
15292 * First call the user-provided function (if any) 15293 */ 15294 if (user_funcp != NULL) { 15295 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 15296 } 15297 15298 sd_set_retry_bp(un, bp, retry_delay, statp); 15299 15300 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15301 return; 15302 15303 fail_command: 15304 15305 if (user_funcp != NULL) { 15306 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 15307 } 15308 15309 fail_command_no_log: 15310 15311 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15312 "sd_retry_command: returning failed command\n"); 15313 15314 sd_return_failed_command(un, bp, failure_code); 15315 15316 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15317 } 15318 15319 15320 /* 15321 * Function: sd_set_retry_bp 15322 * 15323 * Description: Set up the given bp for retry. 15324 * 15325 * Arguments: un - ptr to associated softstate 15326 * bp - ptr to buf(9S) for the command 15327 * retry_delay - time interval before issuing retry (may be 0) 15328 * statp - optional pointer to kstat function 15329 * 15330 * Context: May be called under interrupt context 15331 */ 15332 15333 static void 15334 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 15335 void (*statp)(kstat_io_t *)) 15336 { 15337 ASSERT(un != NULL); 15338 ASSERT(mutex_owned(SD_MUTEX(un))); 15339 ASSERT(bp != NULL); 15340 15341 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15342 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 15343 15344 /* 15345 * Indicate that the command is being retried. This will not allow any 15346 * other commands on the wait queue to be transported to the target 15347 * until this command has been completed (success or failure). The 15348 * "retry command" is not transported to the target until the given 15349 * time delay expires, unless the user specified a 0 retry_delay. 15350 * 15351 * Note: the timeout(9F) callback routine is what actually calls 15352 * sd_start_cmds() to transport the command, with the exception of a 15353 * zero retry_delay. The only current implementor of a zero retry delay 15354 * is the case where a START_STOP_UNIT is sent to spin-up a device. 15355 */ 15356 if (un->un_retry_bp == NULL) { 15357 ASSERT(un->un_retry_statp == NULL); 15358 un->un_retry_bp = bp; 15359 15360 /* 15361 * If the user has not specified a delay the command should 15362 * be queued and no timeout should be scheduled. 15363 */ 15364 if (retry_delay == 0) { 15365 /* 15366 * Save the kstat pointer that will be used in the 15367 * call to SD_UPDATE_KSTATS() below, so that 15368 * sd_start_cmds() can correctly decrement the waitq 15369 * count when it is time to transport this command. 15370 */ 15371 un->un_retry_statp = statp; 15372 goto done; 15373 } 15374 } 15375 15376 if (un->un_retry_bp == bp) { 15377 /* 15378 * Save the kstat pointer that will be used in the call to 15379 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 15380 * correctly decrement the waitq count when it is time to 15381 * transport this command. 15382 */ 15383 un->un_retry_statp = statp; 15384 15385 /* 15386 * Schedule a timeout if: 15387 * 1) The user has specified a delay. 15388 * 2) There is not a START_STOP_UNIT callback pending. 15389 * 15390 * If no delay has been specified, then it is up to the caller 15391 * to ensure that IO processing continues without stalling. 15392 * Effectively, this means that the caller will issue the 15393 * required call to sd_start_cmds(). 
The START_STOP_UNIT 15394 * callback does this after the START STOP UNIT command has 15395 * completed. In either of these cases we should not schedule 15396 * a timeout callback here. Also don't schedule the timeout if 15397 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 15398 */ 15399 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 15400 (un->un_direct_priority_timeid == NULL)) { 15401 un->un_retry_timeid = 15402 timeout(sd_start_retry_command, un, retry_delay); 15403 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15404 "sd_set_retry_bp: setting timeout: un: 0x%p" 15405 " bp:0x%p un_retry_timeid:0x%p\n", 15406 un, bp, un->un_retry_timeid); 15407 } 15408 } else { 15409 /* 15410 * We only get in here if there is already another command 15411 * waiting to be retried. In this case, we just put the 15412 * given command onto the wait queue, so it can be transported 15413 * after the current retry command has completed. 15414 * 15415 * Also we have to make sure that if the command at the head 15416 * of the wait queue is the un_failfast_bp, that we do not 15417 * put ahead of it any other commands that are to be retried. 15418 */ 15419 if ((un->un_failfast_bp != NULL) && 15420 (un->un_failfast_bp == un->un_waitq_headp)) { 15421 /* 15422 * Enqueue this command AFTER the first command on 15423 * the wait queue (which is also un_failfast_bp). 15424 */ 15425 bp->av_forw = un->un_waitq_headp->av_forw; 15426 un->un_waitq_headp->av_forw = bp; 15427 if (un->un_waitq_headp == un->un_waitq_tailp) { 15428 un->un_waitq_tailp = bp; 15429 } 15430 } else { 15431 /* Enqueue this command at the head of the waitq. */ 15432 bp->av_forw = un->un_waitq_headp; 15433 un->un_waitq_headp = bp; 15434 if (un->un_waitq_tailp == NULL) { 15435 un->un_waitq_tailp = bp; 15436 } 15437 } 15438 15439 if (statp == NULL) { 15440 statp = kstat_waitq_enter; 15441 } 15442 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15443 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 15444 } 15445 15446 done: 15447 if (statp != NULL) { 15448 SD_UPDATE_KSTATS(un, statp, bp); 15449 } 15450 15451 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15452 "sd_set_retry_bp: exit un:0x%p\n", un); 15453 } 15454 15455 15456 /* 15457 * Function: sd_start_retry_command 15458 * 15459 * Description: Start the command that has been waiting on the target's 15460 * retry queue. Called from timeout(9F) context after the 15461 * retry delay interval has expired. 15462 * 15463 * Arguments: arg - pointer to associated softstate for the device. 15464 * 15465 * Context: timeout(9F) thread context. May not sleep. 
15466 */ 15467 15468 static void 15469 sd_start_retry_command(void *arg) 15470 { 15471 struct sd_lun *un = arg; 15472 15473 ASSERT(un != NULL); 15474 ASSERT(!mutex_owned(SD_MUTEX(un))); 15475 15476 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15477 "sd_start_retry_command: entry\n"); 15478 15479 mutex_enter(SD_MUTEX(un)); 15480 15481 un->un_retry_timeid = NULL; 15482 15483 if (un->un_retry_bp != NULL) { 15484 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15485 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 15486 un, un->un_retry_bp); 15487 sd_start_cmds(un, un->un_retry_bp); 15488 } 15489 15490 mutex_exit(SD_MUTEX(un)); 15491 15492 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15493 "sd_start_retry_command: exit\n"); 15494 } 15495 15496 15497 /* 15498 * Function: sd_start_direct_priority_command 15499 * 15500 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 15501 * received TRAN_BUSY when we called scsi_transport() to send it 15502 * to the underlying HBA. This function is called from timeout(9F) 15503 * context after the delay interval has expired. 15504 * 15505 * Arguments: arg - pointer to associated buf(9S) to be restarted. 15506 * 15507 * Context: timeout(9F) thread context. May not sleep. 15508 */ 15509 15510 static void 15511 sd_start_direct_priority_command(void *arg) 15512 { 15513 struct buf *priority_bp = arg; 15514 struct sd_lun *un; 15515 15516 ASSERT(priority_bp != NULL); 15517 un = SD_GET_UN(priority_bp); 15518 ASSERT(un != NULL); 15519 ASSERT(!mutex_owned(SD_MUTEX(un))); 15520 15521 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15522 "sd_start_direct_priority_command: entry\n"); 15523 15524 mutex_enter(SD_MUTEX(un)); 15525 un->un_direct_priority_timeid = NULL; 15526 sd_start_cmds(un, priority_bp); 15527 mutex_exit(SD_MUTEX(un)); 15528 15529 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15530 "sd_start_direct_priority_command: exit\n"); 15531 } 15532 15533 15534 /* 15535 * Function: sd_send_request_sense_command 15536 * 15537 * Description: Sends a REQUEST SENSE command to the target 15538 * 15539 * Context: May be called from interrupt context. 15540 */ 15541 15542 static void 15543 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 15544 struct scsi_pkt *pktp) 15545 { 15546 ASSERT(bp != NULL); 15547 ASSERT(un != NULL); 15548 ASSERT(mutex_owned(SD_MUTEX(un))); 15549 15550 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 15551 "entry: buf:0x%p\n", bp); 15552 15553 /* 15554 * If we are syncing or dumping, then fail the command to avoid a 15555 * recursive callback into scsi_transport(). Also fail the command 15556 * if we are suspended (legacy behavior). 15557 */ 15558 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 15559 (un->un_state == SD_STATE_DUMPING)) { 15560 sd_return_failed_command(un, bp, EIO); 15561 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15562 "sd_send_request_sense_command: syncing/dumping, exit\n"); 15563 return; 15564 } 15565 15566 /* 15567 * Retry the failed command and don't issue the request sense if: 15568 * 1) the sense buf is busy 15569 * 2) we have 1 or more outstanding commands on the target 15570 * (the sense data will be cleared or invalidated any way) 15571 * 15572 * Note: There could be an issue with not checking a retry limit here, 15573 * the problem is determining which retry limit to check. 
15574 */ 15575 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 15576 /* Don't retry if the command is flagged as non-retryable */ 15577 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15578 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 15579 NULL, NULL, 0, un->un_busy_timeout, 15580 kstat_waitq_enter); 15581 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15582 "sd_send_request_sense_command: " 15583 "at full throttle, retrying exit\n"); 15584 } else { 15585 sd_return_failed_command(un, bp, EIO); 15586 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15587 "sd_send_request_sense_command: " 15588 "at full throttle, non-retryable exit\n"); 15589 } 15590 return; 15591 } 15592 15593 sd_mark_rqs_busy(un, bp); 15594 sd_start_cmds(un, un->un_rqs_bp); 15595 15596 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15597 "sd_send_request_sense_command: exit\n"); 15598 } 15599 15600 15601 /* 15602 * Function: sd_mark_rqs_busy 15603 * 15604 * Description: Indicate that the request sense bp for this instance is 15605 * in use. 15606 * 15607 * Context: May be called under interrupt context 15608 */ 15609 15610 static void 15611 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 15612 { 15613 struct sd_xbuf *sense_xp; 15614 15615 ASSERT(un != NULL); 15616 ASSERT(bp != NULL); 15617 ASSERT(mutex_owned(SD_MUTEX(un))); 15618 ASSERT(un->un_sense_isbusy == 0); 15619 15620 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 15621 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 15622 15623 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 15624 ASSERT(sense_xp != NULL); 15625 15626 SD_INFO(SD_LOG_IO, un, 15627 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 15628 15629 ASSERT(sense_xp->xb_pktp != NULL); 15630 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 15631 == (FLAG_SENSING | FLAG_HEAD)); 15632 15633 un->un_sense_isbusy = 1; 15634 un->un_rqs_bp->b_resid = 0; 15635 sense_xp->xb_pktp->pkt_resid = 0; 15636 sense_xp->xb_pktp->pkt_reason = 0; 15637 15638 /* So we can get back the bp at interrupt time! */ 15639 sense_xp->xb_sense_bp = bp; 15640 15641 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 15642 15643 /* 15644 * Mark this buf as awaiting sense data. (This is already set in 15645 * the pkt_flags for the RQS packet.) 15646 */ 15647 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 15648 15649 /* Request sense down same path */ 15650 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) && 15651 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance) 15652 sense_xp->xb_pktp->pkt_path_instance = 15653 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance; 15654 15655 sense_xp->xb_retry_count = 0; 15656 sense_xp->xb_victim_retry_count = 0; 15657 sense_xp->xb_ua_retry_count = 0; 15658 sense_xp->xb_nr_retry_count = 0; 15659 sense_xp->xb_dma_resid = 0; 15660 15661 /* Clean up the fields for auto-request sense */ 15662 sense_xp->xb_sense_status = 0; 15663 sense_xp->xb_sense_state = 0; 15664 sense_xp->xb_sense_resid = 0; 15665 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 15666 15667 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 15668 } 15669 15670 15671 /* 15672 * Function: sd_mark_rqs_idle 15673 * 15674 * Description: SD_MUTEX must be held continuously through this routine 15675 * to prevent reuse of the rqs struct before the caller can 15676 * complete it's processing. 
15677 * 15678 * Return Code: Pointer to the RQS buf 15679 * 15680 * Context: May be called under interrupt context 15681 */ 15682 15683 static struct buf * 15684 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 15685 { 15686 struct buf *bp; 15687 ASSERT(un != NULL); 15688 ASSERT(sense_xp != NULL); 15689 ASSERT(mutex_owned(SD_MUTEX(un))); 15690 ASSERT(un->un_sense_isbusy != 0); 15691 15692 un->un_sense_isbusy = 0; 15693 bp = sense_xp->xb_sense_bp; 15694 sense_xp->xb_sense_bp = NULL; 15695 15696 /* This pkt is no longer interested in getting sense data */ 15697 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 15698 15699 return (bp); 15700 } 15701 15702 15703 15704 /* 15705 * Function: sd_alloc_rqs 15706 * 15707 * Description: Set up the unit to receive auto request sense data 15708 * 15709 * Return Code: DDI_SUCCESS or DDI_FAILURE 15710 * 15711 * Context: Called under attach(9E) context 15712 */ 15713 15714 static int 15715 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 15716 { 15717 struct sd_xbuf *xp; 15718 15719 ASSERT(un != NULL); 15720 ASSERT(!mutex_owned(SD_MUTEX(un))); 15721 ASSERT(un->un_rqs_bp == NULL); 15722 ASSERT(un->un_rqs_pktp == NULL); 15723 15724 /* 15725 * First allocate the required buf and scsi_pkt structs, then set up 15726 * the CDB in the scsi_pkt for a REQUEST SENSE command. 15727 */ 15728 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 15729 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 15730 if (un->un_rqs_bp == NULL) { 15731 return (DDI_FAILURE); 15732 } 15733 15734 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 15735 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 15736 15737 if (un->un_rqs_pktp == NULL) { 15738 sd_free_rqs(un); 15739 return (DDI_FAILURE); 15740 } 15741 15742 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 15743 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 15744 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0); 15745 15746 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 15747 15748 /* Set up the other needed members in the ARQ scsi_pkt. */ 15749 un->un_rqs_pktp->pkt_comp = sdintr; 15750 un->un_rqs_pktp->pkt_time = sd_io_time; 15751 un->un_rqs_pktp->pkt_flags |= 15752 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 15753 15754 /* 15755 * Allocate & init the sd_xbuf struct for the RQS command. Do not 15756 * provide any intpkt, destroypkt routines as we take care of 15757 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 15758 */ 15759 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 15760 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 15761 xp->xb_pktp = un->un_rqs_pktp; 15762 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15763 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 15764 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 15765 15766 /* 15767 * Save the pointer to the request sense private bp so it can 15768 * be retrieved in sdintr. 15769 */ 15770 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 15771 ASSERT(un->un_rqs_bp->b_private == xp); 15772 15773 /* 15774 * See if the HBA supports auto-request sense for the specified 15775 * target/lun. If it does, then try to enable it (if not already 15776 * enabled). 15777 * 15778 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 15779 * failure, while for other HBAs (pln) scsi_ifsetcap will always 15780 * return success. However, in both of these cases ARQ is always 15781 * enabled and scsi_ifgetcap will always return true. 
The best approach 15782 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 15783 * 15784 * The 3rd case is the HBA (adp) always return enabled on 15785 * scsi_ifgetgetcap even when it's not enable, the best approach 15786 * is issue a scsi_ifsetcap then a scsi_ifgetcap 15787 * Note: this case is to circumvent the Adaptec bug. (x86 only) 15788 */ 15789 15790 if (un->un_f_is_fibre == TRUE) { 15791 un->un_f_arq_enabled = TRUE; 15792 } else { 15793 #if defined(__i386) || defined(__amd64) 15794 /* 15795 * Circumvent the Adaptec bug, remove this code when 15796 * the bug is fixed 15797 */ 15798 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 15799 #endif 15800 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 15801 case 0: 15802 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15803 "sd_alloc_rqs: HBA supports ARQ\n"); 15804 /* 15805 * ARQ is supported by this HBA but currently is not 15806 * enabled. Attempt to enable it and if successful then 15807 * mark this instance as ARQ enabled. 15808 */ 15809 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 15810 == 1) { 15811 /* Successfully enabled ARQ in the HBA */ 15812 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15813 "sd_alloc_rqs: ARQ enabled\n"); 15814 un->un_f_arq_enabled = TRUE; 15815 } else { 15816 /* Could not enable ARQ in the HBA */ 15817 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15818 "sd_alloc_rqs: failed ARQ enable\n"); 15819 un->un_f_arq_enabled = FALSE; 15820 } 15821 break; 15822 case 1: 15823 /* 15824 * ARQ is supported by this HBA and is already enabled. 15825 * Just mark ARQ as enabled for this instance. 15826 */ 15827 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15828 "sd_alloc_rqs: ARQ already enabled\n"); 15829 un->un_f_arq_enabled = TRUE; 15830 break; 15831 default: 15832 /* 15833 * ARQ is not supported by this HBA; disable it for this 15834 * instance. 15835 */ 15836 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15837 "sd_alloc_rqs: HBA does not support ARQ\n"); 15838 un->un_f_arq_enabled = FALSE; 15839 break; 15840 } 15841 } 15842 15843 return (DDI_SUCCESS); 15844 } 15845 15846 15847 /* 15848 * Function: sd_free_rqs 15849 * 15850 * Description: Cleanup for the pre-instance RQS command. 15851 * 15852 * Context: Kernel thread context 15853 */ 15854 15855 static void 15856 sd_free_rqs(struct sd_lun *un) 15857 { 15858 ASSERT(un != NULL); 15859 15860 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 15861 15862 /* 15863 * If consistent memory is bound to a scsi_pkt, the pkt 15864 * has to be destroyed *before* freeing the consistent memory. 15865 * Don't change the sequence of this operations. 15866 * scsi_destroy_pkt() might access memory, which isn't allowed, 15867 * after it was freed in scsi_free_consistent_buf(). 15868 */ 15869 if (un->un_rqs_pktp != NULL) { 15870 scsi_destroy_pkt(un->un_rqs_pktp); 15871 un->un_rqs_pktp = NULL; 15872 } 15873 15874 if (un->un_rqs_bp != NULL) { 15875 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp); 15876 if (xp != NULL) { 15877 kmem_free(xp, sizeof (struct sd_xbuf)); 15878 } 15879 scsi_free_consistent_buf(un->un_rqs_bp); 15880 un->un_rqs_bp = NULL; 15881 } 15882 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 15883 } 15884 15885 15886 15887 /* 15888 * Function: sd_reduce_throttle 15889 * 15890 * Description: Reduces the maximum # of outstanding commands on a 15891 * target to the current number of outstanding commands. 15892 * Queues a tiemout(9F) callback to restore the limit 15893 * after a specified interval has elapsed. 
15894 * Typically used when we get a TRAN_BUSY return code 15895 * back from scsi_transport(). 15896 * 15897 * Arguments: un - ptr to the sd_lun softstate struct 15898 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 15899 * 15900 * Context: May be called from interrupt context 15901 */ 15902 15903 static void 15904 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 15905 { 15906 ASSERT(un != NULL); 15907 ASSERT(mutex_owned(SD_MUTEX(un))); 15908 ASSERT(un->un_ncmds_in_transport >= 0); 15909 15910 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15911 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 15912 un, un->un_throttle, un->un_ncmds_in_transport); 15913 15914 if (un->un_throttle > 1) { 15915 if (un->un_f_use_adaptive_throttle == TRUE) { 15916 switch (throttle_type) { 15917 case SD_THROTTLE_TRAN_BUSY: 15918 if (un->un_busy_throttle == 0) { 15919 un->un_busy_throttle = un->un_throttle; 15920 } 15921 break; 15922 case SD_THROTTLE_QFULL: 15923 un->un_busy_throttle = 0; 15924 break; 15925 default: 15926 ASSERT(FALSE); 15927 } 15928 15929 if (un->un_ncmds_in_transport > 0) { 15930 un->un_throttle = un->un_ncmds_in_transport; 15931 } 15932 15933 } else { 15934 if (un->un_ncmds_in_transport == 0) { 15935 un->un_throttle = 1; 15936 } else { 15937 un->un_throttle = un->un_ncmds_in_transport; 15938 } 15939 } 15940 } 15941 15942 /* Reschedule the timeout if none is currently active */ 15943 if (un->un_reset_throttle_timeid == NULL) { 15944 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 15945 un, SD_THROTTLE_RESET_INTERVAL); 15946 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15947 "sd_reduce_throttle: timeout scheduled!\n"); 15948 } 15949 15950 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15951 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15952 } 15953 15954 15955 15956 /* 15957 * Function: sd_restore_throttle 15958 * 15959 * Description: Callback function for timeout(9F). Resets the current 15960 * value of un->un_throttle to its default. 15961 * 15962 * Arguments: arg - pointer to associated softstate for the device. 15963 * 15964 * Context: May be called from interrupt context 15965 */ 15966 15967 static void 15968 sd_restore_throttle(void *arg) 15969 { 15970 struct sd_lun *un = arg; 15971 15972 ASSERT(un != NULL); 15973 ASSERT(!mutex_owned(SD_MUTEX(un))); 15974 15975 mutex_enter(SD_MUTEX(un)); 15976 15977 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 15978 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15979 15980 un->un_reset_throttle_timeid = NULL; 15981 15982 if (un->un_f_use_adaptive_throttle == TRUE) { 15983 /* 15984 * If un_busy_throttle is nonzero, then it contains the 15985 * value that un_throttle was when we got a TRAN_BUSY back 15986 * from scsi_transport(). We want to revert back to this 15987 * value. 15988 * 15989 * In the QFULL case, the throttle limit will incrementally 15990 * increase until it reaches max throttle. 15991 */ 15992 if (un->un_busy_throttle > 0) { 15993 un->un_throttle = un->un_busy_throttle; 15994 un->un_busy_throttle = 0; 15995 } else { 15996 /* 15997 * increase throttle by 10% open gate slowly, schedule 15998 * another restore if saved throttle has not been 15999 * reached 16000 */ 16001 short throttle; 16002 if (sd_qfull_throttle_enable) { 16003 throttle = un->un_throttle + 16004 max((un->un_throttle / 10), 1); 16005 un->un_throttle = 16006 (throttle < un->un_saved_throttle) ? 
16007 throttle : un->un_saved_throttle; 16008 if (un->un_throttle < un->un_saved_throttle) { 16009 un->un_reset_throttle_timeid = 16010 timeout(sd_restore_throttle, 16011 un, 16012 SD_QFULL_THROTTLE_RESET_INTERVAL); 16013 } 16014 } 16015 } 16016 16017 /* 16018 * If un_throttle has fallen below the low-water mark, we 16019 * restore the maximum value here (and allow it to ratchet 16020 * down again if necessary). 16021 */ 16022 if (un->un_throttle < un->un_min_throttle) { 16023 un->un_throttle = un->un_saved_throttle; 16024 } 16025 } else { 16026 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 16027 "restoring limit from 0x%x to 0x%x\n", 16028 un->un_throttle, un->un_saved_throttle); 16029 un->un_throttle = un->un_saved_throttle; 16030 } 16031 16032 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16033 "sd_restore_throttle: calling sd_start_cmds!\n"); 16034 16035 sd_start_cmds(un, NULL); 16036 16037 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 16038 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 16039 un, un->un_throttle); 16040 16041 mutex_exit(SD_MUTEX(un)); 16042 16043 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 16044 } 16045 16046 /* 16047 * Function: sdrunout 16048 * 16049 * Description: Callback routine for scsi_init_pkt when a resource allocation 16050 * fails. 16051 * 16052 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 16053 * soft state instance. 16054 * 16055 * Return Code: The scsi_init_pkt routine allows for the callback function to 16056 * return a 0 indicating the callback should be rescheduled or a 1 16057 * indicating not to reschedule. This routine always returns 1 16058 * because the driver always provides a callback function to 16059 * scsi_init_pkt. This results in a callback always being scheduled 16060 * (via the scsi_init_pkt callback implementation) if a resource 16061 * failure occurs. 16062 * 16063 * Context: This callback function may not block or call routines that block 16064 * 16065 * Note: Using the scsi_init_pkt callback facility can result in an I/O 16066 * request persisting at the head of the list which cannot be 16067 * satisfied even after multiple retries. In the future the driver 16068 * may implement some time of maximum runout count before failing 16069 * an I/O. 16070 */ 16071 16072 static int 16073 sdrunout(caddr_t arg) 16074 { 16075 struct sd_lun *un = (struct sd_lun *)arg; 16076 16077 ASSERT(un != NULL); 16078 ASSERT(!mutex_owned(SD_MUTEX(un))); 16079 16080 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 16081 16082 mutex_enter(SD_MUTEX(un)); 16083 sd_start_cmds(un, NULL); 16084 mutex_exit(SD_MUTEX(un)); 16085 /* 16086 * This callback routine always returns 1 (i.e. do not reschedule) 16087 * because we always specify sdrunout as the callback handler for 16088 * scsi_init_pkt inside the call to sd_start_cmds. 16089 */ 16090 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 16091 return (1); 16092 } 16093 16094 16095 /* 16096 * Function: sdintr 16097 * 16098 * Description: Completion callback routine for scsi_pkt(9S) structs 16099 * sent to the HBA driver via scsi_transport(9F). 
16100 * 16101 * Context: Interrupt context 16102 */ 16103 16104 static void 16105 sdintr(struct scsi_pkt *pktp) 16106 { 16107 struct buf *bp; 16108 struct sd_xbuf *xp; 16109 struct sd_lun *un; 16110 size_t actual_len; 16111 sd_ssc_t *sscp; 16112 16113 ASSERT(pktp != NULL); 16114 bp = (struct buf *)pktp->pkt_private; 16115 ASSERT(bp != NULL); 16116 xp = SD_GET_XBUF(bp); 16117 ASSERT(xp != NULL); 16118 ASSERT(xp->xb_pktp != NULL); 16119 un = SD_GET_UN(bp); 16120 ASSERT(un != NULL); 16121 ASSERT(!mutex_owned(SD_MUTEX(un))); 16122 16123 #ifdef SD_FAULT_INJECTION 16124 16125 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 16126 /* SD FaultInjection */ 16127 sd_faultinjection(pktp); 16128 16129 #endif /* SD_FAULT_INJECTION */ 16130 16131 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 16132 " xp:0x%p, un:0x%p\n", bp, xp, un); 16133 16134 mutex_enter(SD_MUTEX(un)); 16135 16136 ASSERT(un->un_fm_private != NULL); 16137 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16138 ASSERT(sscp != NULL); 16139 16140 /* Reduce the count of the #commands currently in transport */ 16141 un->un_ncmds_in_transport--; 16142 ASSERT(un->un_ncmds_in_transport >= 0); 16143 16144 /* Increment counter to indicate that the callback routine is active */ 16145 un->un_in_callback++; 16146 16147 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 16148 16149 #ifdef SDDEBUG 16150 if (bp == un->un_retry_bp) { 16151 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 16152 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 16153 un, un->un_retry_bp, un->un_ncmds_in_transport); 16154 } 16155 #endif 16156 16157 /* 16158 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media 16159 * state if needed. 16160 */ 16161 if (pktp->pkt_reason == CMD_DEV_GONE) { 16162 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16163 "Command failed to complete...Device is gone\n"); 16164 if (un->un_mediastate != DKIO_DEV_GONE) { 16165 un->un_mediastate = DKIO_DEV_GONE; 16166 cv_broadcast(&un->un_state_cv); 16167 } 16168 sd_return_failed_command(un, bp, EIO); 16169 goto exit; 16170 } 16171 16172 if (pktp->pkt_state & STATE_XARQ_DONE) { 16173 SD_TRACE(SD_LOG_COMMON, un, 16174 "sdintr: extra sense data received. pkt=%p\n", pktp); 16175 } 16176 16177 /* 16178 * First see if the pkt has auto-request sense data with it.... 16179 * Look at the packet state first so we don't take a performance 16180 * hit looking at the arq enabled flag unless absolutely necessary. 16181 */ 16182 if ((pktp->pkt_state & STATE_ARQ_DONE) && 16183 (un->un_f_arq_enabled == TRUE)) { 16184 /* 16185 * The HBA did an auto request sense for this command so check 16186 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16187 * driver command that should not be retried. 16188 */ 16189 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16190 /* 16191 * Save the relevant sense info into the xp for the 16192 * original cmd. 
16193 */ 16194 struct scsi_arq_status *asp; 16195 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16196 xp->xb_sense_status = 16197 *((uchar_t *)(&(asp->sts_rqpkt_status))); 16198 xp->xb_sense_state = asp->sts_rqpkt_state; 16199 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16200 if (pktp->pkt_state & STATE_XARQ_DONE) { 16201 actual_len = MAX_SENSE_LENGTH - 16202 xp->xb_sense_resid; 16203 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16204 MAX_SENSE_LENGTH); 16205 } else { 16206 if (xp->xb_sense_resid > SENSE_LENGTH) { 16207 actual_len = MAX_SENSE_LENGTH - 16208 xp->xb_sense_resid; 16209 } else { 16210 actual_len = SENSE_LENGTH - 16211 xp->xb_sense_resid; 16212 } 16213 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16214 if ((((struct uscsi_cmd *) 16215 (xp->xb_pktinfo))->uscsi_rqlen) > 16216 actual_len) { 16217 xp->xb_sense_resid = 16218 (((struct uscsi_cmd *) 16219 (xp->xb_pktinfo))-> 16220 uscsi_rqlen) - actual_len; 16221 } else { 16222 xp->xb_sense_resid = 0; 16223 } 16224 } 16225 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16226 SENSE_LENGTH); 16227 } 16228 16229 /* fail the command */ 16230 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16231 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 16232 sd_return_failed_command(un, bp, EIO); 16233 goto exit; 16234 } 16235 16236 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16237 /* 16238 * We want to either retry or fail this command, so free 16239 * the DMA resources here. If we retry the command then 16240 * the DMA resources will be reallocated in sd_start_cmds(). 16241 * Note that when PKT_DMA_PARTIAL is used, this reallocation 16242 * causes the *entire* transfer to start over again from the 16243 * beginning of the request, even for PARTIAL chunks that 16244 * have already transferred successfully. 16245 */ 16246 if ((un->un_f_is_fibre == TRUE) && 16247 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16248 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16249 scsi_dmafree(pktp); 16250 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16251 } 16252 #endif 16253 16254 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16255 "sdintr: arq done, sd_handle_auto_request_sense\n"); 16256 16257 sd_handle_auto_request_sense(un, bp, xp, pktp); 16258 goto exit; 16259 } 16260 16261 /* Next see if this is the REQUEST SENSE pkt for the instance */ 16262 if (pktp->pkt_flags & FLAG_SENSING) { 16263 /* This pktp is from the unit's REQUEST_SENSE command */ 16264 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16265 "sdintr: sd_handle_request_sense\n"); 16266 sd_handle_request_sense(un, bp, xp, pktp); 16267 goto exit; 16268 } 16269 16270 /* 16271 * Check to see if the command successfully completed as requested; 16272 * this is the most common case (and also the hot performance path). 16273 * 16274 * Requirements for successful completion are: 16275 * pkt_reason is CMD_CMPLT and packet status is status good. 16276 * In addition: 16277 * - A residual of zero indicates successful completion no matter what 16278 * the command is. 16279 * - If the residual is not zero and the command is not a read or 16280 * write, then it's still defined as successful completion. In other 16281 * words, if the command is a read or write the residual must be 16282 * zero for successful completion. 16283 * - If the residual is not zero and the command is a read or 16284 * write, and it's a USCSICMD, then it's still defined as 16285 * successful completion. 
16286 */ 16287 if ((pktp->pkt_reason == CMD_CMPLT) && 16288 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 16289 16290 /* 16291 * Since this command is returned with a good status, we 16292 * can reset the count for Sonoma failover. 16293 */ 16294 un->un_sonoma_failure_count = 0; 16295 16296 /* 16297 * Return all USCSI commands on good status 16298 */ 16299 if (pktp->pkt_resid == 0) { 16300 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16301 "sdintr: returning command for resid == 0\n"); 16302 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 16303 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 16304 SD_UPDATE_B_RESID(bp, pktp); 16305 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16306 "sdintr: returning command for resid != 0\n"); 16307 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16308 SD_UPDATE_B_RESID(bp, pktp); 16309 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16310 "sdintr: returning uscsi command\n"); 16311 } else { 16312 goto not_successful; 16313 } 16314 sd_return_command(un, bp); 16315 16316 /* 16317 * Decrement counter to indicate that the callback routine 16318 * is done. 16319 */ 16320 un->un_in_callback--; 16321 ASSERT(un->un_in_callback >= 0); 16322 mutex_exit(SD_MUTEX(un)); 16323 16324 return; 16325 } 16326 16327 not_successful: 16328 16329 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16330 /* 16331 * The following is based upon knowledge of the underlying transport 16332 * and its use of DMA resources. This code should be removed when 16333 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 16334 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 16335 * and sd_start_cmds(). 16336 * 16337 * Free any DMA resources associated with this command if there 16338 * is a chance it could be retried or enqueued for later retry. 16339 * If we keep the DMA binding then mpxio cannot reissue the 16340 * command on another path whenever a path failure occurs. 16341 * 16342 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 16343 * causes the *entire* transfer to start over again from the 16344 * beginning of the request, even for PARTIAL chunks that 16345 * have already transferred successfully. 16346 * 16347 * This is only done for non-uscsi commands (and also skipped for the 16348 * driver's internal RQS command). Also just do this for Fibre Channel 16349 * devices as these are the only ones that support mpxio. 16350 */ 16351 if ((un->un_f_is_fibre == TRUE) && 16352 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16353 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16354 scsi_dmafree(pktp); 16355 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16356 } 16357 #endif 16358 16359 /* 16360 * The command did not successfully complete as requested so check 16361 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16362 * driver command that should not be retried so just return. If 16363 * FLAG_DIAGNOSE is not set the error will be processed below. 16364 */ 16365 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16366 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16367 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 16368 /* 16369 * Issue a request sense if a check condition caused the error 16370 * (we handle the auto request sense case above), otherwise 16371 * just fail the command. 
16372 */ 16373 if ((pktp->pkt_reason == CMD_CMPLT) && 16374 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 16375 sd_send_request_sense_command(un, bp, pktp); 16376 } else { 16377 sd_return_failed_command(un, bp, EIO); 16378 } 16379 goto exit; 16380 } 16381 16382 /* 16383 * The command did not successfully complete as requested so process 16384 * the error, retry, and/or attempt recovery. 16385 */ 16386 switch (pktp->pkt_reason) { 16387 case CMD_CMPLT: 16388 switch (SD_GET_PKT_STATUS(pktp)) { 16389 case STATUS_GOOD: 16390 /* 16391 * The command completed successfully with a non-zero 16392 * residual 16393 */ 16394 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16395 "sdintr: STATUS_GOOD \n"); 16396 sd_pkt_status_good(un, bp, xp, pktp); 16397 break; 16398 16399 case STATUS_CHECK: 16400 case STATUS_TERMINATED: 16401 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16402 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 16403 sd_pkt_status_check_condition(un, bp, xp, pktp); 16404 break; 16405 16406 case STATUS_BUSY: 16407 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16408 "sdintr: STATUS_BUSY\n"); 16409 sd_pkt_status_busy(un, bp, xp, pktp); 16410 break; 16411 16412 case STATUS_RESERVATION_CONFLICT: 16413 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16414 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 16415 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16416 break; 16417 16418 case STATUS_QFULL: 16419 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16420 "sdintr: STATUS_QFULL\n"); 16421 sd_pkt_status_qfull(un, bp, xp, pktp); 16422 break; 16423 16424 case STATUS_MET: 16425 case STATUS_INTERMEDIATE: 16426 case STATUS_SCSI2: 16427 case STATUS_INTERMEDIATE_MET: 16428 case STATUS_ACA_ACTIVE: 16429 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16430 "Unexpected SCSI status received: 0x%x\n", 16431 SD_GET_PKT_STATUS(pktp)); 16432 /* 16433 * Mark the ssc_flags when an invalid status 16434 * code is detected for a non-USCSI command.
16435 */ 16436 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16437 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 16438 0, "stat-code"); 16439 } 16440 sd_return_failed_command(un, bp, EIO); 16441 break; 16442 16443 default: 16444 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16445 "Invalid SCSI status received: 0x%x\n", 16446 SD_GET_PKT_STATUS(pktp)); 16447 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16448 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS, 16449 0, "stat-code"); 16450 } 16451 sd_return_failed_command(un, bp, EIO); 16452 break; 16453 16454 } 16455 break; 16456 16457 case CMD_INCOMPLETE: 16458 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16459 "sdintr: CMD_INCOMPLETE\n"); 16460 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 16461 break; 16462 case CMD_TRAN_ERR: 16463 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16464 "sdintr: CMD_TRAN_ERR\n"); 16465 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 16466 break; 16467 case CMD_RESET: 16468 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16469 "sdintr: CMD_RESET \n"); 16470 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 16471 break; 16472 case CMD_ABORTED: 16473 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16474 "sdintr: CMD_ABORTED \n"); 16475 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 16476 break; 16477 case CMD_TIMEOUT: 16478 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16479 "sdintr: CMD_TIMEOUT\n"); 16480 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 16481 break; 16482 case CMD_UNX_BUS_FREE: 16483 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16484 "sdintr: CMD_UNX_BUS_FREE \n"); 16485 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 16486 break; 16487 case CMD_TAG_REJECT: 16488 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16489 "sdintr: CMD_TAG_REJECT\n"); 16490 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 16491 break; 16492 default: 16493 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16494 "sdintr: default\n"); 16495 /* 16496 * Mark the ssc_flags for detecting an invalid pkt_reason. 16497 */ 16498 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16499 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON, 16500 0, "pkt-reason"); 16501 } 16502 sd_pkt_reason_default(un, bp, xp, pktp); 16503 break; 16504 } 16505 16506 exit: 16507 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 16508 16509 /* Decrement counter to indicate that the callback routine is done. */ 16510 un->un_in_callback--; 16511 ASSERT(un->un_in_callback >= 0); 16512 16513 /* 16514 * At this point, the pkt has been dispatched, i.e., it is either 16515 * being re-tried or has been returned to its caller and should 16516 * not be referenced. 16517 */ 16518 16519 mutex_exit(SD_MUTEX(un)); 16520 } 16521 16522 16523 /* 16524 * Function: sd_print_incomplete_msg 16525 * 16526 * Description: Prints the error message for a CMD_INCOMPLETE error. 16527 * 16528 * Arguments: un - ptr to associated softstate for the device. 16529 * bp - ptr to the buf(9S) for the command. 16530 * arg - message string ptr 16531 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 16532 * or SD_NO_RETRY_ISSUED.
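 * For illustration, a retried read would produce a console line of
 * the form (device path and instance hypothetical):
 *	WARNING: .../sd@0,0 (sd0): incomplete read- retrying
 * while SD_NO_RETRY_ISSUED substitutes "giving up" for "retrying".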
16533 * 16534 * Context: May be called under interrupt context 16535 */ 16536 16537 static void 16538 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 16539 { 16540 struct scsi_pkt *pktp; 16541 char *msgp; 16542 char *cmdp = arg; 16543 16544 ASSERT(un != NULL); 16545 ASSERT(mutex_owned(SD_MUTEX(un))); 16546 ASSERT(bp != NULL); 16547 ASSERT(arg != NULL); 16548 pktp = SD_GET_PKTP(bp); 16549 ASSERT(pktp != NULL); 16550 16551 switch (code) { 16552 case SD_DELAYED_RETRY_ISSUED: 16553 case SD_IMMEDIATE_RETRY_ISSUED: 16554 msgp = "retrying"; 16555 break; 16556 case SD_NO_RETRY_ISSUED: 16557 default: 16558 msgp = "giving up"; 16559 break; 16560 } 16561 16562 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16563 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16564 "incomplete %s- %s\n", cmdp, msgp); 16565 } 16566 } 16567 16568 16569 16570 /* 16571 * Function: sd_pkt_status_good 16572 * 16573 * Description: Processing for a STATUS_GOOD code in pkt_status. 16574 * 16575 * Context: May be called under interrupt context 16576 */ 16577 16578 static void 16579 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 16580 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16581 { 16582 char *cmdp; 16583 16584 ASSERT(un != NULL); 16585 ASSERT(mutex_owned(SD_MUTEX(un))); 16586 ASSERT(bp != NULL); 16587 ASSERT(xp != NULL); 16588 ASSERT(pktp != NULL); 16589 ASSERT(pktp->pkt_reason == CMD_CMPLT); 16590 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 16591 ASSERT(pktp->pkt_resid != 0); 16592 16593 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 16594 16595 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16596 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 16597 case SCMD_READ: 16598 cmdp = "read"; 16599 break; 16600 case SCMD_WRITE: 16601 cmdp = "write"; 16602 break; 16603 default: 16604 SD_UPDATE_B_RESID(bp, pktp); 16605 sd_return_command(un, bp); 16606 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 16607 return; 16608 } 16609 16610 /* 16611 * See if we can retry the read/write, preferably immediately. 16612 * If retries are exhausted, then sd_retry_command() will update 16613 * the b_resid count. 16614 */ 16615 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 16616 cmdp, EIO, (clock_t)0, NULL); 16617 16618 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 16619 } 16620 16621 16622 16623 16624 16625 /* 16626 * Function: sd_handle_request_sense 16627 * 16628 * Description: Processing for non-auto Request Sense command. 16629 * 16630 * Arguments: un - ptr to associated softstate 16631 * sense_bp - ptr to buf(9S) for the RQS command 16632 * sense_xp - ptr to the sd_xbuf for the RQS command 16633 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 16634 * 16635 * Context: May be called under interrupt context 16636 */ 16637 16638 static void 16639 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 16640 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 16641 { 16642 struct buf *cmd_bp; /* buf for the original command */ 16643 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 16644 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 16645 size_t actual_len; /* actual sense data length */ 16646 16647 ASSERT(un != NULL); 16648 ASSERT(mutex_owned(SD_MUTEX(un))); 16649 ASSERT(sense_bp != NULL); 16650 ASSERT(sense_xp != NULL); 16651 ASSERT(sense_pktp != NULL); 16652 16653 /* 16654 * Note the sense_bp, sense_xp, and sense_pktp here are for the 16655 * RQS command and not the original command.
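 * (The driver pre-allocates a single REQUEST SENSE packet per LUN,
 * un->un_rqs_pktp with its un->un_rqs_bp, marked busy and idle via
 * sd_mark_rqs_busy()/sd_mark_rqs_idle(). The RQS xbuf's xb_sense_bp
 * field points back at the buf(9S) of the command that incurred the
 * check condition, which is how cmd_bp is recovered just below.)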
16656 */ 16657 ASSERT(sense_pktp == un->un_rqs_pktp); 16658 ASSERT(sense_bp == un->un_rqs_bp); 16659 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 16660 (FLAG_SENSING | FLAG_HEAD)); 16661 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 16662 FLAG_SENSING) == FLAG_SENSING); 16663 16664 /* These are the bp, xp, and pktp for the original command */ 16665 cmd_bp = sense_xp->xb_sense_bp; 16666 cmd_xp = SD_GET_XBUF(cmd_bp); 16667 cmd_pktp = SD_GET_PKTP(cmd_bp); 16668 16669 if (sense_pktp->pkt_reason != CMD_CMPLT) { 16670 /* 16671 * The REQUEST SENSE command failed. Release the REQUEST 16672 * SENSE command for re-use, get back the bp for the original 16673 * command, and attempt to re-try the original command if 16674 * FLAG_DIAGNOSE is not set in the original packet. 16675 */ 16676 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16677 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16678 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 16679 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 16680 NULL, NULL, EIO, (clock_t)0, NULL); 16681 return; 16682 } 16683 } 16684 16685 /* 16686 * Save the relevant sense info into the xp for the original cmd. 16687 * 16688 * Note: if the request sense failed the state info will be zero 16689 * as set in sd_mark_rqs_busy() 16690 */ 16691 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 16692 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 16693 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid; 16694 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) && 16695 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen > 16696 SENSE_LENGTH)) { 16697 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 16698 MAX_SENSE_LENGTH); 16699 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 16700 } else { 16701 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, 16702 SENSE_LENGTH); 16703 if (actual_len < SENSE_LENGTH) { 16704 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len; 16705 } else { 16706 cmd_xp->xb_sense_resid = 0; 16707 } 16708 } 16709 16710 /* 16711 * Free up the RQS command.... 16712 * NOTE: 16713 * Must do this BEFORE calling sd_validate_sense_data! 16714 * sd_validate_sense_data may return the original command in 16715 * which case the pkt will be freed and the flags can no 16716 * longer be touched. 16717 * SD_MUTEX is held through this process until the command 16718 * is dispatched based upon the sense data, so there are 16719 * no race conditions. 16720 */ 16721 (void) sd_mark_rqs_idle(un, sense_xp); 16722 16723 /* 16724 * For a retryable command see if we have valid sense data, if so then 16725 * turn it over to sd_decode_sense() to figure out the right course of 16726 * action. Just fail a non-retryable command. 16727 */ 16728 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16729 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) == 16730 SD_SENSE_DATA_IS_VALID) { 16731 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 16732 } 16733 } else { 16734 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 16735 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16736 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 16737 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 16738 sd_return_failed_command(un, cmd_bp, EIO); 16739 } 16740 } 16741 16742 16743 16744 16745 /* 16746 * Function: sd_handle_auto_request_sense 16747 * 16748 * Description: Processing for auto-request sense information. 
16749 * 16750 * Arguments: un - ptr to associated softstate 16751 * bp - ptr to buf(9S) for the command 16752 * xp - ptr to the sd_xbuf for the command 16753 * pktp - ptr to the scsi_pkt(9S) for the command 16754 * 16755 * Context: May be called under interrupt context 16756 */ 16757 16758 static void 16759 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 16760 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16761 { 16762 struct scsi_arq_status *asp; 16763 size_t actual_len; 16764 16765 ASSERT(un != NULL); 16766 ASSERT(mutex_owned(SD_MUTEX(un))); 16767 ASSERT(bp != NULL); 16768 ASSERT(xp != NULL); 16769 ASSERT(pktp != NULL); 16770 ASSERT(pktp != un->un_rqs_pktp); 16771 ASSERT(bp != un->un_rqs_bp); 16772 16773 /* 16774 * For auto-request sense, we get a scsi_arq_status back from 16775 * the HBA, with the sense data in the sts_sensedata member. 16776 * The pkt_scbp of the packet points to this scsi_arq_status. 16777 */ 16778 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16779 16780 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 16781 /* 16782 * The auto REQUEST SENSE failed; see if we can re-try 16783 * the original command. 16784 */ 16785 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16786 "auto request sense failed (reason=%s)\n", 16787 scsi_rname(asp->sts_rqpkt_reason)); 16788 16789 sd_reset_target(un, pktp); 16790 16791 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16792 NULL, NULL, EIO, (clock_t)0, NULL); 16793 return; 16794 } 16795 16796 /* Save the relevant sense info into the xp for the original cmd. */ 16797 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 16798 xp->xb_sense_state = asp->sts_rqpkt_state; 16799 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16800 if (xp->xb_sense_state & STATE_XARQ_DONE) { 16801 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 16802 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16803 MAX_SENSE_LENGTH); 16804 } else { 16805 if (xp->xb_sense_resid > SENSE_LENGTH) { 16806 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid; 16807 } else { 16808 actual_len = SENSE_LENGTH - xp->xb_sense_resid; 16809 } 16810 if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16811 if ((((struct uscsi_cmd *) 16812 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) { 16813 xp->xb_sense_resid = (((struct uscsi_cmd *) 16814 (xp->xb_pktinfo))->uscsi_rqlen) - 16815 actual_len; 16816 } else { 16817 xp->xb_sense_resid = 0; 16818 } 16819 } 16820 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH); 16821 } 16822 16823 /* 16824 * See if we have valid sense data, if so then turn it over to 16825 * sd_decode_sense() to figure out the right course of action. 16826 */ 16827 if (sd_validate_sense_data(un, bp, xp, actual_len) == 16828 SD_SENSE_DATA_IS_VALID) { 16829 sd_decode_sense(un, bp, xp, pktp); 16830 } 16831 } 16832 16833 16834 /* 16835 * Function: sd_print_sense_failed_msg 16836 * 16837 * Description: Print log message when RQS has failed. 
16838 * 16839 * Arguments: un - ptr to associated softstate 16840 * bp - ptr to buf(9S) for the command 16841 * arg - generic message string ptr 16842 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16843 * or SD_NO_RETRY_ISSUED 16844 * 16845 * Context: May be called from interrupt context 16846 */ 16847 16848 static void 16849 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 16850 int code) 16851 { 16852 char *msgp = arg; 16853 16854 ASSERT(un != NULL); 16855 ASSERT(mutex_owned(SD_MUTEX(un))); 16856 ASSERT(bp != NULL); 16857 16858 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 16859 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 16860 } 16861 } 16862 16863 16864 /* 16865 * Function: sd_validate_sense_data 16866 * 16867 * Description: Check the given sense data for validity. 16868 * If the sense data is not valid, the command will 16869 * be either failed or retried! 16870 * 16871 * Return Code: SD_SENSE_DATA_IS_INVALID 16872 * SD_SENSE_DATA_IS_VALID 16873 * 16874 * Context: May be called from interrupt context 16875 */ 16876 16877 static int 16878 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 16879 size_t actual_len) 16880 { 16881 struct scsi_extended_sense *esp; 16882 struct scsi_pkt *pktp; 16883 char *msgp = NULL; 16884 sd_ssc_t *sscp; 16885 16886 ASSERT(un != NULL); 16887 ASSERT(mutex_owned(SD_MUTEX(un))); 16888 ASSERT(bp != NULL); 16889 ASSERT(bp != un->un_rqs_bp); 16890 ASSERT(xp != NULL); 16891 ASSERT(un->un_fm_private != NULL); 16892 16893 pktp = SD_GET_PKTP(bp); 16894 ASSERT(pktp != NULL); 16895 16896 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc; 16897 ASSERT(sscp != NULL); 16898 16899 /* 16900 * Check the status of the RQS command (auto or manual). 16901 */ 16902 switch (xp->xb_sense_status & STATUS_MASK) { 16903 case STATUS_GOOD: 16904 break; 16905 16906 case STATUS_RESERVATION_CONFLICT: 16907 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16908 return (SD_SENSE_DATA_IS_INVALID); 16909 16910 case STATUS_BUSY: 16911 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16912 "Busy Status on REQUEST SENSE\n"); 16913 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 16914 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 16915 return (SD_SENSE_DATA_IS_INVALID); 16916 16917 case STATUS_QFULL: 16918 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16919 "QFULL Status on REQUEST SENSE\n"); 16920 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 16921 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter); 16922 return (SD_SENSE_DATA_IS_INVALID); 16923 16924 case STATUS_CHECK: 16925 case STATUS_TERMINATED: 16926 msgp = "Check Condition on REQUEST SENSE\n"; 16927 goto sense_failed; 16928 16929 default: 16930 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 16931 goto sense_failed; 16932 } 16933 16934 /* 16935 * See if we got the minimum required amount of sense data. 16936 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 16937 * or less. 
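 * For the decoding below to be meaningful at least the leading
 * fixed-format fields (response code and sense key) must be present;
 * anything shorter than SUN_MIN_SENSE_LENGTH fails validation and
 * sends the original command down the retry path at "sense_failed".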
16938 */ 16939 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 16940 (actual_len == 0)) { 16941 msgp = "Request Sense couldn't get sense data\n"; 16942 goto sense_failed; 16943 } 16944 16945 if (actual_len < SUN_MIN_SENSE_LENGTH) { 16946 msgp = "Not enough sense information\n"; 16947 /* Mark the ssc_flags for detecting invalid sense data */ 16948 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16949 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 16950 "sense-data"); 16951 } 16952 goto sense_failed; 16953 } 16954 16955 /* 16956 * We require the extended sense data 16957 */ 16958 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 16959 if (esp->es_class != CLASS_EXTENDED_SENSE) { 16960 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16961 static char tmp[8]; 16962 static char buf[148]; 16963 char *p = (char *)(xp->xb_sense_data); 16964 int i; 16965 16966 mutex_enter(&sd_sense_mutex); 16967 (void) strcpy(buf, "undecodable sense information:"); 16968 for (i = 0; i < actual_len; i++) { 16969 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 16970 (void) strcpy(&buf[strlen(buf)], tmp); 16971 } 16972 i = strlen(buf); 16973 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 16974 16975 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 16976 scsi_log(SD_DEVINFO(un), sd_label, 16977 CE_WARN, buf); 16978 } 16979 mutex_exit(&sd_sense_mutex); 16980 } 16981 16982 /* Mark the ssc_flags for detecting invalid sense data */ 16983 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 16984 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 16985 "sense-data"); 16986 } 16987 16988 /* Note: Legacy behavior, fail the command with no retry */ 16989 sd_return_failed_command(un, bp, EIO); 16990 return (SD_SENSE_DATA_IS_INVALID); 16991 } 16992 16993 /* 16994 * Check that es_code is valid (es_class concatenated with es_code 16995 * make up the "response code" field). es_class will always be 7, so 16996 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 16997 * format. 16998 */ 16999 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 17000 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 17001 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 17002 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 17003 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 17004 /* Mark the ssc_flags for detecting invalid sense data */ 17005 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) { 17006 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0, 17007 "sense-data"); 17008 } 17009 goto sense_failed; 17010 } 17011 17012 return (SD_SENSE_DATA_IS_VALID); 17013 17014 sense_failed: 17015 /* 17016 * If the request sense failed (for whatever reason), attempt 17017 * to retry the original command. 17018 */ 17019 #if defined(__i386) || defined(__amd64) 17020 /* 17021 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 17022 * sddef.h for the SPARC platform, while x86 uses one binary 17023 * for both SCSI and FC. 17024 * The SD_RETRY_DELAY value needs to be adjusted here 17025 * whenever SD_RETRY_DELAY changes in sddef.h 17026 */ 17027 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17028 sd_print_sense_failed_msg, msgp, EIO, 17029 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 17030 #else 17031 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 17032 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 17033 #endif 17034 17035 return (SD_SENSE_DATA_IS_INVALID); 17036 } 17037 17038 /* 17039 * Function: sd_decode_sense 17040 * 17041 * Description: Take recovery action(s) when SCSI Sense Data is received. 17042 * 17043 * Context: Interrupt context.
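 * Dispatch sketch for the switch below (standard SCSI sense keys):
 *	0x0 NO SENSE          -> sd_sense_key_no_sense()
 *	0x1 RECOVERED ERROR   -> sd_sense_key_recoverable_error()
 *	0x2 NOT READY         -> sd_sense_key_not_ready()
 *	0x3, 0x4 MEDIUM/HW    -> sd_sense_key_medium_or_hardware_error()
 *	0x5 ILLEGAL REQUEST   -> sd_sense_key_illegal_request()
 *	0x6 UNIT ATTENTION    -> sd_sense_key_unit_attention()
 *	0x7, 0xD, 0xE         -> sd_sense_key_fail_command()
 *	0x8 BLANK CHECK       -> sd_sense_key_blank_check()
 *	0xB ABORTED COMMAND   -> sd_sense_key_aborted_command()
 *	all others            -> sd_sense_key_default()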
17044 */ 17045 17046 static void 17047 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 17048 struct scsi_pkt *pktp) 17049 { 17050 uint8_t sense_key; 17051 17052 ASSERT(un != NULL); 17053 ASSERT(mutex_owned(SD_MUTEX(un))); 17054 ASSERT(bp != NULL); 17055 ASSERT(bp != un->un_rqs_bp); 17056 ASSERT(xp != NULL); 17057 ASSERT(pktp != NULL); 17058 17059 sense_key = scsi_sense_key(xp->xb_sense_data); 17060 17061 switch (sense_key) { 17062 case KEY_NO_SENSE: 17063 sd_sense_key_no_sense(un, bp, xp, pktp); 17064 break; 17065 case KEY_RECOVERABLE_ERROR: 17066 sd_sense_key_recoverable_error(un, xp->xb_sense_data, 17067 bp, xp, pktp); 17068 break; 17069 case KEY_NOT_READY: 17070 sd_sense_key_not_ready(un, xp->xb_sense_data, 17071 bp, xp, pktp); 17072 break; 17073 case KEY_MEDIUM_ERROR: 17074 case KEY_HARDWARE_ERROR: 17075 sd_sense_key_medium_or_hardware_error(un, 17076 xp->xb_sense_data, bp, xp, pktp); 17077 break; 17078 case KEY_ILLEGAL_REQUEST: 17079 sd_sense_key_illegal_request(un, bp, xp, pktp); 17080 break; 17081 case KEY_UNIT_ATTENTION: 17082 sd_sense_key_unit_attention(un, xp->xb_sense_data, 17083 bp, xp, pktp); 17084 break; 17085 case KEY_WRITE_PROTECT: 17086 case KEY_VOLUME_OVERFLOW: 17087 case KEY_MISCOMPARE: 17088 sd_sense_key_fail_command(un, bp, xp, pktp); 17089 break; 17090 case KEY_BLANK_CHECK: 17091 sd_sense_key_blank_check(un, bp, xp, pktp); 17092 break; 17093 case KEY_ABORTED_COMMAND: 17094 sd_sense_key_aborted_command(un, bp, xp, pktp); 17095 break; 17096 case KEY_VENDOR_UNIQUE: 17097 case KEY_COPY_ABORTED: 17098 case KEY_EQUAL: 17099 case KEY_RESERVED: 17100 default: 17101 sd_sense_key_default(un, xp->xb_sense_data, 17102 bp, xp, pktp); 17103 break; 17104 } 17105 } 17106 17107 17108 /* 17109 * Function: sd_dump_memory 17110 * 17111 * Description: Debug logging routine to print the contents of a user-provided 17112 * buffer. The output of the buffer is broken up into 256-byte 17113 * segments due to a size constraint of the scsi_log 17114 * implementation. 17115 * 17116 * Arguments: un - ptr to softstate 17117 * comp - component mask 17118 * title - "title" string to precede data when printed 17119 * data - ptr to data block to be printed 17120 * len - size of data block to be printed 17121 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 17122 * 17123 * Context: May be called from interrupt context 17124 */ 17125 17126 #define SD_DUMP_MEMORY_BUF_SIZE 256 17127 17128 static char *sd_dump_format_string[] = { 17129 " 0x%02x", 17130 " %c" 17131 }; 17132 17133 static void 17134 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 17135 int len, int fmt) 17136 { 17137 int i, j; 17138 int avail_count; 17139 int start_offset; 17140 int end_offset; 17141 size_t entry_len; 17142 char *bufp; 17143 char *local_buf; 17144 char *format_string; 17145 17146 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 17147 17148 /* 17149 * In the debug version of the driver, this function is called from a 17150 * number of places which are NOPs in the release driver. 17151 * The debug driver therefore has additional methods of filtering 17152 * debug output. 17153 */ 17154 #ifdef SDDEBUG 17155 /* 17156 * In the debug version of the driver we can reduce the amount of debug 17157 * messages by setting sd_error_level to something other than 17158 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 17159 * sd_component_mask.
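 * Conversely (debug builds only), to see these memory dumps one
 * would typically set sd_error_level to SCSI_ERR_ALL, include
 * SD_LOGMASK_DUMP_MEM (or SD_LOGMASK_DIAG) in sd_level_mask, and set
 * the bit for the component of interest in sd_component_mask; the
 * tests below return early unless all three conditions hold.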
17160 */ 17161 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 17162 (sd_error_level != SCSI_ERR_ALL)) { 17163 return; 17164 } 17165 if (((sd_component_mask & comp) == 0) || 17166 (sd_error_level != SCSI_ERR_ALL)) { 17167 return; 17168 } 17169 #else 17170 if (sd_error_level != SCSI_ERR_ALL) { 17171 return; 17172 } 17173 #endif 17174 17175 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 17176 bufp = local_buf; 17177 /* 17178 * Available length is the length of local_buf[], minus the 17179 * length of the title string, minus one for the ":", minus 17180 * one for the newline, minus one for the NULL terminator. 17181 * This gives the #bytes available for holding the printed 17182 * values from the given data buffer. 17183 */ 17184 if (fmt == SD_LOG_HEX) { 17185 format_string = sd_dump_format_string[0]; 17186 } else /* SD_LOG_CHAR */ { 17187 format_string = sd_dump_format_string[1]; 17188 } 17189 /* 17190 * Available count is the number of elements from the given 17191 * data buffer that we can fit into the available length. 17192 * This is based upon the size of the format string used. 17193 * Make one entry and find its size. 17194 */ 17195 (void) sprintf(bufp, format_string, data[0]); 17196 entry_len = strlen(bufp); 17197 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 17198 17199 j = 0; 17200 while (j < len) { 17201 bufp = local_buf; 17202 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 17203 start_offset = j; 17204 17205 end_offset = start_offset + avail_count; 17206 17207 (void) sprintf(bufp, "%s:", title); 17208 bufp += strlen(bufp); 17209 for (i = start_offset; ((i < end_offset) && (j < len)); 17210 i++, j++) { 17211 (void) sprintf(bufp, format_string, data[i]); 17212 bufp += entry_len; 17213 } 17214 (void) sprintf(bufp, "\n"); 17215 17216 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 17217 } 17218 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 17219 } 17220 17221 /* 17222 * Function: sd_print_sense_msg 17223 * 17224 * Description: Log a message based upon the given sense data.
17225 * 17226 * Arguments: un - ptr to associated softstate 17227 * bp - ptr to buf(9S) for the command 17228 * arg - ptr to the associated sd_sense_info struct 17229 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17230 * or SD_NO_RETRY_ISSUED 17231 * 17232 * Context: May be called from interrupt context 17233 */ 17234 17235 static void 17236 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 17237 { 17238 struct sd_xbuf *xp; 17239 struct scsi_pkt *pktp; 17240 uint8_t *sensep; 17241 daddr_t request_blkno; 17242 diskaddr_t err_blkno; 17243 int severity; 17244 int pfa_flag; 17245 extern struct scsi_key_strings scsi_cmds[]; 17246 17247 ASSERT(un != NULL); 17248 ASSERT(mutex_owned(SD_MUTEX(un))); 17249 ASSERT(bp != NULL); 17250 xp = SD_GET_XBUF(bp); 17251 ASSERT(xp != NULL); 17252 pktp = SD_GET_PKTP(bp); 17253 ASSERT(pktp != NULL); 17254 ASSERT(arg != NULL); 17255 17256 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 17257 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 17258 17259 if ((code == SD_DELAYED_RETRY_ISSUED) || 17260 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 17261 severity = SCSI_ERR_RETRYABLE; 17262 } 17263 17264 /* Use absolute block number for the request block number */ 17265 request_blkno = xp->xb_blkno; 17266 17267 /* 17268 * Now try to get the error block number from the sense data 17269 */ 17270 sensep = xp->xb_sense_data; 17271 17272 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH, 17273 (uint64_t *)&err_blkno)) { 17274 /* 17275 * We retrieved the error block number from the information 17276 * portion of the sense data. 17277 * 17278 * For USCSI commands we are better off using the error 17279 * block no. as the requested block no. (This is the best 17280 * we can estimate.) 17281 */ 17282 if ((SD_IS_BUFIO(xp) == FALSE) && 17283 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 17284 request_blkno = err_blkno; 17285 } 17286 } else { 17287 /* 17288 * Without the es_valid bit set (for fixed format) or an 17289 * information descriptor (for descriptor format) we cannot 17290 * be certain of the error blkno, so just use the 17291 * request_blkno. 17292 */ 17293 err_blkno = (diskaddr_t)request_blkno; 17294 } 17295 17296 /* 17297 * The following will log the buffer contents for the release driver 17298 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 17299 * level is set to verbose.
17300 */ 17301 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 17302 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 17303 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 17304 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 17305 17306 if (pfa_flag == FALSE) { 17307 /* This is normally only set for USCSI */ 17308 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 17309 return; 17310 } 17311 17312 if ((SD_IS_BUFIO(xp) == TRUE) && 17313 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 17314 (severity < sd_error_level))) { 17315 return; 17316 } 17317 } 17318 /* 17319 * Check for Sonoma Failover and keep a count of how many failed I/O's 17320 */ 17321 if ((SD_IS_LSI(un)) && 17322 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) && 17323 (scsi_sense_asc(sensep) == 0x94) && 17324 (scsi_sense_ascq(sensep) == 0x01)) { 17325 un->un_sonoma_failure_count++; 17326 if (un->un_sonoma_failure_count > 1) { 17327 return; 17328 } 17329 } 17330 17331 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP || 17332 ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) && 17333 (pktp->pkt_resid == 0))) { 17334 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 17335 request_blkno, err_blkno, scsi_cmds, 17336 (struct scsi_extended_sense *)sensep, 17337 un->un_additional_codes, NULL); 17338 } 17339 } 17340 17341 /* 17342 * Function: sd_sense_key_no_sense 17343 * 17344 * Description: Recovery action when sense data was not received. 17345 * 17346 * Context: May be called from interrupt context 17347 */ 17348 17349 static void 17350 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 17351 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17352 { 17353 struct sd_sense_info si; 17354 17355 ASSERT(un != NULL); 17356 ASSERT(mutex_owned(SD_MUTEX(un))); 17357 ASSERT(bp != NULL); 17358 ASSERT(xp != NULL); 17359 ASSERT(pktp != NULL); 17360 17361 si.ssi_severity = SCSI_ERR_FATAL; 17362 si.ssi_pfa_flag = FALSE; 17363 17364 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17365 17366 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17367 &si, EIO, (clock_t)0, NULL); 17368 } 17369 17370 17371 /* 17372 * Function: sd_sense_key_recoverable_error 17373 * 17374 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 17375 * 17376 * Context: May be called from interrupt context 17377 */ 17378 17379 static void 17380 sd_sense_key_recoverable_error(struct sd_lun *un, 17381 uint8_t *sense_datap, 17382 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17383 { 17384 struct sd_sense_info si; 17385 uint8_t asc = scsi_sense_asc(sense_datap); 17386 17387 ASSERT(un != NULL); 17388 ASSERT(mutex_owned(SD_MUTEX(un))); 17389 ASSERT(bp != NULL); 17390 ASSERT(xp != NULL); 17391 ASSERT(pktp != NULL); 17392 17393 /* 17394 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 17395 */ 17396 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 17397 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17398 si.ssi_severity = SCSI_ERR_INFO; 17399 si.ssi_pfa_flag = TRUE; 17400 } else { 17401 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17402 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 17403 si.ssi_severity = SCSI_ERR_RECOVERED; 17404 si.ssi_pfa_flag = FALSE; 17405 } 17406 17407 if (pktp->pkt_resid == 0) { 17408 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17409 sd_return_command(un, bp); 17410 return; 17411 } 17412 17413 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17414 &si, EIO, (clock_t)0, NULL); 17415 } 17416 17417 17418 17419 17420 /* 17421 * Function: sd_sense_key_not_ready 17422 * 17423 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
17424 * 17425 * Context: May be called from interrupt context 17426 */ 17427 17428 static void 17429 sd_sense_key_not_ready(struct sd_lun *un, 17430 uint8_t *sense_datap, 17431 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17432 { 17433 struct sd_sense_info si; 17434 uint8_t asc = scsi_sense_asc(sense_datap); 17435 uint8_t ascq = scsi_sense_ascq(sense_datap); 17436 17437 ASSERT(un != NULL); 17438 ASSERT(mutex_owned(SD_MUTEX(un))); 17439 ASSERT(bp != NULL); 17440 ASSERT(xp != NULL); 17441 ASSERT(pktp != NULL); 17442 17443 si.ssi_severity = SCSI_ERR_FATAL; 17444 si.ssi_pfa_flag = FALSE; 17445 17446 /* 17447 * Update error stats after first NOT READY error. Disks may have 17448 * been powered down and may need to be restarted. For CDROMs, 17449 * report NOT READY errors only if media is present. 17450 */ 17451 if ((ISCD(un) && (asc == 0x3A)) || 17452 (xp->xb_nr_retry_count > 0)) { 17453 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17454 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 17455 } 17456 17457 /* 17458 * Just fail if the "not ready" retry limit has been reached. 17459 */ 17460 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) { 17461 /* Special check for error message printing for removables. */ 17462 if (un->un_f_has_removable_media && (asc == 0x04) && 17463 (ascq >= 0x04)) { 17464 si.ssi_severity = SCSI_ERR_ALL; 17465 } 17466 goto fail_command; 17467 } 17468 17469 /* 17470 * Check the ASC and ASCQ in the sense data as needed, to determine 17471 * what to do. 17472 */ 17473 switch (asc) { 17474 case 0x04: /* LOGICAL UNIT NOT READY */ 17475 /* 17476 * Disk drives that don't spin up result in a very long delay 17477 * in format without warning messages. We will log a message 17478 * if the error level is set to verbose. 17479 */ 17480 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17481 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17482 "logical unit not ready, resetting disk\n"); 17483 } 17484 17485 /* 17486 * There are different requirements for CDROMs and disks for 17487 * the number of retries. If a CD-ROM is reporting this, it is 17488 * probably reading the TOC and is in the process of getting 17489 * ready, so we should keep on trying for a long time to make 17490 * sure that all types of media are taken into account (for 17491 * some media the drive takes a long time to read the TOC). For 17492 * disks we do not want to retry this too many times as this 17493 * can cause a long hang in format when the drive refuses to 17494 * spin up (a very common failure). 17495 */ 17496 switch (ascq) { 17497 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 17498 /* 17499 * Disk drives frequently refuse to spin up which 17500 * results in a very long hang in format without 17501 * warning messages. 17502 * 17503 * Note: This code preserves the legacy behavior of 17504 * comparing xb_nr_retry_count against zero for fibre 17505 * channel targets instead of comparing against the 17506 * un_reset_retry_count value. The reason for this 17507 * discrepancy has been so utterly lost beneath the 17508 * Sands of Time that even Indiana Jones could not 17509 * find it.
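 * In effect the two branches below differ only in the retry-count
 * threshold that triggers the reset:
 *	fibre:     xp->xb_nr_retry_count > 0
 *	non-fibre: xp->xb_nr_retry_count > un->un_reset_retry_count
 * In both cases SD_LOGMASK_DIAG forces the reset path, and the reset
 * is suppressed while a START STOP UNIT timeout is pending.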
17510 */ 17511 if (un->un_f_is_fibre == TRUE) { 17512 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17513 (xp->xb_nr_retry_count > 0)) && 17514 (un->un_startstop_timeid == NULL)) { 17515 scsi_log(SD_DEVINFO(un), sd_label, 17516 CE_WARN, "logical unit not ready, " 17517 "resetting disk\n"); 17518 sd_reset_target(un, pktp); 17519 } 17520 } else { 17521 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17522 (xp->xb_nr_retry_count > 17523 un->un_reset_retry_count)) && 17524 (un->un_startstop_timeid == NULL)) { 17525 scsi_log(SD_DEVINFO(un), sd_label, 17526 CE_WARN, "logical unit not ready, " 17527 "resetting disk\n"); 17528 sd_reset_target(un, pktp); 17529 } 17530 } 17531 break; 17532 17533 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 17534 /* 17535 * If the target is in the process of becoming 17536 * ready, just proceed with the retry. This can 17537 * happen with CD-ROMs that take a long time to 17538 * read the TOC after a power cycle or reset. 17539 */ 17540 goto do_retry; 17541 17542 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 17543 break; 17544 17545 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 17546 /* 17547 * Retries cannot help here so just fail right away. 17548 */ 17549 goto fail_command; 17550 17551 case 0x88: 17552 /* 17553 * Vendor-unique code for T3/T4: it indicates a 17554 * path problem in a multipathed config, but as far as 17555 * the target driver is concerned it equates to a fatal 17556 * error, so we should just fail the command right away 17557 * (without printing anything to the console). If this 17558 * is not a T3/T4, fall thru to the default recovery 17559 * action. 17560 * T3/T4 is FC only, so there is no need to check is_fibre 17561 */ 17562 if (SD_IS_T3(un) || SD_IS_T4(un)) { 17563 sd_return_failed_command(un, bp, EIO); 17564 return; 17565 } 17566 /* FALLTHRU */ 17567 17568 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 17569 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 17570 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 17571 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 17572 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 17573 default: /* Possible future codes in SCSI spec? */ 17574 /* 17575 * For removable-media devices, do not retry if 17576 * ASCQ > 2 as these result mostly from USCSI commands 17577 * on MMC devices issued to check status of an 17578 * operation initiated in immediate mode. Also for 17579 * ASCQ >= 4 do not print console messages as these 17580 * mainly represent a user-initiated operation 17581 * instead of a system failure. 17582 */ 17583 if (un->un_f_has_removable_media) { 17584 si.ssi_severity = SCSI_ERR_ALL; 17585 goto fail_command; 17586 } 17587 break; 17588 } 17589 17590 /* 17591 * As part of our recovery attempt for the NOT READY 17592 * condition, we issue a START STOP UNIT command. However 17593 * we want to wait for a short delay before attempting this 17594 * as there may still be more commands coming back from the 17595 * target with the check condition. To do this we use 17596 * timeout(9F) to call sd_start_stop_unit_callback() after 17597 * the delay interval expires. (sd_start_stop_unit_callback() 17598 * dispatches sd_start_stop_unit_task(), which will issue 17599 * the actual START STOP UNIT command.) The delay interval 17600 * is one-half of the delay that we will use to retry the 17601 * command that generated the NOT READY condition.
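 * (Illustrative timing, with a hypothetical un_busy_timeout of 30
 * seconds: the START STOP UNIT task would be dispatched after about
 * 15 seconds, and the failed command is re-queued to run only after
 * the START STOP UNIT completes, as noted below.)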
17602 * 17603 * Note that we could just dispatch sd_start_stop_unit_task() 17604 * from here and allow it to sleep for the delay interval, 17605 * but then we would be tying up the taskq thread 17606 * unnecessarily for the duration of the delay. 17607 * 17608 * Do not issue the START STOP UNIT if the current command 17609 * is already a START STOP UNIT. 17610 */ 17611 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 17612 break; 17613 } 17614 17615 /* 17616 * Do not schedule the timeout if one is already pending. 17617 */ 17618 if (un->un_startstop_timeid != NULL) { 17619 SD_INFO(SD_LOG_ERROR, un, 17620 "sd_sense_key_not_ready: restart already issued to" 17621 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 17622 ddi_get_instance(SD_DEVINFO(un))); 17623 break; 17624 } 17625 17626 /* 17627 * Schedule the START STOP UNIT command, then queue the command 17628 * for a retry. 17629 * 17630 * Note: A timeout is not scheduled for this retry because we 17631 * want the retry to be serial with the START_STOP_UNIT. The 17632 * retry will be started when the START_STOP_UNIT is completed 17633 * in sd_start_stop_unit_task. 17634 */ 17635 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 17636 un, un->un_busy_timeout / 2); 17637 xp->xb_nr_retry_count++; 17638 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 17639 return; 17640 17641 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 17642 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17643 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17644 "unit does not respond to selection\n"); 17645 } 17646 break; 17647 17648 case 0x3A: /* MEDIUM NOT PRESENT */ 17649 if (sd_error_level >= SCSI_ERR_FATAL) { 17650 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17651 "Caddy not inserted in drive\n"); 17652 } 17653 17654 sr_ejected(un); 17655 un->un_mediastate = DKIO_EJECTED; 17656 /* The state has changed, inform the media watch routines */ 17657 cv_broadcast(&un->un_state_cv); 17658 /* Just fail if no media is present in the drive. */ 17659 goto fail_command; 17660 17661 default: 17662 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17663 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 17664 "Unit not Ready. Additional sense code 0x%x\n", 17665 asc); 17666 } 17667 break; 17668 } 17669 17670 do_retry: 17671 17672 /* 17673 * Retry the command, as some targets may report NOT READY for 17674 * several seconds after being reset. 17675 */ 17676 xp->xb_nr_retry_count++; 17677 si.ssi_severity = SCSI_ERR_RETRYABLE; 17678 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17679 &si, EIO, un->un_busy_timeout, NULL); 17680 17681 return; 17682 17683 fail_command: 17684 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17685 sd_return_failed_command(un, bp, EIO); 17686 } 17687 17688 17689 17690 /* 17691 * Function: sd_sense_key_medium_or_hardware_error 17692 * 17693 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 17694 * sense key.
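 * Recovery sketch: once xb_retry_count reaches un_reset_retry_count,
 * and bus/device resets are allowed, the code below prefers the
 * narrower reset scope first:
 *	if (un->un_f_lun_reset_enabled)
 *		reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
 *	if (reset_retval == 0)
 *		(void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
 * The LSI "LUN Dead" case (ASC 0x84) skips the reset entirely, and
 * the command is retried afterwards regardless.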
17695 * 17696 * Context: May be called from interrupt context 17697 */ 17698 17699 static void 17700 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 17701 uint8_t *sense_datap, 17702 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17703 { 17704 struct sd_sense_info si; 17705 uint8_t sense_key = scsi_sense_key(sense_datap); 17706 uint8_t asc = scsi_sense_asc(sense_datap); 17707 17708 ASSERT(un != NULL); 17709 ASSERT(mutex_owned(SD_MUTEX(un))); 17710 ASSERT(bp != NULL); 17711 ASSERT(xp != NULL); 17712 ASSERT(pktp != NULL); 17713 17714 si.ssi_severity = SCSI_ERR_FATAL; 17715 si.ssi_pfa_flag = FALSE; 17716 17717 if (sense_key == KEY_MEDIUM_ERROR) { 17718 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 17719 } 17720 17721 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17722 17723 if ((un->un_reset_retry_count != 0) && 17724 (xp->xb_retry_count == un->un_reset_retry_count)) { 17725 mutex_exit(SD_MUTEX(un)); 17726 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 17727 if (un->un_f_allow_bus_device_reset == TRUE) { 17728 17729 boolean_t try_resetting_target = B_TRUE; 17730 17731 /* 17732 * We need to be able to handle specific ASC values when 17733 * we are handling a KEY_HARDWARE_ERROR. In particular 17734 * taking the default action of resetting the target may 17735 * not be the appropriate way to attempt recovery. 17736 * Resetting a target because of a single LUN failure 17737 * victimizes all LUNs on that target. 17738 * 17739 * This is true for LSI arrays: if an LSI 17740 * array controller returns an ASC of 0x84 (LUN Dead), 17741 * we should trust it. 17742 */ 17743 17744 if (sense_key == KEY_HARDWARE_ERROR) { 17745 switch (asc) { 17746 case 0x84: 17747 if (SD_IS_LSI(un)) { 17748 try_resetting_target = B_FALSE; 17749 } 17750 break; 17751 default: 17752 break; 17753 } 17754 } 17755 17756 if (try_resetting_target == B_TRUE) { 17757 int reset_retval = 0; 17758 if (un->un_f_lun_reset_enabled == TRUE) { 17759 SD_TRACE(SD_LOG_IO_CORE, un, 17760 "sd_sense_key_medium_or_hardware_" 17761 "error: issuing RESET_LUN\n"); 17762 reset_retval = 17763 scsi_reset(SD_ADDRESS(un), 17764 RESET_LUN); 17765 } 17766 if (reset_retval == 0) { 17767 SD_TRACE(SD_LOG_IO_CORE, un, 17768 "sd_sense_key_medium_or_hardware_" 17769 "error: issuing RESET_TARGET\n"); 17770 (void) scsi_reset(SD_ADDRESS(un), 17771 RESET_TARGET); 17772 } 17773 } 17774 } 17775 mutex_enter(SD_MUTEX(un)); 17776 } 17777 17778 /* 17779 * This really ought to be a fatal error, but we will retry anyway 17780 * as some drives report this as a spurious error. 17781 */ 17782 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17783 &si, EIO, (clock_t)0, NULL); 17784 } 17785 17786 17787 17788 /* 17789 * Function: sd_sense_key_illegal_request 17790 * 17791 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
17792 * 17793 * Context: May be called from interrupt context 17794 */ 17795 17796 static void 17797 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 17798 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17799 { 17800 struct sd_sense_info si; 17801 17802 ASSERT(un != NULL); 17803 ASSERT(mutex_owned(SD_MUTEX(un))); 17804 ASSERT(bp != NULL); 17805 ASSERT(xp != NULL); 17806 ASSERT(pktp != NULL); 17807 17808 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 17809 17810 si.ssi_severity = SCSI_ERR_INFO; 17811 si.ssi_pfa_flag = FALSE; 17812 17813 /* Pointless to retry if the target thinks it's an illegal request */ 17814 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17815 sd_return_failed_command(un, bp, EIO); 17816 } 17817 17818 17819 17820 17821 /* 17822 * Function: sd_sense_key_unit_attention 17823 * 17824 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 17825 * 17826 * Context: May be called from interrupt context 17827 */ 17828 17829 static void 17830 sd_sense_key_unit_attention(struct sd_lun *un, 17831 uint8_t *sense_datap, 17832 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17833 { 17834 /* 17835 * For UNIT ATTENTION we allow retries for one minute. Devices 17836 * like Sonoma can return UNIT ATTENTION close to a minute 17837 * under certain conditions. 17838 */ 17839 int retry_check_flag = SD_RETRIES_UA; 17840 boolean_t kstat_updated = B_FALSE; 17841 struct sd_sense_info si; 17842 uint8_t asc = scsi_sense_asc(sense_datap); 17843 uint8_t ascq = scsi_sense_ascq(sense_datap); 17844 17845 ASSERT(un != NULL); 17846 ASSERT(mutex_owned(SD_MUTEX(un))); 17847 ASSERT(bp != NULL); 17848 ASSERT(xp != NULL); 17849 ASSERT(pktp != NULL); 17850 17851 si.ssi_severity = SCSI_ERR_INFO; 17852 si.ssi_pfa_flag = FALSE; 17853 17854 17855 switch (asc) { 17856 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 17857 if (sd_report_pfa != 0) { 17858 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17859 si.ssi_pfa_flag = TRUE; 17860 retry_check_flag = SD_RETRIES_STANDARD; 17861 goto do_retry; 17862 } 17863 17864 break; 17865 17866 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 17867 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 17868 un->un_resvd_status |= 17869 (SD_LOST_RESERVE | SD_WANT_RESERVE); 17870 } 17871 #ifdef _LP64 17872 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) { 17873 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task, 17874 un, KM_NOSLEEP) == 0) { 17875 /* 17876 * If we can't dispatch the task we'll just 17877 * live without descriptor sense. We can 17878 * try again on the next "unit attention" 17879 */ 17880 SD_ERROR(SD_LOG_ERROR, un, 17881 "sd_sense_key_unit_attention: " 17882 "Could not dispatch " 17883 "sd_reenable_dsense_task\n"); 17884 } 17885 } 17886 #endif /* _LP64 */ 17887 /* FALLTHRU */ 17888 17889 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 17890 if (!un->un_f_has_removable_media) { 17891 break; 17892 } 17893 17894 /* 17895 * When we get a unit attention from a removable-media device, 17896 * it may be in a state that will take a long time to recover 17897 * (e.g., from a reset). Since we are executing in interrupt 17898 * context here, we cannot wait around for the device to come 17899 * back. So hand this command off to sd_media_change_task() 17900 * for deferred processing under taskq thread context. (Note 17901 * that the command still may be failed if a problem is 17902 * encountered at a later time.) 
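 * The deferral itself is just a taskq dispatch, with the pkt pointer
 * as the argument so the task can recover un/bp/xp from it; a zero
 * return from taskq_dispatch() means the dispatch (and therefore the
 * command) failed:
 *	if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
 *	    KM_NOSLEEP) == 0)
 *		-> fail the command with EIO (below)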
17903 */ 17904 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 17905 KM_NOSLEEP) == 0) { 17906 /* 17907 * Cannot dispatch the request so fail the command. 17908 */ 17909 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17910 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17911 si.ssi_severity = SCSI_ERR_FATAL; 17912 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17913 sd_return_failed_command(un, bp, EIO); 17914 } 17915 17916 /* 17917 * If we failed to dispatch sd_media_change_task(), the kstats 17918 * were already updated above. If the dispatch succeeded, the 17919 * kstats will be updated later if an error is encountered, so 17920 * just set the kstat_updated flag here. 17921 */ 17922 kstat_updated = B_TRUE; 17923 17924 /* 17925 * Either the command has been successfully dispatched to a 17926 * task Q for retrying, or the dispatch failed. In either case 17927 * do NOT retry again by calling sd_retry_command. This sets up 17928 * two retries of the same command and when one completes and 17929 * frees the resources the other will access freed memory, 17930 * a bad thing. 17931 */ 17932 return; 17933 17934 default: 17935 break; 17936 } 17937 17938 /* 17939 * ASC ASCQ 17940 * 2A 09 Capacity data has changed 17941 * 2A 01 Mode parameters changed 17942 * 3F 0E Reported luns data has changed 17943 * Arrays that support logical unit expansion should report 17944 * capacity changes (2Ah/09). "Mode parameters changed" and 17945 * "reported luns data has changed" are approximations of this. 17946 */ 17947 if (((asc == 0x2a) && (ascq == 0x09)) || 17948 ((asc == 0x2a) && (ascq == 0x01)) || 17949 ((asc == 0x3f) && (ascq == 0x0e))) { 17950 if (taskq_dispatch(sd_tq, sd_target_change_task, un, 17951 KM_NOSLEEP) == 0) { 17952 SD_ERROR(SD_LOG_ERROR, un, 17953 "sd_sense_key_unit_attention: " 17954 "Could not dispatch sd_target_change_task\n"); 17955 } 17956 } 17957 17958 /* 17959 * Update kstat if we haven't done that. 17960 */ 17961 if (!kstat_updated) { 17962 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17963 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17964 } 17965 17966 do_retry: 17967 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 17968 EIO, SD_UA_RETRY_DELAY, NULL); 17969 } 17970 17971 17972 17973 /* 17974 * Function: sd_sense_key_fail_command 17975 * 17976 * Description: Used to fail a command when we don't like the sense key that 17977 * was returned. 17978 * 17979 * Context: May be called from interrupt context 17980 */ 17981 17982 static void 17983 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 17984 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17985 { 17986 struct sd_sense_info si; 17987 17988 ASSERT(un != NULL); 17989 ASSERT(mutex_owned(SD_MUTEX(un))); 17990 ASSERT(bp != NULL); 17991 ASSERT(xp != NULL); 17992 ASSERT(pktp != NULL); 17993 17994 si.ssi_severity = SCSI_ERR_FATAL; 17995 si.ssi_pfa_flag = FALSE; 17996 17997 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17998 sd_return_failed_command(un, bp, EIO); 17999 } 18000 18001 18002 18003 /* 18004 * Function: sd_sense_key_blank_check 18005 * 18006 * Description: Recovery actions for a SCSI "Blank Check" sense key. 18007 * Has no monetary connotation.
18008 * 18009 * Context: May be called from interrupt context 18010 */ 18011 18012 static void 18013 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 18014 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18015 { 18016 struct sd_sense_info si; 18017 18018 ASSERT(un != NULL); 18019 ASSERT(mutex_owned(SD_MUTEX(un))); 18020 ASSERT(bp != NULL); 18021 ASSERT(xp != NULL); 18022 ASSERT(pktp != NULL); 18023 18024 /* 18025 * Blank check is not fatal for removable devices, therefore 18026 * it does not require a console message. 18027 */ 18028 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL : 18029 SCSI_ERR_FATAL; 18030 si.ssi_pfa_flag = FALSE; 18031 18032 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18033 sd_return_failed_command(un, bp, EIO); 18034 } 18035 18036 18037 18038 18039 /* 18040 * Function: sd_sense_key_aborted_command 18041 * 18042 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 18043 * 18044 * Context: May be called from interrupt context 18045 */ 18046 18047 static void 18048 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 18049 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18050 { 18051 struct sd_sense_info si; 18052 18053 ASSERT(un != NULL); 18054 ASSERT(mutex_owned(SD_MUTEX(un))); 18055 ASSERT(bp != NULL); 18056 ASSERT(xp != NULL); 18057 ASSERT(pktp != NULL); 18058 18059 si.ssi_severity = SCSI_ERR_FATAL; 18060 si.ssi_pfa_flag = FALSE; 18061 18062 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18063 18064 /* 18065 * This really ought to be a fatal error, but we will retry anyway 18066 * as some drives report this as a spurious error. 18067 */ 18068 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18069 &si, EIO, drv_usectohz(100000), NULL); 18070 } 18071 18072 18073 18074 /* 18075 * Function: sd_sense_key_default 18076 * 18077 * Description: Default recovery action for several SCSI sense keys (basically 18078 * attempts a retry). 18079 * 18080 * Context: May be called from interrupt context 18081 */ 18082 18083 static void 18084 sd_sense_key_default(struct sd_lun *un, 18085 uint8_t *sense_datap, 18086 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 18087 { 18088 struct sd_sense_info si; 18089 uint8_t sense_key = scsi_sense_key(sense_datap); 18090 18091 ASSERT(un != NULL); 18092 ASSERT(mutex_owned(SD_MUTEX(un))); 18093 ASSERT(bp != NULL); 18094 ASSERT(xp != NULL); 18095 ASSERT(pktp != NULL); 18096 18097 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18098 18099 /* 18100 * Undecoded sense key. Attempt retries and hope that will fix 18101 * the problem. Otherwise, we're dead. 18102 */ 18103 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 18104 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18105 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 18106 } 18107 18108 si.ssi_severity = SCSI_ERR_FATAL; 18109 si.ssi_pfa_flag = FALSE; 18110 18111 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 18112 &si, EIO, (clock_t)0, NULL); 18113 } 18114 18115 18116 18117 /* 18118 * Function: sd_print_retry_msg 18119 * 18120 * Description: Print a message indicating the retry action being taken. 18121 * 18122 * Arguments: un - ptr to associated softstate 18123 * bp - ptr to buf(9S) for the command 18124 * arg - not used. 
18125 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18126 * or SD_NO_RETRY_ISSUED 18127 * 18128 * Context: May be called from interrupt context 18129 */ 18130 /* ARGSUSED */ 18131 static void 18132 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 18133 { 18134 struct sd_xbuf *xp; 18135 struct scsi_pkt *pktp; 18136 char *reasonp; 18137 char *msgp; 18138 18139 ASSERT(un != NULL); 18140 ASSERT(mutex_owned(SD_MUTEX(un))); 18141 ASSERT(bp != NULL); 18142 pktp = SD_GET_PKTP(bp); 18143 ASSERT(pktp != NULL); 18144 xp = SD_GET_XBUF(bp); 18145 ASSERT(xp != NULL); 18146 18147 ASSERT(!mutex_owned(&un->un_pm_mutex)); 18148 mutex_enter(&un->un_pm_mutex); 18149 if ((un->un_state == SD_STATE_SUSPENDED) || 18150 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 18151 (pktp->pkt_flags & FLAG_SILENT)) { 18152 mutex_exit(&un->un_pm_mutex); 18153 goto update_pkt_reason; 18154 } 18155 mutex_exit(&un->un_pm_mutex); 18156 18157 /* 18158 * Suppress messages if they are all the same pkt_reason; with 18159 * TQ, many (up to 256) are returned with the same pkt_reason. 18160 * If we are in panic, then suppress the retry messages. 18161 */ 18162 switch (flag) { 18163 case SD_NO_RETRY_ISSUED: 18164 msgp = "giving up"; 18165 break; 18166 case SD_IMMEDIATE_RETRY_ISSUED: 18167 case SD_DELAYED_RETRY_ISSUED: 18168 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 18169 ((pktp->pkt_reason == un->un_last_pkt_reason) && 18170 (sd_error_level != SCSI_ERR_ALL))) { 18171 return; 18172 } 18173 msgp = "retrying command"; 18174 break; 18175 default: 18176 goto update_pkt_reason; 18177 } 18178 18179 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 18180 scsi_rname(pktp->pkt_reason)); 18181 18182 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) { 18183 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18184 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 18185 } 18186 18187 update_pkt_reason: 18188 /* 18189 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 18190 * This is to prevent multiple console messages for the same failure 18191 * condition. Note that un->un_last_pkt_reason is NOT restored if & 18192 * when the command is retried successfully because there still may be 18193 * more commands coming back with the same value of pktp->pkt_reason. 18194 */ 18195 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 18196 un->un_last_pkt_reason = pktp->pkt_reason; 18197 } 18198 } 18199 18200 18201 /* 18202 * Function: sd_print_cmd_incomplete_msg 18203 * 18204 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 18205 * 18206 * Arguments: un - ptr to associated softstate 18207 * bp - ptr to buf(9S) for the command 18208 * arg - passed to sd_print_retry_msg() 18209 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 18210 * or SD_NO_RETRY_ISSUED 18211 * 18212 * Context: May be called from interrupt context 18213 */ 18214 18215 static void 18216 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 18217 int code) 18218 { 18219 dev_info_t *dip; 18220 18221 ASSERT(un != NULL); 18222 ASSERT(mutex_owned(SD_MUTEX(un))); 18223 ASSERT(bp != NULL); 18224 18225 switch (code) { 18226 case SD_NO_RETRY_ISSUED: 18227 /* Command was failed. Someone turned off this target? 
*/ 18228 if (un->un_state != SD_STATE_OFFLINE) { 18229 /* 18230 * Suppress message if we are detaching and 18231 * device has been disconnected 18232 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 18233 * private interface and not part of the DDI 18234 */ 18235 dip = un->un_sd->sd_dev; 18236 if (!(DEVI_IS_DETACHING(dip) && 18237 DEVI_IS_DEVICE_REMOVED(dip))) { 18238 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18239 "disk not responding to selection\n"); 18240 } 18241 New_state(un, SD_STATE_OFFLINE); 18242 } 18243 break; 18244 18245 case SD_DELAYED_RETRY_ISSUED: 18246 case SD_IMMEDIATE_RETRY_ISSUED: 18247 default: 18248 /* Command was successfully queued for retry */ 18249 sd_print_retry_msg(un, bp, arg, code); 18250 break; 18251 } 18252 } 18253 18254 18255 /* 18256 * Function: sd_pkt_reason_cmd_incomplete 18257 * 18258 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 18259 * 18260 * Context: May be called from interrupt context 18261 */ 18262 18263 static void 18264 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 18265 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18266 { 18267 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 18268 18269 ASSERT(un != NULL); 18270 ASSERT(mutex_owned(SD_MUTEX(un))); 18271 ASSERT(bp != NULL); 18272 ASSERT(xp != NULL); 18273 ASSERT(pktp != NULL); 18274 18275 /* Do not do a reset if selection did not complete */ 18276 /* Note: Should this not just check the bit? */ 18277 if (pktp->pkt_state != STATE_GOT_BUS) { 18278 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18279 sd_reset_target(un, pktp); 18280 } 18281 18282 /* 18283 * If the target was not successfully selected, then set 18284 * SD_RETRIES_FAILFAST to indicate that we lost communication 18285 * with the target, and further retries and/or commands are 18286 * likely to take a long time. 18287 */ 18288 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 18289 flag |= SD_RETRIES_FAILFAST; 18290 } 18291 18292 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18293 18294 sd_retry_command(un, bp, flag, 18295 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18296 } 18297 18298 18299 18300 /* 18301 * Function: sd_pkt_reason_cmd_tran_err 18302 * 18303 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 18304 * 18305 * Context: May be called from interrupt context 18306 */ 18307 18308 static void 18309 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 18310 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18311 { 18312 ASSERT(un != NULL); 18313 ASSERT(mutex_owned(SD_MUTEX(un))); 18314 ASSERT(bp != NULL); 18315 ASSERT(xp != NULL); 18316 ASSERT(pktp != NULL); 18317 18318 /* 18319 * Do not reset if we got a parity error, or if 18320 * selection did not complete. 18321 */ 18322 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18323 /* Note: Should this not just check the bit for pkt_state? */ 18324 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 18325 (pktp->pkt_state != STATE_GOT_BUS)) { 18326 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18327 sd_reset_target(un, pktp); 18328 } 18329 18330 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18331 18332 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18333 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18334 } 18335 18336 18337 18338 /* 18339 * Function: sd_pkt_reason_cmd_reset 18340 * 18341 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 
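 *		Such commands are often innocent victims of a reset caused
 *		by activity from some other initiator or target on the bus,
 *		so the retry issued below uses SD_RETRIES_VICTIM and is
 *		governed by the separate victim retry count.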
18342 * 18343 * Context: May be called from interrupt context 18344 */ 18345 18346 static void 18347 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 18348 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18349 { 18350 ASSERT(un != NULL); 18351 ASSERT(mutex_owned(SD_MUTEX(un))); 18352 ASSERT(bp != NULL); 18353 ASSERT(xp != NULL); 18354 ASSERT(pktp != NULL); 18355 18356 /* The target may still be running the command, so try to reset. */ 18357 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18358 sd_reset_target(un, pktp); 18359 18360 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18361 18362 /* 18363 * If pkt_reason is CMD_RESET chances are that this pkt got 18364 * reset because another target on this bus caused it. The target 18365 * that caused it should get CMD_TIMEOUT with pkt_statistics 18366 * of STAT_TIMEOUT/STAT_DEV_RESET. 18367 */ 18368 18369 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18370 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18371 } 18372 18373 18374 18375 18376 /* 18377 * Function: sd_pkt_reason_cmd_aborted 18378 * 18379 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 18380 * 18381 * Context: May be called from interrupt context 18382 */ 18383 18384 static void 18385 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 18386 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18387 { 18388 ASSERT(un != NULL); 18389 ASSERT(mutex_owned(SD_MUTEX(un))); 18390 ASSERT(bp != NULL); 18391 ASSERT(xp != NULL); 18392 ASSERT(pktp != NULL); 18393 18394 /* The target may still be running the command, so try to reset. */ 18395 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18396 sd_reset_target(un, pktp); 18397 18398 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18399 18400 /* 18401 * If pkt_reason is CMD_ABORTED chances are that this pkt got 18402 * aborted because another target on this bus caused it. The target 18403 * that caused it should get CMD_TIMEOUT with pkt_statistics 18404 * of STAT_TIMEOUT/STAT_DEV_RESET. 18405 */ 18406 18407 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18408 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18409 } 18410 18411 18412 18413 /* 18414 * Function: sd_pkt_reason_cmd_timeout 18415 * 18416 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 18417 * 18418 * Context: May be called from interrupt context 18419 */ 18420 18421 static void 18422 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 18423 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18424 { 18425 ASSERT(un != NULL); 18426 ASSERT(mutex_owned(SD_MUTEX(un))); 18427 ASSERT(bp != NULL); 18428 ASSERT(xp != NULL); 18429 ASSERT(pktp != NULL); 18430 18431 18432 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18433 sd_reset_target(un, pktp); 18434 18435 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18436 18437 /* 18438 * A command timeout indicates that we could not establish 18439 * communication with the target, so set SD_RETRIES_FAILFAST 18440 * as further retries/commands are likely to take a long time. 18441 */ 18442 sd_retry_command(un, bp, 18443 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 18444 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18445 } 18446 18447 18448 18449 /* 18450 * Function: sd_pkt_reason_cmd_unx_bus_free 18451 * 18452 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 
18453 * 18454 * Context: May be called from interrupt context 18455 */ 18456 18457 static void 18458 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 18459 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18460 { 18461 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 18462 18463 ASSERT(un != NULL); 18464 ASSERT(mutex_owned(SD_MUTEX(un))); 18465 ASSERT(bp != NULL); 18466 ASSERT(xp != NULL); 18467 ASSERT(pktp != NULL); 18468 18469 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18470 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18471 18472 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 18473 sd_print_retry_msg : NULL; 18474 18475 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18476 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18477 } 18478 18479 18480 /* 18481 * Function: sd_pkt_reason_cmd_tag_reject 18482 * 18483 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 18484 * 18485 * Context: May be called from interrupt context 18486 */ 18487 18488 static void 18489 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 18490 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18491 { 18492 ASSERT(un != NULL); 18493 ASSERT(mutex_owned(SD_MUTEX(un))); 18494 ASSERT(bp != NULL); 18495 ASSERT(xp != NULL); 18496 ASSERT(pktp != NULL); 18497 18498 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18499 pktp->pkt_flags = 0; 18500 un->un_tagflags = 0; 18501 if (un->un_f_opt_queueing == TRUE) { 18502 un->un_throttle = min(un->un_throttle, 3); 18503 } else { 18504 un->un_throttle = 1; 18505 } 18506 mutex_exit(SD_MUTEX(un)); 18507 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 18508 mutex_enter(SD_MUTEX(un)); 18509 18510 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18511 18512 /* Legacy behavior not to check retry counts here. */ 18513 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 18514 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18515 } 18516 18517 18518 /* 18519 * Function: sd_pkt_reason_default 18520 * 18521 * Description: Default recovery actions for SCSA pkt_reason values that 18522 * do not have more explicit recovery actions. 18523 * 18524 * Context: May be called from interrupt context 18525 */ 18526 18527 static void 18528 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 18529 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18530 { 18531 ASSERT(un != NULL); 18532 ASSERT(mutex_owned(SD_MUTEX(un))); 18533 ASSERT(bp != NULL); 18534 ASSERT(xp != NULL); 18535 ASSERT(pktp != NULL); 18536 18537 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18538 sd_reset_target(un, pktp); 18539 18540 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18541 18542 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18543 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18544 } 18545 18546 18547 18548 /* 18549 * Function: sd_pkt_status_check_condition 18550 * 18551 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 
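 *		A check condition means the target has sense data available;
 *		whether the driver must fetch that data itself depends on
 *		whether auto request sense (ARQ) is enabled for this LUN, as
 *		tested via un_f_arq_enabled below.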
18552  *
18553  * Context: May be called from interrupt context
18554  */
18555 
18556 static void
18557 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
18558 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
18559 {
18560 	ASSERT(un != NULL);
18561 	ASSERT(mutex_owned(SD_MUTEX(un)));
18562 	ASSERT(bp != NULL);
18563 	ASSERT(xp != NULL);
18564 	ASSERT(pktp != NULL);
18565 
18566 	SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
18567 	    "entry: buf:0x%p xp:0x%p\n", bp, xp);
18568 
18569 	/*
18570 	 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
18571 	 * command will be retried after the request sense). Otherwise, retry
18572 	 * the command. Note: we are issuing the request sense even though the
18573 	 * retry limit may have been reached for the failed command.
18574 	 */
18575 	if (un->un_f_arq_enabled == FALSE) {
18576 		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
18577 		    "no ARQ, sending request sense command\n");
18578 		sd_send_request_sense_command(un, bp, pktp);
18579 	} else {
18580 		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
18581 		    "ARQ, retrying request sense command\n");
18582 #if defined(__i386) || defined(__amd64)
18583 		/*
18584 		 * The SD_RETRY_DELAY value needs to be adjusted here
18585 		 * whenever SD_RETRY_DELAY changes in sddef.h
18586 		 */
18587 		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
18588 		    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
18589 		    NULL);
18590 #else
18591 		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
18592 		    EIO, SD_RETRY_DELAY, NULL);
18593 #endif
18594 	}
18595 
18596 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
18597 }
18598 
18599 
18600 /*
18601  * Function: sd_pkt_status_busy
18602  *
18603  * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
18604  *
18605  * Context: May be called from interrupt context
18606  */
18607 
18608 static void
18609 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
18610 	struct scsi_pkt *pktp)
18611 {
18612 	ASSERT(un != NULL);
18613 	ASSERT(mutex_owned(SD_MUTEX(un)));
18614 	ASSERT(bp != NULL);
18615 	ASSERT(xp != NULL);
18616 	ASSERT(pktp != NULL);
18617 
18618 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18619 	    "sd_pkt_status_busy: entry\n");
18620 
18621 	/* If retries are exhausted, just fail the command. */
18622 	if (xp->xb_retry_count >= un->un_busy_retry_count) {
18623 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18624 		    "device busy too long\n");
18625 		sd_return_failed_command(un, bp, EIO);
18626 		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18627 		    "sd_pkt_status_busy: exit\n");
18628 		return;
18629 	}
18630 	xp->xb_retry_count++;
18631 
18632 	/*
18633 	 * Try to reset the target. However, we do not want to perform
18634 	 * more than one reset if the device continues to fail. The reset
18635 	 * will be performed when the retry count reaches the reset
18636 	 * threshold. This threshold should be set such that at least
18637 	 * one retry is issued before the reset is performed.
18638 	 */
18639 	if (xp->xb_retry_count ==
18640 	    ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
18641 		int rval = 0;
18642 		mutex_exit(SD_MUTEX(un));
18643 		if (un->un_f_allow_bus_device_reset == TRUE) {
18644 			/*
18645 			 * First try to reset the LUN; if we cannot then
18646 			 * try to reset the target.
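			 * A LUN reset is the least disruptive option on a
			 * shared bus, so it is attempted first; only if it
			 * fails (or is not enabled) do we escalate to the
			 * target and, below that, to the whole bus.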
18647 */ 18648 if (un->un_f_lun_reset_enabled == TRUE) { 18649 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18650 "sd_pkt_status_busy: RESET_LUN\n"); 18651 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18652 } 18653 if (rval == 0) { 18654 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18655 "sd_pkt_status_busy: RESET_TARGET\n"); 18656 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18657 } 18658 } 18659 if (rval == 0) { 18660 /* 18661 * If the RESET_LUN and/or RESET_TARGET failed, 18662 * try RESET_ALL 18663 */ 18664 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18665 "sd_pkt_status_busy: RESET_ALL\n"); 18666 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 18667 } 18668 mutex_enter(SD_MUTEX(un)); 18669 if (rval == 0) { 18670 /* 18671 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 18672 * At this point we give up & fail the command. 18673 */ 18674 sd_return_failed_command(un, bp, EIO); 18675 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18676 "sd_pkt_status_busy: exit (failed cmd)\n"); 18677 return; 18678 } 18679 } 18680 18681 /* 18682 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 18683 * we have already checked the retry counts above. 18684 */ 18685 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 18686 EIO, un->un_busy_timeout, NULL); 18687 18688 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18689 "sd_pkt_status_busy: exit\n"); 18690 } 18691 18692 18693 /* 18694 * Function: sd_pkt_status_reservation_conflict 18695 * 18696 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 18697 * command status. 18698 * 18699 * Context: May be called from interrupt context 18700 */ 18701 18702 static void 18703 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 18704 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18705 { 18706 ASSERT(un != NULL); 18707 ASSERT(mutex_owned(SD_MUTEX(un))); 18708 ASSERT(bp != NULL); 18709 ASSERT(xp != NULL); 18710 ASSERT(pktp != NULL); 18711 18712 /* 18713 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 18714 * conflict could be due to various reasons like incorrect keys, not 18715 * registered or not reserved etc. So, we return EACCES to the caller. 18716 */ 18717 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 18718 int cmd = SD_GET_PKT_OPCODE(pktp); 18719 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 18720 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 18721 sd_return_failed_command(un, bp, EACCES); 18722 return; 18723 } 18724 } 18725 18726 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 18727 18728 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 18729 if (sd_failfast_enable != 0) { 18730 /* By definition, we must panic here.... */ 18731 sd_panic_for_res_conflict(un); 18732 /*NOTREACHED*/ 18733 } 18734 SD_ERROR(SD_LOG_IO, un, 18735 "sd_handle_resv_conflict: Disk Reserved\n"); 18736 sd_return_failed_command(un, bp, EACCES); 18737 return; 18738 } 18739 18740 /* 18741 * 1147670: retry only if sd_retry_on_reservation_conflict 18742 * property is set (default is 1). Retries will not succeed 18743 * on a disk reserved by another initiator. HA systems 18744 * may reset this via sd.conf to avoid these retries. 18745 * 18746 * Note: The legacy return code for this failure is EIO, however EACCES 18747 * seems more appropriate for a reservation conflict. 
18748 */ 18749 if (sd_retry_on_reservation_conflict == 0) { 18750 SD_ERROR(SD_LOG_IO, un, 18751 "sd_handle_resv_conflict: Device Reserved\n"); 18752 sd_return_failed_command(un, bp, EIO); 18753 return; 18754 } 18755 18756 /* 18757 * Retry the command if we can. 18758 * 18759 * Note: The legacy return code for this failure is EIO, however EACCES 18760 * seems more appropriate for a reservation conflict. 18761 */ 18762 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 18763 (clock_t)2, NULL); 18764 } 18765 18766 18767 18768 /* 18769 * Function: sd_pkt_status_qfull 18770 * 18771 * Description: Handle a QUEUE FULL condition from the target. This can 18772 * occur if the HBA does not handle the queue full condition. 18773 * (Basically this means third-party HBAs as Sun HBAs will 18774 * handle the queue full condition.) Note that if there are 18775 * some commands already in the transport, then the queue full 18776 * has occurred because the queue for this nexus is actually 18777 * full. If there are no commands in the transport, then the 18778 * queue full is resulting from some other initiator or lun 18779 * consuming all the resources at the target. 18780 * 18781 * Context: May be called from interrupt context 18782 */ 18783 18784 static void 18785 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 18786 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18787 { 18788 ASSERT(un != NULL); 18789 ASSERT(mutex_owned(SD_MUTEX(un))); 18790 ASSERT(bp != NULL); 18791 ASSERT(xp != NULL); 18792 ASSERT(pktp != NULL); 18793 18794 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18795 "sd_pkt_status_qfull: entry\n"); 18796 18797 /* 18798 * Just lower the QFULL throttle and retry the command. Note that 18799 * we do not limit the number of retries here. 18800 */ 18801 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 18802 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 18803 SD_RESTART_TIMEOUT, NULL); 18804 18805 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18806 "sd_pkt_status_qfull: exit\n"); 18807 } 18808 18809 18810 /* 18811 * Function: sd_reset_target 18812 * 18813 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 18814 * RESET_TARGET, or RESET_ALL. 18815 * 18816 * Context: May be called under interrupt context. 18817 */ 18818 18819 static void 18820 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 18821 { 18822 int rval = 0; 18823 18824 ASSERT(un != NULL); 18825 ASSERT(mutex_owned(SD_MUTEX(un))); 18826 ASSERT(pktp != NULL); 18827 18828 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 18829 18830 /* 18831 * No need to reset if the transport layer has already done so. 
18832 */ 18833 if ((pktp->pkt_statistics & 18834 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 18835 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18836 "sd_reset_target: no reset\n"); 18837 return; 18838 } 18839 18840 mutex_exit(SD_MUTEX(un)); 18841 18842 if (un->un_f_allow_bus_device_reset == TRUE) { 18843 if (un->un_f_lun_reset_enabled == TRUE) { 18844 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18845 "sd_reset_target: RESET_LUN\n"); 18846 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18847 } 18848 if (rval == 0) { 18849 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18850 "sd_reset_target: RESET_TARGET\n"); 18851 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18852 } 18853 } 18854 18855 if (rval == 0) { 18856 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18857 "sd_reset_target: RESET_ALL\n"); 18858 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 18859 } 18860 18861 mutex_enter(SD_MUTEX(un)); 18862 18863 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 18864 } 18865 18866 /* 18867 * Function: sd_target_change_task 18868 * 18869 * Description: Handle dynamic target change 18870 * 18871 * Context: Executes in a taskq() thread context 18872 */ 18873 static void 18874 sd_target_change_task(void *arg) 18875 { 18876 struct sd_lun *un = arg; 18877 uint64_t capacity; 18878 diskaddr_t label_cap; 18879 uint_t lbasize; 18880 sd_ssc_t *ssc; 18881 18882 ASSERT(un != NULL); 18883 ASSERT(!mutex_owned(SD_MUTEX(un))); 18884 18885 if ((un->un_f_blockcount_is_valid == FALSE) || 18886 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 18887 return; 18888 } 18889 18890 ssc = sd_ssc_init(un); 18891 18892 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity, 18893 &lbasize, SD_PATH_DIRECT) != 0) { 18894 SD_ERROR(SD_LOG_ERROR, un, 18895 "sd_target_change_task: fail to read capacity\n"); 18896 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 18897 goto task_exit; 18898 } 18899 18900 mutex_enter(SD_MUTEX(un)); 18901 if (capacity <= un->un_blockcount) { 18902 mutex_exit(SD_MUTEX(un)); 18903 goto task_exit; 18904 } 18905 18906 sd_update_block_info(un, lbasize, capacity); 18907 mutex_exit(SD_MUTEX(un)); 18908 18909 /* 18910 * If lun is EFI labeled and lun capacity is greater than the 18911 * capacity contained in the label, log a sys event. 
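	 * A userland consumer could subscribe to that sysevent; as an
	 * illustrative (unverified) registration using syseventadm:
	 *
	 *	syseventadm add -c EC_dev_status -s ESC_dev_dle \
	 *	    /usr/local/bin/handle_dle.sh
	 *
	 * The event itself is assembled in sd_log_lun_expansion_event()
	 * below.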
18912 	 */
18913 	if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
18914 	    (void*)SD_PATH_DIRECT) == 0) {
18915 		mutex_enter(SD_MUTEX(un));
18916 		if (un->un_f_blockcount_is_valid &&
18917 		    un->un_blockcount > label_cap) {
18918 			mutex_exit(SD_MUTEX(un));
18919 			sd_log_lun_expansion_event(un, KM_SLEEP);
18920 		} else {
18921 			mutex_exit(SD_MUTEX(un));
18922 		}
18923 	}
18924 
18925 task_exit:
18926 	sd_ssc_fini(ssc);
18927 }
18928 
18929 /*
18930  * Function: sd_log_lun_expansion_event
18931  *
18932  * Description: Log a lun expansion sysevent
18933  *
18934  * Context: Never called from interrupt context
18935  */
18936 static void
18937 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag)
18938 {
18939 	int err;
18940 	char *path;
18941 	nvlist_t *dle_attr_list;
18942 
18943 	/* Allocate and build sysevent attribute list */
18944 	err = nvlist_alloc(&dle_attr_list, NV_UNIQUE_NAME_TYPE, km_flag);
18945 	if (err != 0) {
18946 		SD_ERROR(SD_LOG_ERROR, un,
18947 		    "sd_log_lun_expansion_event: fail to allocate space\n");
18948 		return;
18949 	}
18950 
18951 	path = kmem_alloc(MAXPATHLEN, km_flag);
18952 	if (path == NULL) {
18953 		nvlist_free(dle_attr_list);
18954 		SD_ERROR(SD_LOG_ERROR, un,
18955 		    "sd_log_lun_expansion_event: fail to allocate space\n");
18956 		return;
18957 	}
18958 	/*
18959 	 * Add path attribute to identify the lun.
18960 	 * We are using minor node 'a' as the sysevent attribute.
18961 	 */
18962 	(void) snprintf(path, MAXPATHLEN, "/devices");
18963 	(void) ddi_pathname(SD_DEVINFO(un), path + strlen(path));
18964 	(void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path),
18965 	    ":a");
18966 
18967 	err = nvlist_add_string(dle_attr_list, DEV_PHYS_PATH, path);
18968 	if (err != 0) {
18969 		nvlist_free(dle_attr_list);
18970 		kmem_free(path, MAXPATHLEN);
18971 		SD_ERROR(SD_LOG_ERROR, un,
18972 		    "sd_log_lun_expansion_event: fail to add attribute\n");
18973 		return;
18974 	}
18975 
18976 	/* Log dynamic lun expansion sysevent */
18977 	err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS,
18978 	    ESC_DEV_DLE, dle_attr_list, NULL, km_flag);
18979 	if (err != DDI_SUCCESS) {
18980 		SD_ERROR(SD_LOG_ERROR, un,
18981 		    "sd_log_lun_expansion_event: fail to log sysevent\n");
18982 	}
18983 
18984 	nvlist_free(dle_attr_list);
18985 	kmem_free(path, MAXPATHLEN);
18986 }
18987 
18988 /*
18989  * Function: sd_media_change_task
18990  *
18991  * Description: Recovery action for a CDROM to become available.
18992  *
18993  * Context: Executes in a taskq() thread context
18994  */
18995 
18996 static void
18997 sd_media_change_task(void *arg)
18998 {
18999 	struct scsi_pkt *pktp = arg;
19000 	struct sd_lun *un;
19001 	struct buf *bp;
19002 	struct sd_xbuf *xp;
19003 	int err = 0;
19004 	int retry_count = 0;
19005 	int retry_limit = SD_UNIT_ATTENTION_RETRY/10;
19006 	struct sd_sense_info si;
19007 
19008 	ASSERT(pktp != NULL);
19009 	bp = (struct buf *)pktp->pkt_private;
19010 	ASSERT(bp != NULL);
19011 	xp = SD_GET_XBUF(bp);
19012 	ASSERT(xp != NULL);
19013 	un = SD_GET_UN(bp);
19014 	ASSERT(un != NULL);
19015 	ASSERT(!mutex_owned(SD_MUTEX(un)));
19016 	ASSERT(un->un_f_monitor_media_state);
19017 
19018 	si.ssi_severity = SCSI_ERR_INFO;
19019 	si.ssi_pfa_flag = FALSE;
19020 
19021 	/*
19022 	 * When a reset is issued on a CDROM, it takes a long time to
19023 	 * recover. The first few attempts to read capacity and other things
19024 	 * related to handling unit attention fail (with an ASC 0x4 and
19025 	 * ASCQ 0x1). In that case we want to do enough retries and we want
19026 	 * to limit the retries in other cases of genuine failures like
19027 	 * no media in drive.
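	 * As a worked example (assuming, purely for illustration, that
	 * SD_UNIT_ATTENTION_RETRY were 40): a genuine failure such as no
	 * media would get 4 attempts (about 2 seconds given the 0.5 sec
	 * sleep below), while a drive reporting "becoming ready" (EAGAIN)
	 * would have the limit raised to 40 attempts (about 20 seconds).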
19028 */ 19029 while (retry_count++ < retry_limit) { 19030 if ((err = sd_handle_mchange(un)) == 0) { 19031 break; 19032 } 19033 if (err == EAGAIN) { 19034 retry_limit = SD_UNIT_ATTENTION_RETRY; 19035 } 19036 /* Sleep for 0.5 sec. & try again */ 19037 delay(drv_usectohz(500000)); 19038 } 19039 19040 /* 19041 * Dispatch (retry or fail) the original command here, 19042 * along with appropriate console messages.... 19043 * 19044 * Must grab the mutex before calling sd_retry_command, 19045 * sd_print_sense_msg and sd_return_failed_command. 19046 */ 19047 mutex_enter(SD_MUTEX(un)); 19048 if (err != SD_CMD_SUCCESS) { 19049 SD_UPDATE_ERRSTATS(un, sd_harderrs); 19050 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 19051 si.ssi_severity = SCSI_ERR_FATAL; 19052 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 19053 sd_return_failed_command(un, bp, EIO); 19054 } else { 19055 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 19056 &si, EIO, (clock_t)0, NULL); 19057 } 19058 mutex_exit(SD_MUTEX(un)); 19059 } 19060 19061 19062 19063 /* 19064 * Function: sd_handle_mchange 19065 * 19066 * Description: Perform geometry validation & other recovery when CDROM 19067 * has been removed from drive. 19068 * 19069 * Return Code: 0 for success 19070 * errno-type return code of either sd_send_scsi_DOORLOCK() or 19071 * sd_send_scsi_READ_CAPACITY() 19072 * 19073 * Context: Executes in a taskq() thread context 19074 */ 19075 19076 static int 19077 sd_handle_mchange(struct sd_lun *un) 19078 { 19079 uint64_t capacity; 19080 uint32_t lbasize; 19081 int rval; 19082 sd_ssc_t *ssc; 19083 19084 ASSERT(!mutex_owned(SD_MUTEX(un))); 19085 ASSERT(un->un_f_monitor_media_state); 19086 19087 ssc = sd_ssc_init(un); 19088 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 19089 SD_PATH_DIRECT_PRIORITY); 19090 19091 if (rval != 0) 19092 goto failed; 19093 19094 mutex_enter(SD_MUTEX(un)); 19095 sd_update_block_info(un, lbasize, capacity); 19096 19097 if (un->un_errstats != NULL) { 19098 struct sd_errstats *stp = 19099 (struct sd_errstats *)un->un_errstats->ks_data; 19100 stp->sd_capacity.value.ui64 = (uint64_t) 19101 ((uint64_t)un->un_blockcount * 19102 (uint64_t)un->un_tgt_blocksize); 19103 } 19104 19105 /* 19106 * Check if the media in the device is writable or not 19107 */ 19108 if (ISCD(un)) { 19109 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY); 19110 } 19111 19112 /* 19113 * Note: Maybe let the strategy/partitioning chain worry about getting 19114 * valid geometry. 19115 */ 19116 mutex_exit(SD_MUTEX(un)); 19117 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY); 19118 19119 19120 if (cmlb_validate(un->un_cmlbhandle, 0, 19121 (void *)SD_PATH_DIRECT_PRIORITY) != 0) { 19122 sd_ssc_fini(ssc); 19123 return (EIO); 19124 } else { 19125 if (un->un_f_pkstats_enabled) { 19126 sd_set_pstats(un); 19127 SD_TRACE(SD_LOG_IO_PARTITION, un, 19128 "sd_handle_mchange: un:0x%p pstats created and " 19129 "set\n", un); 19130 } 19131 } 19132 19133 /* 19134 * Try to lock the door 19135 */ 19136 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 19137 SD_PATH_DIRECT_PRIORITY); 19138 failed: 19139 if (rval != 0) 19140 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19141 sd_ssc_fini(ssc); 19142 return (rval); 19143 } 19144 19145 19146 /* 19147 * Function: sd_send_scsi_DOORLOCK 19148 * 19149 * Description: Issue the scsi DOOR LOCK command 19150 * 19151 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 19152 * structure for this target. 
19153 * flag - SD_REMOVAL_ALLOW 19154 * SD_REMOVAL_PREVENT 19155 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19156 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19157 * to use the USCSI "direct" chain and bypass the normal 19158 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19159 * command is issued as part of an error recovery action. 19160 * 19161 * Return Code: 0 - Success 19162 * errno return code from sd_ssc_send() 19163 * 19164 * Context: Can sleep. 19165 */ 19166 19167 static int 19168 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag) 19169 { 19170 struct scsi_extended_sense sense_buf; 19171 union scsi_cdb cdb; 19172 struct uscsi_cmd ucmd_buf; 19173 int status; 19174 struct sd_lun *un; 19175 19176 ASSERT(ssc != NULL); 19177 un = ssc->ssc_un; 19178 ASSERT(un != NULL); 19179 ASSERT(!mutex_owned(SD_MUTEX(un))); 19180 19181 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 19182 19183 /* already determined doorlock is not supported, fake success */ 19184 if (un->un_f_doorlock_supported == FALSE) { 19185 return (0); 19186 } 19187 19188 /* 19189 * If we are ejecting and see an SD_REMOVAL_PREVENT 19190 * ignore the command so we can complete the eject 19191 * operation. 19192 */ 19193 if (flag == SD_REMOVAL_PREVENT) { 19194 mutex_enter(SD_MUTEX(un)); 19195 if (un->un_f_ejecting == TRUE) { 19196 mutex_exit(SD_MUTEX(un)); 19197 return (EAGAIN); 19198 } 19199 mutex_exit(SD_MUTEX(un)); 19200 } 19201 19202 bzero(&cdb, sizeof (cdb)); 19203 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19204 19205 cdb.scc_cmd = SCMD_DOORLOCK; 19206 cdb.cdb_opaque[4] = (uchar_t)flag; 19207 19208 ucmd_buf.uscsi_cdb = (char *)&cdb; 19209 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19210 ucmd_buf.uscsi_bufaddr = NULL; 19211 ucmd_buf.uscsi_buflen = 0; 19212 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19213 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19214 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19215 ucmd_buf.uscsi_timeout = 15; 19216 19217 SD_TRACE(SD_LOG_IO, un, 19218 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n"); 19219 19220 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19221 UIO_SYSSPACE, path_flag); 19222 19223 if (status == 0) 19224 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19225 19226 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 19227 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19228 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) { 19229 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 19230 19231 /* fake success and skip subsequent doorlock commands */ 19232 un->un_f_doorlock_supported = FALSE; 19233 return (0); 19234 } 19235 19236 return (status); 19237 } 19238 19239 /* 19240 * Function: sd_send_scsi_READ_CAPACITY 19241 * 19242 * Description: This routine uses the scsi READ CAPACITY command to determine 19243 * the device capacity in number of blocks and the device native 19244 * block size. If this function returns a failure, then the 19245 * values in *capp and *lbap are undefined. If the capacity 19246 * returned is 0xffffffff then the lun is too large for a 19247 * normal READ CAPACITY command and the results of a 19248 * READ CAPACITY 16 will be used instead. 19249 * 19250 * Arguments: ssc - ssc contains ptr to soft state struct for the target 19251 * capp - ptr to unsigned 64-bit variable to receive the 19252 * capacity value from the command. 
19253  *		lbap - ptr to unsigned 32-bit variable to receive the
19254  *			block size value from the command
19255  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19256  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19257  *			to use the USCSI "direct" chain and bypass the normal
19258  *			command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19259  *			command is issued as part of an error recovery action.
19260  *
19261  * Return Code: 0 - Success
19262  *		EIO - IO error
19263  *		EACCES - Reservation conflict detected
19264  *		EAGAIN - Device is becoming ready
19265  *		errno return code from sd_ssc_send()
19266  *
19267  * Context: Can sleep. Blocks until command completes.
19268  */
19269 
19270 #define	SD_CAPACITY_SIZE	sizeof (struct scsi_capacity)
19271 
19272 static int
19273 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
19274 	int path_flag)
19275 {
19276 	struct scsi_extended_sense sense_buf;
19277 	struct uscsi_cmd ucmd_buf;
19278 	union scsi_cdb cdb;
19279 	uint32_t *capacity_buf;
19280 	uint64_t capacity;
19281 	uint32_t lbasize;
19282 	int status;
19283 	struct sd_lun *un;
19284 
19285 	ASSERT(ssc != NULL);
19286 
19287 	un = ssc->ssc_un;
19288 	ASSERT(un != NULL);
19289 	ASSERT(!mutex_owned(SD_MUTEX(un)));
19290 	ASSERT(capp != NULL);
19291 	ASSERT(lbap != NULL);
19292 
19293 	SD_TRACE(SD_LOG_IO, un,
19294 	    "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
19295 
19296 	/*
19297 	 * First send a READ_CAPACITY command to the target.
19298 	 * (This command is mandatory under SCSI-2.)
19299 	 *
19300 	 * Set up the CDB for the READ_CAPACITY command. The Partial
19301 	 * Medium Indicator bit is cleared. The address field must be
19302 	 * zero if the PMI bit is zero.
19303 	 */
19304 	bzero(&cdb, sizeof (cdb));
19305 	bzero(&ucmd_buf, sizeof (ucmd_buf));
19306 
19307 	capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP);
19308 
19309 	cdb.scc_cmd = SCMD_READ_CAPACITY;
19310 
19311 	ucmd_buf.uscsi_cdb = (char *)&cdb;
19312 	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
19313 	ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf;
19314 	ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE;
19315 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19316 	ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19317 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19318 	ucmd_buf.uscsi_timeout = 60;
19319 
19320 	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19321 	    UIO_SYSSPACE, path_flag);
19322 
19323 	switch (status) {
19324 	case 0:
19325 		/* Return failure if we did not get valid capacity data. */
19326 		if (ucmd_buf.uscsi_resid != 0) {
19327 			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
19328 			    "sd_send_scsi_READ_CAPACITY received invalid "
19329 			    "capacity data");
19330 			kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19331 			return (EIO);
19332 		}
19333 		/*
19334 		 * Read capacity and block size from the READ CAPACITY 10 data.
19335 		 * This data may be adjusted later due to device specific
19336 		 * issues.
19337 		 *
19338 		 * According to the SCSI spec, the READ CAPACITY 10
19339 		 * command returns the following:
19340 		 *
19341 		 *  bytes 0-3: Maximum logical block address available.
19342 		 *		(MSB in byte:0 & LSB in byte:3)
19343 		 *
19344 		 *  bytes 4-7: Block length in bytes
19345 		 *		(MSB in byte:4 & LSB in byte:7)
19346 		 *
19347 		 */
19348 		capacity = BE_32(capacity_buf[0]);
19349 		lbasize = BE_32(capacity_buf[1]);
19350 
19351 		/*
19352 		 * Done with capacity_buf
19353 		 */
19354 		kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19355 
19356 		/*
19357 		 * If the reported capacity is set to all 0xf's, then
19358 		 * this disk is too large and requires SBC-2 commands.
19359 		 * Reissue the request using READ CAPACITY 16.
19360 		 */
19361 		if (capacity == 0xffffffff) {
19362 			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19363 			status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity,
19364 			    &lbasize, path_flag);
19365 			if (status != 0) {
19366 				return (status);
19367 			}
19368 		}
19369 		break;	/* Success! */
19370 	case EIO:
19371 		switch (ucmd_buf.uscsi_status) {
19372 		case STATUS_RESERVATION_CONFLICT:
19373 			status = EACCES;
19374 			break;
19375 		case STATUS_CHECK:
19376 			/*
19377 			 * Check condition; look for ASC/ASCQ of 0x04/0x01
19378 			 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
19379 			 */
19380 			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19381 			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
19382 			    (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
19383 				kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19384 				return (EAGAIN);
19385 			}
19386 			break;
19387 		default:
19388 			break;
19389 		}
19390 		/* FALLTHRU */
19391 	default:
19392 		kmem_free(capacity_buf, SD_CAPACITY_SIZE);
19393 		return (status);
19394 	}
19395 
19396 	/*
19397 	 * Some ATAPI CD-ROM drives report inaccurate LBA size values
19398 	 * (2352 and 0 are common) so for these devices always force the value
19399 	 * to 2048 as required by the ATAPI specs.
19400 	 */
19401 	if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
19402 		lbasize = 2048;
19403 	}
19404 
19405 	/*
19406 	 * Get the maximum LBA value from the READ CAPACITY data.
19407 	 * Here we assume that the Partial Medium Indicator (PMI) bit
19408 	 * was cleared when issuing the command. This means that the LBA
19409 	 * returned from the device is the LBA of the last logical block
19410 	 * on the logical unit. The actual logical block count will be
19411 	 * this value plus one.
19412 	 *
19413 	 * Currently the capacity is saved in terms of un->un_sys_blocksize,
19414 	 * so scale the capacity value to reflect this.
19415 	 */
19416 	capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);
19417 
19418 	/*
19419 	 * Copy the values from the READ CAPACITY command into the space
19420 	 * provided by the caller.
19421 	 */
19422 	*capp = capacity;
19423 	*lbap = lbasize;
19424 
19425 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
19426 	    "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
19427 
19428 	/*
19429 	 * Both the lbasize and capacity from the device must be nonzero,
19430 	 * otherwise we assume that the values are not valid and return
19431 	 * failure to the caller. (4203735)
19432 	 */
19433 	if ((capacity == 0) || (lbasize == 0)) {
19434 		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
19435 		    "sd_send_scsi_READ_CAPACITY received invalid value "
19436 		    "capacity %llu lbasize %d", capacity, lbasize);
19437 		return (EIO);
19438 	}
19439 	sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19440 	return (0);
19441 }
19442 
19443 /*
19444  * Function: sd_send_scsi_READ_CAPACITY_16
19445  *
19446  * Description: This routine uses the scsi READ CAPACITY 16 command to
19447  *		determine the device capacity in number of blocks and the
19448  *		device native block size. If this function returns a failure,
19449  *		then the values in *capp and *lbap are undefined.
19450  *		This routine should always be called by
19451  *		sd_send_scsi_READ_CAPACITY which will apply any device
19452  *		specific adjustments to capacity and lbasize.
19453  *
19454  * Arguments: ssc - ssc contains ptr to soft state struct for the target
19455  *		capp - ptr to unsigned 64-bit variable to receive the
19456  *			capacity value from the command.
19457  *		lbap - ptr to unsigned 32-bit variable to receive the
19458  *			block size value from the command
19459  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19460  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19461  *			to use the USCSI "direct" chain and bypass the normal
19462  *			command waitq. SD_PATH_DIRECT_PRIORITY is used when
19463  *			this command is issued as part of an error recovery
19464  *			action.
19465  *
19466  * Return Code: 0 - Success
19467  *		EIO - IO error
19468  *		EACCES - Reservation conflict detected
19469  *		EAGAIN - Device is becoming ready
19470  *		errno return code from sd_ssc_send()
19471  *
19472  * Context: Can sleep. Blocks until command completes.
19473  */
19474 
19475 #define	SD_CAPACITY_16_SIZE	sizeof (struct scsi_capacity_16)
19476 
19477 static int
19478 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
19479 	uint32_t *lbap, int path_flag)
19480 {
19481 	struct scsi_extended_sense sense_buf;
19482 	struct uscsi_cmd ucmd_buf;
19483 	union scsi_cdb cdb;
19484 	uint64_t *capacity16_buf;
19485 	uint64_t capacity;
19486 	uint32_t lbasize;
19487 	int status;
19488 	struct sd_lun *un;
19489 
19490 	ASSERT(ssc != NULL);
19491 
19492 	un = ssc->ssc_un;
19493 	ASSERT(un != NULL);
19494 	ASSERT(!mutex_owned(SD_MUTEX(un)));
19495 	ASSERT(capp != NULL);
19496 	ASSERT(lbap != NULL);
19497 
19498 	SD_TRACE(SD_LOG_IO, un,
19499 	    "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
19500 
19501 	/*
19502 	 * First send a READ_CAPACITY_16 command to the target.
19503 	 *
19504 	 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
19505 	 * Medium Indicator bit is cleared. The address field must be
19506 	 * zero if the PMI bit is zero.
19507 	 */
19508 	bzero(&cdb, sizeof (cdb));
19509 	bzero(&ucmd_buf, sizeof (ucmd_buf));
19510 
19511 	capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
19512 
19513 	ucmd_buf.uscsi_cdb = (char *)&cdb;
19514 	ucmd_buf.uscsi_cdblen = CDB_GROUP4;
19515 	ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
19516 	ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
19517 	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19518 	ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19519 	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19520 	ucmd_buf.uscsi_timeout = 60;
19521 
19522 	/*
19523 	 * Read Capacity (16) is a Service Action In command. One
19524 	 * command byte (0x9E) is overloaded for multiple operations,
19525 	 * with the second CDB byte specifying the desired operation
19526 	 */
19527 	cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
19528 	cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
19529 
19530 	/*
19531 	 * Fill in allocation length field
19532 	 */
19533 	FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
19534 
19535 	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19536 	    UIO_SYSSPACE, path_flag);
19537 
19538 	switch (status) {
19539 	case 0:
19540 		/* Return failure if we did not get valid capacity data. */
19541 		if (ucmd_buf.uscsi_resid > 20) {
19542 			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
19543 			    "sd_send_scsi_READ_CAPACITY_16 received invalid "
19544 			    "capacity data");
19545 			kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19546 			return (EIO);
19547 		}
19548 
19549 		/*
19550 		 * Read capacity and block size from the READ CAPACITY 16 data.
19551 		 * This data may be adjusted later due to device specific
19552 		 * issues.
19553 		 *
19554 		 * According to the SCSI spec, the READ CAPACITY 16
19555 		 * command returns the following:
19556 		 *
19557 		 *  bytes 0-7: Maximum logical block address available.
19558 		 *		(MSB in byte:0 & LSB in byte:7)
19559 		 *
19560 		 *  bytes 8-11: Block length in bytes
19561 		 *		(MSB in byte:8 & LSB in byte:11)
19562 		 *
19563 		 */
19564 		capacity = BE_64(capacity16_buf[0]);
19565 		lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
19566 
19567 		/*
19568 		 * Done with capacity16_buf
19569 		 */
19570 		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19571 
19572 		/*
19573 		 * If the reported capacity is set to all 0xf's, then
19574 		 * this disk is too large. This could only happen with
19575 		 * a device that supports LBAs larger than 64 bits which
19576 		 * are not defined by any current T10 standards.
19577 		 */
19578 		if (capacity == 0xffffffffffffffff) {
19579 			sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
19580 			    "disk is too large");
19581 			return (EIO);
19582 		}
19583 		break;	/* Success! */
19584 	case EIO:
19585 		switch (ucmd_buf.uscsi_status) {
19586 		case STATUS_RESERVATION_CONFLICT:
19587 			status = EACCES;
19588 			break;
19589 		case STATUS_CHECK:
19590 			/*
19591 			 * Check condition; look for ASC/ASCQ of 0x04/0x01
19592 			 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
19593 			 */
19594 			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19595 			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
19596 			    (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
19597 				kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19598 				return (EAGAIN);
19599 			}
19600 			break;
19601 		default:
19602 			break;
19603 		}
19604 		/* FALLTHRU */
19605 	default:
19606 		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
19607 		return (status);
19608 	}
19609 
19610 	*capp = capacity;
19611 	*lbap = lbasize;
19612 
19613 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
19614 	    "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
19615 
19616 	return (0);
19617 }
19618 
19619 
19620 /*
19621  * Function: sd_send_scsi_START_STOP_UNIT
19622  *
19623  * Description: Issue a scsi START STOP UNIT command to the target.
19624  *
19625  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19626  *		structure for this target.
19627  *		flag - SD_TARGET_START
19628  *			SD_TARGET_STOP
19629  *			SD_TARGET_EJECT
19630  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19631  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19632  *			to use the USCSI "direct" chain and bypass the normal
19633  *			command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19634  *			command is issued as part of an error recovery action.
19635  *
19636  * Return Code: 0 - Success
19637  *		EIO - IO error
19638  *		EACCES - Reservation conflict detected
19639  *		ENXIO - Not Ready, medium not present
19640  *		errno return code from sd_ssc_send()
19641  *
19642  * Context: Can sleep.
19643  */
19644 
19645 static int
19646 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int flag, int path_flag)
19647 {
19648 	struct scsi_extended_sense sense_buf;
19649 	union scsi_cdb cdb;
19650 	struct uscsi_cmd ucmd_buf;
19651 	int status;
19652 	struct sd_lun *un;
19653 
19654 	ASSERT(ssc != NULL);
19655 	un = ssc->ssc_un;
19656 	ASSERT(un != NULL);
19657 	ASSERT(!mutex_owned(SD_MUTEX(un)));
19658 
19659 	SD_TRACE(SD_LOG_IO, un,
19660 	    "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);
19661 
19662 	if (un->un_f_check_start_stop &&
19663 	    ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
19664 	    (un->un_f_start_stop_supported != TRUE)) {
19665 		return (0);
19666 	}
19667 
19668 	/*
19669 	 * If we are performing an eject operation and we receive any
19670 	 * command other than SD_TARGET_EJECT, we should immediately
19671 	 * return.
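	 *
	 * Aside: the flag value is placed directly into CDB byte 4 below
	 * (cdb_opaque[4]). Per the SCSI START STOP UNIT definition, bit 0
	 * of that byte is START and bit 1 is LOEJ, so, assuming the
	 * conventional sddef.h values, SD_TARGET_STOP, SD_TARGET_START and
	 * SD_TARGET_EJECT encode as 0x00, 0x01 and 0x02 respectively.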
19672 */ 19673 if (flag != SD_TARGET_EJECT) { 19674 mutex_enter(SD_MUTEX(un)); 19675 if (un->un_f_ejecting == TRUE) { 19676 mutex_exit(SD_MUTEX(un)); 19677 return (EAGAIN); 19678 } 19679 mutex_exit(SD_MUTEX(un)); 19680 } 19681 19682 bzero(&cdb, sizeof (cdb)); 19683 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19684 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19685 19686 cdb.scc_cmd = SCMD_START_STOP; 19687 cdb.cdb_opaque[4] = (uchar_t)flag; 19688 19689 ucmd_buf.uscsi_cdb = (char *)&cdb; 19690 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19691 ucmd_buf.uscsi_bufaddr = NULL; 19692 ucmd_buf.uscsi_buflen = 0; 19693 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19694 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19695 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19696 ucmd_buf.uscsi_timeout = 200; 19697 19698 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 19699 UIO_SYSSPACE, path_flag); 19700 19701 switch (status) { 19702 case 0: 19703 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 19704 break; /* Success! */ 19705 case EIO: 19706 switch (ucmd_buf.uscsi_status) { 19707 case STATUS_RESERVATION_CONFLICT: 19708 status = EACCES; 19709 break; 19710 case STATUS_CHECK: 19711 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 19712 switch (scsi_sense_key( 19713 (uint8_t *)&sense_buf)) { 19714 case KEY_ILLEGAL_REQUEST: 19715 status = ENOTSUP; 19716 break; 19717 case KEY_NOT_READY: 19718 if (scsi_sense_asc( 19719 (uint8_t *)&sense_buf) 19720 == 0x3A) { 19721 status = ENXIO; 19722 } 19723 break; 19724 default: 19725 break; 19726 } 19727 } 19728 break; 19729 default: 19730 break; 19731 } 19732 break; 19733 default: 19734 break; 19735 } 19736 19737 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 19738 19739 return (status); 19740 } 19741 19742 19743 /* 19744 * Function: sd_start_stop_unit_callback 19745 * 19746 * Description: timeout(9F) callback to begin recovery process for a 19747 * device that has spun down. 19748 * 19749 * Arguments: arg - pointer to associated softstate struct. 19750 * 19751 * Context: Executes in a timeout(9F) thread context 19752 */ 19753 19754 static void 19755 sd_start_stop_unit_callback(void *arg) 19756 { 19757 struct sd_lun *un = arg; 19758 ASSERT(un != NULL); 19759 ASSERT(!mutex_owned(SD_MUTEX(un))); 19760 19761 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 19762 19763 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 19764 } 19765 19766 19767 /* 19768 * Function: sd_start_stop_unit_task 19769 * 19770 * Description: Recovery procedure when a drive is spun down. 19771 * 19772 * Arguments: arg - pointer to associated softstate struct. 19773 * 19774 * Context: Executes in a taskq() thread context 19775 */ 19776 19777 static void 19778 sd_start_stop_unit_task(void *arg) 19779 { 19780 struct sd_lun *un = arg; 19781 sd_ssc_t *ssc; 19782 int rval; 19783 19784 ASSERT(un != NULL); 19785 ASSERT(!mutex_owned(SD_MUTEX(un))); 19786 19787 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 19788 19789 /* 19790 * Some unformatted drives report not ready error, no need to 19791 * restart if format has been initiated. 19792 */ 19793 mutex_enter(SD_MUTEX(un)); 19794 if (un->un_f_format_in_progress == TRUE) { 19795 mutex_exit(SD_MUTEX(un)); 19796 return; 19797 } 19798 mutex_exit(SD_MUTEX(un)); 19799 19800 /* 19801 * When a START STOP command is issued from here, it is part of a 19802 * failure recovery operation and must be issued before any other 19803 * commands, including any pending retries. 
Thus it must be sent
19804 	 * using SD_PATH_DIRECT_PRIORITY. Whether or not the spin-up
19805 	 * succeeds, we will start I/O after the attempt.
19806 	 */
19807 	ssc = sd_ssc_init(un);
19808 	rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START,
19809 	    SD_PATH_DIRECT_PRIORITY);
19810 	if (rval != 0)
19811 		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19812 	sd_ssc_fini(ssc);
19813 	/*
19814 	 * The above call blocks until the START_STOP_UNIT command completes.
19815 	 * Now that it has completed, we must re-try the original IO that
19816 	 * received the NOT READY condition in the first place. There are
19817 	 * three possible conditions here:
19818 	 *
19819 	 *  (1) The original IO is on un_retry_bp.
19820 	 *  (2) The original IO is on the regular wait queue, and un_retry_bp
19821 	 *      is NULL.
19822 	 *  (3) The original IO is on the regular wait queue, and un_retry_bp
19823 	 *      points to some other, unrelated bp.
19824 	 *
19825 	 * For each case, we must call sd_start_cmds() with un_retry_bp
19826 	 * as the argument. If un_retry_bp is NULL, this will initiate
19827 	 * processing of the regular wait queue. If un_retry_bp is not NULL,
19828 	 * then this will process the bp on un_retry_bp. That may or may not
19829 	 * be the original IO, but that does not matter: the important thing
19830 	 * is to keep the IO processing going at this point.
19831 	 *
19832 	 * Note: This is a very specific error recovery sequence associated
19833 	 * with a drive that is not spun up. We attempt a START_STOP_UNIT and
19834 	 * serialize the I/O with completion of the spin-up.
19835 	 */
19836 	mutex_enter(SD_MUTEX(un));
19837 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19838 	    "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
19839 	    un, un->un_retry_bp);
19840 	un->un_startstop_timeid = NULL;	/* Timeout is no longer pending */
19841 	sd_start_cmds(un, un->un_retry_bp);
19842 	mutex_exit(SD_MUTEX(un));
19843 
19844 	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
19845 }
19846 
19847 
19848 /*
19849  * Function: sd_send_scsi_INQUIRY
19850  *
19851  * Description: Issue the scsi INQUIRY command.
19852  *
19853  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19854  *		structure for this target.
19855  *		bufaddr - buffer to receive the INQUIRY data
19856  *		buflen - size of the buffer at bufaddr
19857  *		evpd - value for the EVPD bit in the CDB
19858  *		page_code - VPD page to request when evpd is set
19859  *		residp - optional ptr to receive the residual byte count
19860  *
19861  * Return Code: 0 - Success
19862  *		errno return code from sd_ssc_send()
19863  *
19864  * Context: Can sleep. Does not return until command is completed.
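 *
 *		A minimal usage sketch (illustrative values only): to fetch
 *		the Supported VPD Pages list, one could issue
 *
 *			rval = sd_send_scsi_INQUIRY(ssc, bufaddr, 0xff,
 *			    0x01, 0x00, &resid);
 *
 *		i.e. EVPD set with page_code zero, then examine buflen minus
 *		the returned residual count to see how much data arrived.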
19865  */
19866 
19867 static int
19868 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen,
19869 	uchar_t evpd, uchar_t page_code, size_t *residp)
19870 {
19871 	union scsi_cdb cdb;
19872 	struct uscsi_cmd ucmd_buf;
19873 	int status;
19874 	struct sd_lun *un;
19875 
19876 	ASSERT(ssc != NULL);
19877 	un = ssc->ssc_un;
19878 	ASSERT(un != NULL);
19879 	ASSERT(!mutex_owned(SD_MUTEX(un)));
19880 	ASSERT(bufaddr != NULL);
19881 
19882 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
19883 
19884 	bzero(&cdb, sizeof (cdb));
19885 	bzero(&ucmd_buf, sizeof (ucmd_buf));
19886 	bzero(bufaddr, buflen);
19887 
19888 	cdb.scc_cmd = SCMD_INQUIRY;
19889 	cdb.cdb_opaque[1] = evpd;
19890 	cdb.cdb_opaque[2] = page_code;
19891 	FORMG0COUNT(&cdb, buflen);
19892 
19893 	ucmd_buf.uscsi_cdb = (char *)&cdb;
19894 	ucmd_buf.uscsi_cdblen = CDB_GROUP0;
19895 	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19896 	ucmd_buf.uscsi_buflen = buflen;
19897 	ucmd_buf.uscsi_rqbuf = NULL;
19898 	ucmd_buf.uscsi_rqlen = 0;
19899 	ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
19900 	ucmd_buf.uscsi_timeout = 200;	/* Excessive legacy value */
19901 
19902 	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19903 	    UIO_SYSSPACE, SD_PATH_DIRECT);
19904 
19905 	/*
19906 	 * Only handle status == 0 here; the upper-level caller will
19907 	 * apply its own assessment based on the context.
19908 	 */
19909 	if (status == 0)
19910 		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19911 
19912 	if ((status == 0) && (residp != NULL)) {
19913 		*residp = ucmd_buf.uscsi_resid;
19914 	}
19915 
19916 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
19917 
19918 	return (status);
19919 }
19920 
19921 
19922 /*
19923  * Function: sd_send_scsi_TEST_UNIT_READY
19924  *
19925  * Description: Issue the scsi TEST UNIT READY command.
19926  *		This routine can be told to set the flag USCSI_DIAGNOSE to
19927  *		prevent retrying failed commands. Use this when the intent
19928  *		is either to check for device readiness, to clear a Unit
19929  *		Attention, or to clear any outstanding sense data.
19930  *		However under specific conditions the expected behavior
19931  *		is for retries to bring a device ready, so use the flag
19932  *		with caution.
19933  *
19934  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19935  *		structure for this target.
19936  *		flag:	SD_CHECK_FOR_MEDIA: return ENXIO if no media present
19937  *			SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
19938  *			0: don't check for media present; do retries on cmd.
19939  *
19940  * Return Code: 0 - Success
19941  *		EIO - IO error
19942  *		EACCES - Reservation conflict detected
19943  *		ENXIO - Not Ready, medium not present
19944  *		errno return code from sd_ssc_send()
19945  *
19946  * Context: Can sleep. Does not return until command is completed.
19947  */
19948 
19949 static int
19950 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag)
19951 {
19952 	struct scsi_extended_sense sense_buf;
19953 	union scsi_cdb cdb;
19954 	struct uscsi_cmd ucmd_buf;
19955 	int status;
19956 	struct sd_lun *un;
19957 
19958 	ASSERT(ssc != NULL);
19959 	un = ssc->ssc_un;
19960 	ASSERT(un != NULL);
19961 	ASSERT(!mutex_owned(SD_MUTEX(un)));
19962 
19963 	SD_TRACE(SD_LOG_IO, un,
19964 	    "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
19965 
19966 	/*
19967 	 * Some Seagate elite1 TQ devices can hang with disconnect/reconnect
19968 	 * timeouts when they receive a TUR and the queue is not empty. Check
Check 19969 * the configuration flag set during attach (indicating the drive has 19970 * this firmware bug) and un_ncmds_in_transport before issuing the 19971 * TUR. If there are 19972 * pending commands, return success; this is a bit arbitrary, but it is ok 19973 * for non-removables (i.e. the eliteI disks) and non-clustering 19974 * configurations. 19975 */ 19976 if (un->un_f_cfg_tur_check == TRUE) { 19977 mutex_enter(SD_MUTEX(un)); 19978 if (un->un_ncmds_in_transport != 0) { 19979 mutex_exit(SD_MUTEX(un)); 19980 return (0); 19981 } 19982 mutex_exit(SD_MUTEX(un)); 19983 } 19984 19985 bzero(&cdb, sizeof (cdb)); 19986 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19987 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19988 19989 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 19990 19991 ucmd_buf.uscsi_cdb = (char *)&cdb; 19992 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19993 ucmd_buf.uscsi_bufaddr = NULL; 19994 ucmd_buf.uscsi_buflen = 0; 19995 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19996 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19997 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19998 19999 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 20000 if ((flag & SD_DONT_RETRY_TUR) != 0) { 20001 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 20002 } 20003 ucmd_buf.uscsi_timeout = 60; 20004 20005 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20006 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : 20007 SD_PATH_STANDARD)); 20008 20009 switch (status) { 20010 case 0: 20011 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20012 break; /* Success! */ 20013 case EIO: 20014 switch (ucmd_buf.uscsi_status) { 20015 case STATUS_RESERVATION_CONFLICT: 20016 status = EACCES; 20017 break; 20018 case STATUS_CHECK: 20019 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 20020 break; 20021 } 20022 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20023 (scsi_sense_key((uint8_t *)&sense_buf) == 20024 KEY_NOT_READY) && 20025 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) { 20026 status = ENXIO; 20027 } 20028 break; 20029 default: 20030 break; 20031 } 20032 break; 20033 default: 20034 break; 20035 } 20036 20037 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 20038 20039 return (status); 20040 } 20041 20042 /* 20043 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 20044 * 20045 * Description: Issue the scsi PERSISTENT RESERVE IN command. 20046 * 20047 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20048 * structure for this target. * usr_cmd - SD_READ_KEYS or SD_READ_RESV * data_len - length of the data buffer; must be 0 when * data_bufp is NULL * data_bufp - buffer to receive the data; if NULL, a buffer of * MHIOC_RESV_KEY_SIZE bytes is allocated internally 20049 * 20050 * Return Code: 0 - Success 20051 * EACCES 20052 * ENOTSUP 20053 * errno return code from sd_ssc_send() 20054 * 20055 * Context: Can sleep. Does not return until command is completed.
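 *
 * Example: an illustrative sketch, not from the original source, of
 * reading the registered keys into a caller-supplied buffer; on
 * failure the caller applies its own assessment:
 *
 *	uchar_t		keybuf[MHIOC_RESV_KEY_SIZE];
 *	sd_ssc_t	*ssc = sd_ssc_init(un);
 *
 *	if (sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
 *	    sizeof (keybuf), keybuf) != 0)
 *		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
 *	sd_ssc_fini(ssc);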
20056 */ 20057 20058 static int 20059 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd, 20060 uint16_t data_len, uchar_t *data_bufp) 20061 { 20062 struct scsi_extended_sense sense_buf; 20063 union scsi_cdb cdb; 20064 struct uscsi_cmd ucmd_buf; 20065 int status; 20066 int no_caller_buf = FALSE; 20067 struct sd_lun *un; 20068 20069 ASSERT(ssc != NULL); 20070 un = ssc->ssc_un; 20071 ASSERT(un != NULL); 20072 ASSERT(!mutex_owned(SD_MUTEX(un))); 20073 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 20074 20075 SD_TRACE(SD_LOG_IO, un, 20076 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 20077 20078 bzero(&cdb, sizeof (cdb)); 20079 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20080 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20081 if (data_bufp == NULL) { 20082 /* Allocate a default buf if the caller did not give one */ 20083 ASSERT(data_len == 0); 20084 data_len = MHIOC_RESV_KEY_SIZE; 20085 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 20086 no_caller_buf = TRUE; 20087 } 20088 20089 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 20090 cdb.cdb_opaque[1] = usr_cmd; 20091 FORMG1COUNT(&cdb, data_len); 20092 20093 ucmd_buf.uscsi_cdb = (char *)&cdb; 20094 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20095 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 20096 ucmd_buf.uscsi_buflen = data_len; 20097 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20098 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20099 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20100 ucmd_buf.uscsi_timeout = 60; 20101 20102 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20103 UIO_SYSSPACE, SD_PATH_STANDARD); 20104 20105 switch (status) { 20106 case 0: 20107 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20108 20109 break; /* Success! */ 20110 case EIO: 20111 switch (ucmd_buf.uscsi_status) { 20112 case STATUS_RESERVATION_CONFLICT: 20113 status = EACCES; 20114 break; 20115 case STATUS_CHECK: 20116 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20117 (scsi_sense_key((uint8_t *)&sense_buf) == 20118 KEY_ILLEGAL_REQUEST)) { 20119 status = ENOTSUP; 20120 } 20121 break; 20122 default: 20123 break; 20124 } 20125 break; 20126 default: 20127 break; 20128 } 20129 20130 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 20131 20132 if (no_caller_buf == TRUE) { 20133 kmem_free(data_bufp, data_len); 20134 } 20135 20136 return (status); 20137 } 20138 20139 20140 /* 20141 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 20142 * 20143 * Description: This routine is the driver entry point for handling 20144 * multi-host persistent reservation requests (MHIOCGRP_REGISTER, 20145 * MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT, and MHIOCGRP_REGISTERANDIGNOREKEY) by sending the SCSI-3 PROUT commands to the 20146 * device. 20147 * 20148 * Arguments: ssc - ssc contains un - pointer to soft state struct 20149 * for the target. 20150 * usr_cmd SCSI-3 reservation facility command (one of 20151 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 20152 * SD_SCSI3_PREEMPTANDABORT, or SD_SCSI3_REGISTERANDIGNOREKEY) 20153 * usr_bufp - user provided pointer to a register, reserve descriptor, 20154 * preempt and abort, or register and ignore key structure 20155 * (mhioc_register_t, mhioc_resv_desc_t, mhioc_preemptandabort_t, mhioc_registerandignorekey_t) 20156 * 20157 * Return Code: 0 - Success 20158 * EACCES 20159 * ENOTSUP 20160 * errno return code from sd_ssc_send() 20161 * 20162 * Context: Can sleep. Does not return until command is completed.
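 *
 * Example: an illustrative sketch, not from the original source,
 * mirroring the MHIOCGRP_REGISTER handling in sdioctl(); "mykey" is
 * a hypothetical caller-held key, and "reg" would normally be copied
 * in from the user's mhioc_register_t:
 *
 *	mhioc_register_t	reg;
 *	sd_ssc_t		*ssc = sd_ssc_init(un);
 *
 *	bzero(&reg, sizeof (reg));
 *	bcopy(mykey, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
 *	if (sd_send_scsi_PERSISTENT_RESERVE_OUT(ssc, SD_SCSI3_REGISTER,
 *	    (uchar_t *)&reg) != 0)
 *		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
 *	sd_ssc_fini(ssc);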
20163 */ 20164 20165 static int 20166 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd, 20167 uchar_t *usr_bufp) 20168 { 20169 struct scsi_extended_sense sense_buf; 20170 union scsi_cdb cdb; 20171 struct uscsi_cmd ucmd_buf; 20172 int status; 20173 uchar_t data_len = sizeof (sd_prout_t); 20174 sd_prout_t *prp; 20175 struct sd_lun *un; 20176 20177 ASSERT(ssc != NULL); 20178 un = ssc->ssc_un; 20179 ASSERT(un != NULL); 20180 ASSERT(!mutex_owned(SD_MUTEX(un))); 20181 ASSERT(data_len == 24); /* required by scsi spec */ 20182 20183 SD_TRACE(SD_LOG_IO, un, 20184 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 20185 20186 if (usr_bufp == NULL) { 20187 return (EINVAL); 20188 } 20189 20190 bzero(&cdb, sizeof (cdb)); 20191 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20192 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20193 prp = kmem_zalloc(data_len, KM_SLEEP); 20194 20195 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 20196 cdb.cdb_opaque[1] = usr_cmd; 20197 FORMG1COUNT(&cdb, data_len); 20198 20199 ucmd_buf.uscsi_cdb = (char *)&cdb; 20200 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20201 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 20202 ucmd_buf.uscsi_buflen = data_len; 20203 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20204 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20205 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 20206 ucmd_buf.uscsi_timeout = 60; 20207 20208 switch (usr_cmd) { 20209 case SD_SCSI3_REGISTER: { 20210 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 20211 20212 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20213 bcopy(ptr->newkey.key, prp->service_key, 20214 MHIOC_RESV_KEY_SIZE); 20215 prp->aptpl = ptr->aptpl; 20216 break; 20217 } 20218 case SD_SCSI3_RESERVE: 20219 case SD_SCSI3_RELEASE: { 20220 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 20221 20222 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20223 prp->scope_address = BE_32(ptr->scope_specific_addr); 20224 cdb.cdb_opaque[2] = ptr->type; 20225 break; 20226 } 20227 case SD_SCSI3_PREEMPTANDABORT: { 20228 mhioc_preemptandabort_t *ptr = 20229 (mhioc_preemptandabort_t *)usr_bufp; 20230 20231 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 20232 bcopy(ptr->victim_key.key, prp->service_key, 20233 MHIOC_RESV_KEY_SIZE); 20234 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 20235 cdb.cdb_opaque[2] = ptr->resvdesc.type; 20236 ucmd_buf.uscsi_flags |= USCSI_HEAD; 20237 break; 20238 } 20239 case SD_SCSI3_REGISTERANDIGNOREKEY: 20240 { 20241 mhioc_registerandignorekey_t *ptr; 20242 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 20243 bcopy(ptr->newkey.key, 20244 prp->service_key, MHIOC_RESV_KEY_SIZE); 20245 prp->aptpl = ptr->aptpl; 20246 break; 20247 } 20248 default: 20249 ASSERT(FALSE); 20250 break; 20251 } 20252 20253 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20254 UIO_SYSSPACE, SD_PATH_STANDARD); 20255 20256 switch (status) { 20257 case 0: 20258 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20259 break; /* Success! 
*/ 20260 case EIO: 20261 switch (ucmd_buf.uscsi_status) { 20262 case STATUS_RESERVATION_CONFLICT: 20263 status = EACCES; 20264 break; 20265 case STATUS_CHECK: 20266 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20267 (scsi_sense_key((uint8_t *)&sense_buf) == 20268 KEY_ILLEGAL_REQUEST)) { 20269 status = ENOTSUP; 20270 } 20271 break; 20272 default: 20273 break; 20274 } 20275 break; 20276 default: 20277 break; 20278 } 20279 20280 kmem_free(prp, data_len); 20281 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 20282 return (status); 20283 } 20284 20285 20286 /* 20287 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 20288 * 20289 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 20290 * 20291 * Arguments: un - pointer to the target's soft state struct 20292 * dkc - pointer to the callback structure 20293 * 20294 * Return Code: 0 - success 20295 * errno-type error code 20296 * 20297 * Context: kernel thread context only. 20298 * 20299 * _______________________________________________________________ 20300 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE | 20301 * |FLUSH_VOLATILE| | operation | 20302 * |______________|______________|_________________________________| 20303 * | 0 | NULL | Synchronous flush on both | 20304 * | | | volatile and non-volatile cache | 20305 * |______________|______________|_________________________________| 20306 * | 1 | NULL | Synchronous flush on volatile | 20307 * | | | cache; disk drivers may suppress| 20308 * | | | flush if disk table indicates | 20309 * | | | non-volatile cache | 20310 * |______________|______________|_________________________________| 20311 * | 0 | !NULL | Asynchronous flush on both | 20312 * | | | volatile and non-volatile cache;| 20313 * |______________|______________|_________________________________| 20314 * | 1 | !NULL | Asynchronous flush on volatile | 20315 * | | | cache; disk drivers may suppress| 20316 * | | | flush if disk table indicates | 20317 * | | | non-volatile cache | 20318 * |______________|______________|_________________________________| 20319 * 20320 */ 20321 20322 static int 20323 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc) 20324 { 20325 struct sd_uscsi_info *uip; 20326 struct uscsi_cmd *uscmd; 20327 union scsi_cdb *cdb; 20328 struct buf *bp; 20329 int rval = 0; 20330 int is_async; 20331 20332 SD_TRACE(SD_LOG_IO, un, 20333 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 20334 20335 ASSERT(un != NULL); 20336 ASSERT(!mutex_owned(SD_MUTEX(un))); 20337 20338 if (dkc == NULL || dkc->dkc_callback == NULL) { 20339 is_async = FALSE; 20340 } else { 20341 is_async = TRUE; 20342 } 20343 20344 mutex_enter(SD_MUTEX(un)); 20345 /* check whether cache flush should be suppressed */ 20346 if (un->un_f_suppress_cache_flush == TRUE) { 20347 mutex_exit(SD_MUTEX(un)); 20348 /* 20349 * suppress the cache flush if the device is told to do 20350 * so by sd.conf or disk table 20351 */ 20352 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \ 20353 skip the cache flush since suppress_cache_flush is %d!\n", 20354 un->un_f_suppress_cache_flush); 20355 20356 if (is_async == TRUE) { 20357 /* invoke callback for asynchronous flush */ 20358 (*dkc->dkc_callback)(dkc->dkc_cookie, 0); 20359 } 20360 return (rval); 20361 } 20362 mutex_exit(SD_MUTEX(un)); 20363 20364 /* 20365 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be 20366 * set properly 20367 */ 20368 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP); 20369 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE; 20370 20371 
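	/*
	 * The SYNC_NV bit set below (when the device supports it and the
	 * caller passed FLUSH_VOLATILE) asks the target to flush only its
	 * volatile cache. If the device rejects the bit with an ILLEGAL
	 * REQUEST sense key, the biodone handler clears
	 * un_f_sync_nv_supported so the bit is not used again.
	 */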
mutex_enter(SD_MUTEX(un)); 20372 if (dkc != NULL && un->un_f_sync_nv_supported && 20373 (dkc->dkc_flag & FLUSH_VOLATILE)) { 20374 /* 20375 * If the device supports the SYNC_NV bit, turn it on 20376 * to flush only the volatile cache. 20377 */ 20378 cdb->cdb_un.tag |= SD_SYNC_NV_BIT; 20379 } 20380 mutex_exit(SD_MUTEX(un)); 20381 20382 /* 20383 * First get some memory for the uscsi_cmd struct and cdb 20384 * and initialize for SYNCHRONIZE_CACHE cmd. 20385 */ 20386 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 20387 uscmd->uscsi_cdblen = CDB_GROUP1; 20388 uscmd->uscsi_cdb = (caddr_t)cdb; 20389 uscmd->uscsi_bufaddr = NULL; 20390 uscmd->uscsi_buflen = 0; 20391 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 20392 uscmd->uscsi_rqlen = SENSE_LENGTH; 20393 uscmd->uscsi_rqresid = SENSE_LENGTH; 20394 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 20395 uscmd->uscsi_timeout = sd_io_time; 20396 20397 /* 20398 * Allocate an sd_uscsi_info struct and fill it with the info 20399 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 20400 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 20401 * since we allocate the buf here in this function, we do not 20402 * need to preserve the prior contents of b_private. 20403 * The sd_uscsi_info struct is also used by sd_uscsi_strategy(). 20404 */ 20405 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 20406 uip->ui_flags = SD_PATH_DIRECT; 20407 uip->ui_cmdp = uscmd; 20408 20409 bp = getrbuf(KM_SLEEP); 20410 bp->b_private = uip; 20411 20412 /* 20413 * Setup buffer to carry uscsi request. 20414 */ 20415 bp->b_flags = B_BUSY; 20416 bp->b_bcount = 0; 20417 bp->b_blkno = 0; 20418 20419 if (is_async == TRUE) { 20420 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone; 20421 uip->ui_dkc = *dkc; 20422 } 20423 20424 bp->b_edev = SD_GET_DEV(un); 20425 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */ 20426 20427 /* 20428 * Unset un_f_sync_cache_required flag 20429 */ 20430 mutex_enter(SD_MUTEX(un)); 20431 un->un_f_sync_cache_required = FALSE; 20432 mutex_exit(SD_MUTEX(un)); 20433 20434 (void) sd_uscsi_strategy(bp); 20435 20436 /* 20437 * If this is a synchronous request, wait for completion; 20438 * if async, just return and let the b_iodone callback 20439 * clean up. 20440 * NOTE: On return, un_ncmds_in_driver will be decremented, 20441 * but it was also incremented in sd_uscsi_strategy(), so 20442 * we should be ok. 20443 */ 20444 if (is_async == FALSE) { 20445 (void) biowait(bp); 20446 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp); 20447 } 20448 20449 return (rval); 20450 } 20451 20452 20453 static int 20454 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp) 20455 { 20456 struct sd_uscsi_info *uip; 20457 struct uscsi_cmd *uscmd; 20458 uint8_t *sense_buf; 20459 struct sd_lun *un; 20460 int status; 20461 union scsi_cdb *cdb; 20462 20463 uip = (struct sd_uscsi_info *)(bp->b_private); 20464 ASSERT(uip != NULL); 20465 20466 uscmd = uip->ui_cmdp; 20467 ASSERT(uscmd != NULL); 20468 20469 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf; 20470 ASSERT(sense_buf != NULL); 20471 20472 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 20473 ASSERT(un != NULL); 20474 20475 cdb = (union scsi_cdb *)uscmd->uscsi_cdb; 20476 20477 status = geterror(bp); 20478 switch (status) { 20479 case 0: 20480 break; /* Success!
*/ 20481 case EIO: 20482 switch (uscmd->uscsi_status) { 20483 case STATUS_RESERVATION_CONFLICT: 20484 /* Ignore reservation conflict */ 20485 status = 0; 20486 goto done; 20487 20488 case STATUS_CHECK: 20489 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) && 20490 (scsi_sense_key(sense_buf) == 20491 KEY_ILLEGAL_REQUEST)) { 20492 /* Ignore Illegal Request error */ 20493 if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) { 20494 mutex_enter(SD_MUTEX(un)); 20495 un->un_f_sync_nv_supported = FALSE; 20496 mutex_exit(SD_MUTEX(un)); 20497 status = 0; 20498 SD_TRACE(SD_LOG_IO, un, 20499 "un_f_sync_nv_supported \ 20500 is set to false.\n"); 20501 goto done; 20502 } 20503 20504 mutex_enter(SD_MUTEX(un)); 20505 un->un_f_sync_cache_supported = FALSE; 20506 mutex_exit(SD_MUTEX(un)); 20507 SD_TRACE(SD_LOG_IO, un, 20508 "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \ 20509 un_f_sync_cache_supported set to false \ 20510 with asc = %x, ascq = %x\n", 20511 scsi_sense_asc(sense_buf), 20512 scsi_sense_ascq(sense_buf)); 20513 status = ENOTSUP; 20514 goto done; 20515 } 20516 break; 20517 default: 20518 break; 20519 } 20520 /* FALLTHRU */ 20521 default: 20522 /* 20523 * Turn on the un_f_sync_cache_required flag 20524 * since the SYNC CACHE command failed 20525 */ 20526 mutex_enter(SD_MUTEX(un)); 20527 un->un_f_sync_cache_required = TRUE; 20528 mutex_exit(SD_MUTEX(un)); 20529 20530 /* 20531 * Don't log an error message if this device 20532 * has removable media. 20533 */ 20534 if (!un->un_f_has_removable_media) { 20535 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 20536 "SYNCHRONIZE CACHE command failed (%d)\n", status); 20537 } 20538 break; 20539 } 20540 20541 done: 20542 if (uip->ui_dkc.dkc_callback != NULL) { 20543 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status); 20544 } 20545 20546 ASSERT((bp->b_flags & B_REMAPPED) == 0); 20547 freerbuf(bp); 20548 kmem_free(uip, sizeof (struct sd_uscsi_info)); 20549 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 20550 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 20551 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 20552 20553 return (status); 20554 } 20555 20556 20557 /* 20558 * Function: sd_send_scsi_GET_CONFIGURATION 20559 * 20560 * Description: Issues the get configuration command to the device. 20561 * Called from sd_check_for_writable_cd & sd_get_media_info; 20562 * the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN. 20563 * Arguments: ssc - ssc contains pointer to driver soft state (unit) structure 20564 * ucmdbuf - uscsi command buffer to be filled in and issued 20565 * rqbuf - buffer for the request sense data 20566 * rqbuflen - size of the request sense buffer 20567 * bufaddr - buffer to receive the configuration data 20568 * buflen - size of the data buffer (SD_PROFILE_HEADER_LEN) 20569 * path_flag - uscsi chain selection (SD_PATH_STANDARD, SD_PATH_DIRECT, or SD_PATH_DIRECT_PRIORITY) 20570 * 20571 * Return Code: 0 - Success 20572 * errno return code from sd_ssc_send() 20573 * 20574 * Context: Can sleep. Does not return until command is completed. 20575 * 20576 */ 20577 20578 static int 20579 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf, 20580 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen, 20581 int path_flag) 20582 { 20583 char cdb[CDB_GROUP1]; 20584 int status; 20585 struct sd_lun *un; 20586 20587 ASSERT(ssc != NULL); 20588 un = ssc->ssc_un; 20589 ASSERT(un != NULL); 20590 ASSERT(!mutex_owned(SD_MUTEX(un))); 20591 ASSERT(bufaddr != NULL); 20592 ASSERT(ucmdbuf != NULL); 20593 ASSERT(rqbuf != NULL); 20594 20595 SD_TRACE(SD_LOG_IO, un, 20596 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 20597 20598 bzero(cdb, sizeof (cdb)); 20599 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 20600 bzero(rqbuf, rqbuflen); 20601 bzero(bufaddr, buflen); 20602 20603 /* 20604 * Set up cdb field for the get configuration command.
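 * Per MMC (an assumption for this added note), the 0x02 placed in
 * byte 1 below is the RT field value requesting the single feature
 * descriptor named in the Starting Feature Number field (bytes 2-3,
 * left at zero here); byte 8 caps the allocation length at
 * SD_PROFILE_HEADER_LEN.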
20605 */ 20606 cdb[0] = SCMD_GET_CONFIGURATION; 20607 cdb[1] = 0x02; /* Requested Type */ 20608 cdb[8] = SD_PROFILE_HEADER_LEN; 20609 ucmdbuf->uscsi_cdb = cdb; 20610 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 20611 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 20612 ucmdbuf->uscsi_buflen = buflen; 20613 ucmdbuf->uscsi_timeout = sd_io_time; 20614 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 20615 ucmdbuf->uscsi_rqlen = rqbuflen; 20616 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 20617 20618 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 20619 UIO_SYSSPACE, path_flag); 20620 20621 switch (status) { 20622 case 0: 20623 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20624 break; /* Success! */ 20625 case EIO: 20626 switch (ucmdbuf->uscsi_status) { 20627 case STATUS_RESERVATION_CONFLICT: 20628 status = EACCES; 20629 break; 20630 default: 20631 break; 20632 } 20633 break; 20634 default: 20635 break; 20636 } 20637 20638 if (status == 0) { 20639 SD_DUMP_MEMORY(un, SD_LOG_IO, 20640 "sd_send_scsi_GET_CONFIGURATION: data", 20641 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 20642 } 20643 20644 SD_TRACE(SD_LOG_IO, un, 20645 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 20646 20647 return (status); 20648 } 20649 20650 /* 20651 * Function: sd_send_scsi_feature_GET_CONFIGURATION 20652 * 20653 * Description: Issues the get configuration command to the device to 20654 * retrieve a specific feature. Called from 20655 * sd_check_for_writable_cd & sd_set_mmc_caps. 20656 * Arguments: ssc - ssc contains pointer to driver soft state (unit) structure 20657 * ucmdbuf - uscsi command buffer to be filled in and issued 20658 * rqbuf - buffer for the request sense data 20659 * rqbuflen - size of the request sense buffer 20660 * bufaddr - buffer to receive the feature data 20661 * buflen - size of the data buffer 20662 * feature - feature number to request (placed in byte 3 of the CDB) * path_flag - uscsi chain selection (SD_PATH_STANDARD, SD_PATH_DIRECT, or SD_PATH_DIRECT_PRIORITY) 20663 * 20664 * Return Code: 0 - Success 20665 * errno return code from sd_ssc_send() 20666 * 20667 * Context: Can sleep. Does not return until command is completed. 20668 * 20669 */ 20670 static int 20671 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, 20672 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 20673 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag) 20674 { 20675 char cdb[CDB_GROUP1]; 20676 int status; 20677 struct sd_lun *un; 20678 20679 ASSERT(ssc != NULL); 20680 un = ssc->ssc_un; 20681 ASSERT(un != NULL); 20682 ASSERT(!mutex_owned(SD_MUTEX(un))); 20683 ASSERT(bufaddr != NULL); 20684 ASSERT(ucmdbuf != NULL); 20685 ASSERT(rqbuf != NULL); 20686 20687 SD_TRACE(SD_LOG_IO, un, 20688 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 20689 20690 bzero(cdb, sizeof (cdb)); 20691 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 20692 bzero(rqbuf, rqbuflen); 20693 bzero(bufaddr, buflen); 20694 20695 /* 20696 * Set up cdb field for the get configuration command. 20697 */ 20698 cdb[0] = SCMD_GET_CONFIGURATION; 20699 cdb[1] = 0x02; /* Requested Type */ 20700 cdb[3] = feature; 20701 cdb[8] = buflen; 20702 ucmdbuf->uscsi_cdb = cdb; 20703 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 20704 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 20705 ucmdbuf->uscsi_buflen = buflen; 20706 ucmdbuf->uscsi_timeout = sd_io_time; 20707 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 20708 ucmdbuf->uscsi_rqlen = rqbuflen; 20709 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 20710 20711 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL, 20712 UIO_SYSSPACE, path_flag); 20713 20714 switch (status) { 20715 case 0: 20716 20717 break; /* Success!
*/ 20718 case EIO: 20719 switch (ucmdbuf->uscsi_status) { 20720 case STATUS_RESERVATION_CONFLICT: 20721 status = EACCES; 20722 break; 20723 default: 20724 break; 20725 } 20726 break; 20727 default: 20728 break; 20729 } 20730 20731 if (status == 0) { 20732 SD_DUMP_MEMORY(un, SD_LOG_IO, 20733 "sd_send_scsi_feature_GET_CONFIGURATION: data", 20734 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 20735 } 20736 20737 SD_TRACE(SD_LOG_IO, un, 20738 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 20739 20740 return (status); 20741 } 20742 20743 20744 /* 20745 * Function: sd_send_scsi_MODE_SENSE 20746 * 20747 * Description: Utility function for issuing a scsi MODE SENSE command. 20748 * Note: This routine uses a consistent implementation for Group0, 20749 * Group1, and Group2 commands across all platforms. ATAPI devices 20750 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 20751 * 20752 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20753 * structure for this target. 20754 * cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or 20755 * CDB_GROUP[1|2] (10 byte)). 20756 * bufaddr - buffer for page data retrieved from the target. 20757 * buflen - size of page to be retrieved. 20758 * page_code - page code of data to be retrieved from the target. 20759 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20760 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20761 * to use the USCSI "direct" chain and bypass the normal 20762 * command waitq. 20763 * 20764 * Return Code: 0 - Success 20765 * errno return code from sd_ssc_send() 20766 * 20767 * Context: Can sleep. Does not return until command is completed. 20768 */ 20769 20770 static int 20771 sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 20772 size_t buflen, uchar_t page_code, int path_flag) 20773 { 20774 struct scsi_extended_sense sense_buf; 20775 union scsi_cdb cdb; 20776 struct uscsi_cmd ucmd_buf; 20777 int status; 20778 int headlen; 20779 struct sd_lun *un; 20780 20781 ASSERT(ssc != NULL); 20782 un = ssc->ssc_un; 20783 ASSERT(un != NULL); 20784 ASSERT(!mutex_owned(SD_MUTEX(un))); 20785 ASSERT(bufaddr != NULL); 20786 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 20787 (cdbsize == CDB_GROUP2)); 20788 20789 SD_TRACE(SD_LOG_IO, un, 20790 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 20791 20792 bzero(&cdb, sizeof (cdb)); 20793 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20794 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20795 bzero(bufaddr, buflen); 20796 20797 if (cdbsize == CDB_GROUP0) { 20798 cdb.scc_cmd = SCMD_MODE_SENSE; 20799 cdb.cdb_opaque[2] = page_code; 20800 FORMG0COUNT(&cdb, buflen); 20801 headlen = MODE_HEADER_LENGTH; 20802 } else { 20803 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 20804 cdb.cdb_opaque[2] = page_code; 20805 FORMG1COUNT(&cdb, buflen); 20806 headlen = MODE_HEADER_LENGTH_GRP2; 20807 } 20808 20809 ASSERT(headlen <= buflen); 20810 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 20811 20812 ucmd_buf.uscsi_cdb = (char *)&cdb; 20813 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20814 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20815 ucmd_buf.uscsi_buflen = buflen; 20816 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20817 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20818 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20819 ucmd_buf.uscsi_timeout = 60; 20820 20821 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20822 UIO_SYSSPACE, path_flag); 20823 20824 switch (status) { 20825 case 0: 20826 /* 20827 * sr_check_wp() uses
the 0x3f page code and checks the header of 20828 * the mode page to determine whether the target device is write-protected. 20829 * But some USB devices return 0 bytes for the 0x3f page code. For 20830 * this case, make sure that at least the mode page header is 20831 * returned. 20832 */ 20833 if (buflen - ucmd_buf.uscsi_resid < headlen) { 20834 status = EIO; 20835 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1, 20836 "mode page header is not returned"); 20837 } 20838 break; /* Success! */ 20839 case EIO: 20840 switch (ucmd_buf.uscsi_status) { 20841 case STATUS_RESERVATION_CONFLICT: 20842 status = EACCES; 20843 break; 20844 default: 20845 break; 20846 } 20847 break; 20848 default: 20849 break; 20850 } 20851 20852 if (status == 0) { 20853 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data", 20854 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20855 } 20856 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 20857 20858 return (status); 20859 } 20860 20861 20862 /* 20863 * Function: sd_send_scsi_MODE_SELECT 20864 * 20865 * Description: Utility function for issuing a scsi MODE SELECT command. 20866 * Note: This routine uses a consistent implementation for Group0, 20867 * Group1, and Group2 commands across all platforms. ATAPI devices 20868 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 20869 * 20870 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20871 * structure for this target. 20872 * cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or 20873 * CDB_GROUP[1|2] (10 byte)). 20874 * bufaddr - buffer for page data retrieved from the target. 20875 * buflen - size of page to be retrieved. 20876 * save_page - boolean to determine whether the SP bit should be set. 20877 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20878 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20879 * to use the USCSI "direct" chain and bypass the normal 20880 * command waitq. 20881 * 20882 * Return Code: 0 - Success 20883 * errno return code from sd_ssc_send() 20884 * 20885 * Context: Can sleep. Does not return until command is completed.
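 *
 * Example: an illustrative sketch, not from the original source, of
 * issuing a Group 1 MODE SELECT; "mselect_buf" and "mselect_len" are
 * hypothetical names for a caller-prepared buffer holding the mode
 * header plus the page data:
 *
 *	rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, mselect_buf,
 *	    mselect_len, SD_SAVE_PAGE, SD_PATH_STANDARD);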
20886 */ 20887 20888 static int 20889 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr, 20890 size_t buflen, uchar_t save_page, int path_flag) 20891 { 20892 struct scsi_extended_sense sense_buf; 20893 union scsi_cdb cdb; 20894 struct uscsi_cmd ucmd_buf; 20895 int status; 20896 struct sd_lun *un; 20897 20898 ASSERT(ssc != NULL); 20899 un = ssc->ssc_un; 20900 ASSERT(un != NULL); 20901 ASSERT(!mutex_owned(SD_MUTEX(un))); 20902 ASSERT(bufaddr != NULL); 20903 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 20904 (cdbsize == CDB_GROUP2)); 20905 20906 SD_TRACE(SD_LOG_IO, un, 20907 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 20908 20909 bzero(&cdb, sizeof (cdb)); 20910 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20911 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20912 20913 /* Set the PF bit for many third party drives */ 20914 cdb.cdb_opaque[1] = 0x10; 20915 20916 /* Set the save page (SP) bit if requested */ 20917 if (save_page == SD_SAVE_PAGE) { 20918 cdb.cdb_opaque[1] |= 0x01; 20919 } 20920 20921 if (cdbsize == CDB_GROUP0) { 20922 cdb.scc_cmd = SCMD_MODE_SELECT; 20923 FORMG0COUNT(&cdb, buflen); 20924 } else { 20925 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 20926 FORMG1COUNT(&cdb, buflen); 20927 } 20928 20929 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 20930 20931 ucmd_buf.uscsi_cdb = (char *)&cdb; 20932 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20933 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20934 ucmd_buf.uscsi_buflen = buflen; 20935 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20936 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20937 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 20938 ucmd_buf.uscsi_timeout = 60; 20939 20940 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 20941 UIO_SYSSPACE, path_flag); 20942 20943 switch (status) { 20944 case 0: 20945 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 20946 break; /* Success! */ 20947 case EIO: 20948 switch (ucmd_buf.uscsi_status) { 20949 case STATUS_RESERVATION_CONFLICT: 20950 status = EACCES; 20951 break; 20952 default: 20953 break; 20954 } 20955 break; 20956 default: 20957 break; 20958 } 20959 20960 if (status == 0) { 20961 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 20962 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20963 } 20964 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 20965 20966 return (status); 20967 } 20968 20969 20970 /* 20971 * Function: sd_send_scsi_RDWR 20972 * 20973 * Description: Issue a scsi READ or WRITE command with the given parameters. 20974 * 20975 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 20976 * structure for this target. 20977 * cmd: SCMD_READ or SCMD_WRITE 20978 * bufaddr: Address of caller's buffer to receive the RDWR data 20979 * buflen: Length of caller's buffer to receive the RDWR data. 20980 * start_block: Block number for the start of the RDWR operation. 20981 * (Assumes target-native block size.) 20984 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20985 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20986 * to use the USCSI "direct" chain and bypass the normal 20987 * command waitq. 20988 * 20989 * Return Code: 0 - Success 20990 * errno return code from sd_ssc_send() 20991 * 20992 * Context: Can sleep. Does not return until command is completed.
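 *
 * Example: an illustrative sketch, not from the original source, of
 * reading the first target block; it assumes the target block size
 * has already been validated (see the un_f_tgt_blocksize_is_valid
 * check in the function body):
 *
 *	uchar_t	*blk = kmem_zalloc(un->un_tgt_blocksize, KM_SLEEP);
 *
 *	rval = sd_send_scsi_RDWR(ssc, SCMD_READ, blk,
 *	    un->un_tgt_blocksize, (daddr_t)0, SD_PATH_STANDARD);
 *	...
 *	kmem_free(blk, un->un_tgt_blocksize);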
20993 */ 20994 20995 static int 20996 sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr, 20997 size_t buflen, daddr_t start_block, int path_flag) 20998 { 20999 struct scsi_extended_sense sense_buf; 21000 union scsi_cdb cdb; 21001 struct uscsi_cmd ucmd_buf; 21002 uint32_t block_count; 21003 int status; 21004 int cdbsize; 21005 uchar_t flag; 21006 struct sd_lun *un; 21007 21008 ASSERT(ssc != NULL); 21009 un = ssc->ssc_un; 21010 ASSERT(un != NULL); 21011 ASSERT(!mutex_owned(SD_MUTEX(un))); 21012 ASSERT(bufaddr != NULL); 21013 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 21014 21015 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 21016 21017 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 21018 return (EINVAL); 21019 } 21020 21021 mutex_enter(SD_MUTEX(un)); 21022 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 21023 mutex_exit(SD_MUTEX(un)); 21024 21025 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 21026 21027 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 21028 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 21029 bufaddr, buflen, start_block, block_count); 21030 21031 bzero(&cdb, sizeof (cdb)); 21032 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21033 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21034 21035 /* Compute CDB size to use */ 21036 if (start_block > 0xffffffff) 21037 cdbsize = CDB_GROUP4; 21038 else if ((start_block & 0xFFE00000) || 21039 (un->un_f_cfg_is_atapi == TRUE)) 21040 cdbsize = CDB_GROUP1; 21041 else 21042 cdbsize = CDB_GROUP0; 21043 21044 switch (cdbsize) { 21045 case CDB_GROUP0: /* 6-byte CDBs */ 21046 cdb.scc_cmd = cmd; 21047 FORMG0ADDR(&cdb, start_block); 21048 FORMG0COUNT(&cdb, block_count); 21049 break; 21050 case CDB_GROUP1: /* 10-byte CDBs */ 21051 cdb.scc_cmd = cmd | SCMD_GROUP1; 21052 FORMG1ADDR(&cdb, start_block); 21053 FORMG1COUNT(&cdb, block_count); 21054 break; 21055 case CDB_GROUP4: /* 16-byte CDBs */ 21056 cdb.scc_cmd = cmd | SCMD_GROUP4; 21057 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 21058 FORMG4COUNT(&cdb, block_count); 21059 break; 21060 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 21061 default: 21062 /* All others reserved */ 21063 return (EINVAL); 21064 } 21065 21066 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 21067 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 21068 21069 ucmd_buf.uscsi_cdb = (char *)&cdb; 21070 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 21071 ucmd_buf.uscsi_bufaddr = bufaddr; 21072 ucmd_buf.uscsi_buflen = buflen; 21073 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21074 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21075 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 21076 ucmd_buf.uscsi_timeout = 60; 21077 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21078 UIO_SYSSPACE, path_flag); 21079 21080 switch (status) { 21081 case 0: 21082 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21083 break; /* Success! */ 21084 case EIO: 21085 switch (ucmd_buf.uscsi_status) { 21086 case STATUS_RESERVATION_CONFLICT: 21087 status = EACCES; 21088 break; 21089 default: 21090 break; 21091 } 21092 break; 21093 default: 21094 break; 21095 } 21096 21097 if (status == 0) { 21098 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 21099 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21100 } 21101 21102 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 21103 21104 return (status); 21105 } 21106 21107 21108 /* 21109 * Function: sd_send_scsi_LOG_SENSE 21110 * 21111 * Description: Issue a scsi LOG_SENSE command with the given parameters. 
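 *
 * Example: an illustrative sketch, not from the original source, of
 * fetching the temperature log page with the cumulative-values page
 * control (0x01); "logbuf" is a hypothetical caller-supplied buffer:
 *
 *	rval = sd_send_scsi_LOG_SENSE(ssc, logbuf,
 *	    (uint16_t)sizeof (logbuf), TEMPERATURE_PAGE, 0x01, 0,
 *	    SD_PATH_DIRECT);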
21112 * 21113 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 21114 * structure for this target. 21115 * 21116 * Return Code: 0 - Success 21117 * errno return code from sd_ssc_send() 21118 * 21119 * Context: Can sleep. Does not return until command is completed. 21120 */ 21121 21122 static int 21123 sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen, 21124 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 21125 int path_flag) 21126 21127 { 21128 struct scsi_extended_sense sense_buf; 21129 union scsi_cdb cdb; 21130 struct uscsi_cmd ucmd_buf; 21131 int status; 21132 struct sd_lun *un; 21133 21134 ASSERT(ssc != NULL); 21135 un = ssc->ssc_un; 21136 ASSERT(un != NULL); 21137 ASSERT(!mutex_owned(SD_MUTEX(un))); 21138 21139 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 21140 21141 bzero(&cdb, sizeof (cdb)); 21142 bzero(&ucmd_buf, sizeof (ucmd_buf)); 21143 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 21144 21145 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 21146 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 21147 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 21148 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 21149 FORMG1COUNT(&cdb, buflen); 21150 21151 ucmd_buf.uscsi_cdb = (char *)&cdb; 21152 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 21153 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 21154 ucmd_buf.uscsi_buflen = buflen; 21155 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 21156 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 21157 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 21158 ucmd_buf.uscsi_timeout = 60; 21159 21160 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, 21161 UIO_SYSSPACE, path_flag); 21162 21163 switch (status) { 21164 case 0: 21165 break; 21166 case EIO: 21167 switch (ucmd_buf.uscsi_status) { 21168 case STATUS_RESERVATION_CONFLICT: 21169 status = EACCES; 21170 break; 21171 case STATUS_CHECK: 21172 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 21173 (scsi_sense_key((uint8_t *)&sense_buf) == 21174 KEY_ILLEGAL_REQUEST) && 21175 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) { 21176 /* 21177 * ASC 0x24: INVALID FIELD IN CDB 21178 */ 21179 switch (page_code) { 21180 case START_STOP_CYCLE_PAGE: 21181 /* 21182 * The start stop cycle counter is 21183 * implemented as page 0x31 in earlier 21184 * generation disks. In new generation 21185 * disks the start stop cycle counter is 21186 * implemented as page 0xE. To properly 21187 * handle this case if an attempt for 21188 * log page 0xE is made and fails we 21189 * will try again using page 0x31. 21190 * 21191 * Network storage BU committed to 21192 * maintain the page 0x31 for this 21193 * purpose and will not have any other 21194 * page implemented with page code 0x31 21195 * until all disks transition to the 21196 * standard page. 
21197 */ 21198 mutex_enter(SD_MUTEX(un)); 21199 un->un_start_stop_cycle_page = 21200 START_STOP_CYCLE_VU_PAGE; 21201 cdb.cdb_opaque[2] = 21202 (char)(page_control << 6) | 21203 un->un_start_stop_cycle_page; 21204 mutex_exit(SD_MUTEX(un)); 21205 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 21206 status = sd_ssc_send( 21207 ssc, &ucmd_buf, FKIOCTL, 21208 UIO_SYSSPACE, path_flag); 21209 21210 break; 21211 case TEMPERATURE_PAGE: 21212 status = ENOTTY; 21213 break; 21214 default: 21215 break; 21216 } 21217 } 21218 break; 21219 default: 21220 break; 21221 } 21222 break; 21223 default: 21224 break; 21225 } 21226 21227 if (status == 0) { 21228 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21229 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 21230 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 21231 } 21232 21233 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 21234 21235 return (status); 21236 } 21237 21238 21239 /* 21240 * Function: sdioctl 21241 * 21242 * Description: Driver's ioctl(9e) entry point function. 21243 * 21244 * Arguments: dev - device number 21245 * cmd - ioctl operation to be performed 21246 * arg - user argument, contains data to be set or reference 21247 * parameter for get 21248 * flag - bit flag, indicating open settings, 32/64 bit type 21249 * cred_p - user credential pointer 21250 * rval_p - calling process return value (OPT) 21251 * 21252 * Return Code: EINVAL 21253 * ENOTTY 21254 * ENXIO 21255 * EIO 21256 * EFAULT 21257 * ENOTSUP 21258 * EPERM 21259 * 21260 * Context: Called from the device switch at normal priority. 21261 */ 21262 21263 static int 21264 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 21265 { 21266 struct sd_lun *un = NULL; 21267 int err = 0; 21268 int i = 0; 21269 cred_t *cr; 21270 int tmprval = EINVAL; 21271 int is_valid; 21272 sd_ssc_t *ssc; 21273 21274 /* 21275 * All device accesses go thru sdstrategy where we check on suspend 21276 * status 21277 */ 21278 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21279 return (ENXIO); 21280 } 21281 21282 ASSERT(!mutex_owned(SD_MUTEX(un))); 21283 21284 /* Initialize sd_ssc_t for internal uscsi commands */ 21285 ssc = sd_ssc_init(un); 21286 21287 is_valid = SD_IS_VALID_LABEL(un); 21288 21289 /* 21290 * Moved this wait from sd_uscsi_strategy to here for 21291 * reasons of deadlock prevention. Internal driver commands, 21292 * specifically those to change a devices power level, result 21293 * in a call to sd_uscsi_strategy. 21294 */ 21295 mutex_enter(SD_MUTEX(un)); 21296 while ((un->un_state == SD_STATE_SUSPENDED) || 21297 (un->un_state == SD_STATE_PM_CHANGING)) { 21298 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 21299 } 21300 /* 21301 * Twiddling the counter here protects commands from now 21302 * through to the top of sd_uscsi_strategy. Without the 21303 * counter inc. a power down, for example, could get in 21304 * after the above check for state is made and before 21305 * execution gets to the top of sd_uscsi_strategy. 21306 * That would cause problems. 
21307 */ 21308 un->un_ncmds_in_driver++; 21309 21310 if (!is_valid && 21311 (flag & (FNDELAY | FNONBLOCK))) { 21312 switch (cmd) { 21313 case DKIOCGGEOM: /* SD_PATH_DIRECT */ 21314 case DKIOCGVTOC: 21315 case DKIOCGEXTVTOC: 21316 case DKIOCGAPART: 21317 case DKIOCPARTINFO: 21318 case DKIOCEXTPARTINFO: 21319 case DKIOCSGEOM: 21320 case DKIOCSAPART: 21321 case DKIOCGETEFI: 21322 case DKIOCPARTITION: 21323 case DKIOCSVTOC: 21324 case DKIOCSEXTVTOC: 21325 case DKIOCSETEFI: 21326 case DKIOCGMBOOT: 21327 case DKIOCSMBOOT: 21328 case DKIOCG_PHYGEOM: 21329 case DKIOCG_VIRTGEOM: 21330 /* let cmlb handle it */ 21331 goto skip_ready_valid; 21332 21333 case CDROMPAUSE: 21334 case CDROMRESUME: 21335 case CDROMPLAYMSF: 21336 case CDROMPLAYTRKIND: 21337 case CDROMREADTOCHDR: 21338 case CDROMREADTOCENTRY: 21339 case CDROMSTOP: 21340 case CDROMSTART: 21341 case CDROMVOLCTRL: 21342 case CDROMSUBCHNL: 21343 case CDROMREADMODE2: 21344 case CDROMREADMODE1: 21345 case CDROMREADOFFSET: 21346 case CDROMSBLKMODE: 21347 case CDROMGBLKMODE: 21348 case CDROMGDRVSPEED: 21349 case CDROMSDRVSPEED: 21350 case CDROMCDDA: 21351 case CDROMCDXA: 21352 case CDROMSUBCODE: 21353 if (!ISCD(un)) { 21354 un->un_ncmds_in_driver--; 21355 ASSERT(un->un_ncmds_in_driver >= 0); 21356 mutex_exit(SD_MUTEX(un)); 21357 err = ENOTTY; 21358 goto done_without_assess; 21359 } 21360 break; 21361 case FDEJECT: 21362 case DKIOCEJECT: 21363 case CDROMEJECT: 21364 if (!un->un_f_eject_media_supported) { 21365 un->un_ncmds_in_driver--; 21366 ASSERT(un->un_ncmds_in_driver >= 0); 21367 mutex_exit(SD_MUTEX(un)); 21368 err = ENOTTY; 21369 goto done_without_assess; 21370 } 21371 break; 21372 case DKIOCFLUSHWRITECACHE: 21373 mutex_exit(SD_MUTEX(un)); 21374 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21375 if (err != 0) { 21376 mutex_enter(SD_MUTEX(un)); 21377 un->un_ncmds_in_driver--; 21378 ASSERT(un->un_ncmds_in_driver >= 0); 21379 mutex_exit(SD_MUTEX(un)); 21380 err = EIO; 21381 goto done_quick_assess; 21382 } 21383 mutex_enter(SD_MUTEX(un)); 21384 /* FALLTHROUGH */ 21385 case DKIOCREMOVABLE: 21386 case DKIOCHOTPLUGGABLE: 21387 case DKIOCINFO: 21388 case DKIOCGMEDIAINFO: 21389 case MHIOCENFAILFAST: 21390 case MHIOCSTATUS: 21391 case MHIOCTKOWN: 21392 case MHIOCRELEASE: 21393 case MHIOCGRP_INKEYS: 21394 case MHIOCGRP_INRESV: 21395 case MHIOCGRP_REGISTER: 21396 case MHIOCGRP_RESERVE: 21397 case MHIOCGRP_PREEMPTANDABORT: 21398 case MHIOCGRP_REGISTERANDIGNOREKEY: 21399 case CDROMCLOSETRAY: 21400 case USCSICMD: 21401 goto skip_ready_valid; 21402 default: 21403 break; 21404 } 21405 21406 mutex_exit(SD_MUTEX(un)); 21407 err = sd_ready_and_valid(ssc, SDPART(dev)); 21408 mutex_enter(SD_MUTEX(un)); 21409 21410 if (err != SD_READY_VALID) { 21411 switch (cmd) { 21412 case DKIOCSTATE: 21413 case CDROMGDRVSPEED: 21414 case CDROMSDRVSPEED: 21415 case FDEJECT: /* for eject command */ 21416 case DKIOCEJECT: 21417 case CDROMEJECT: 21418 case DKIOCREMOVABLE: 21419 case DKIOCHOTPLUGGABLE: 21420 break; 21421 default: 21422 if (un->un_f_has_removable_media) { 21423 err = ENXIO; 21424 } else { 21425 /* Do not map SD_RESERVED_BY_OTHERS to EIO */ 21426 if (err == SD_RESERVED_BY_OTHERS) { 21427 err = EACCES; 21428 } else { 21429 err = EIO; 21430 } 21431 } 21432 un->un_ncmds_in_driver--; 21433 ASSERT(un->un_ncmds_in_driver >= 0); 21434 mutex_exit(SD_MUTEX(un)); 21435 21436 goto done_without_assess; 21437 } 21438 } 21439 } 21440 21441 skip_ready_valid: 21442 mutex_exit(SD_MUTEX(un)); 21443 21444 switch (cmd) { 21445 case DKIOCINFO: 21446 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 
21447 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 21448 break; 21449 21450 case DKIOCGMEDIAINFO: 21451 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 21452 err = sd_get_media_info(dev, (caddr_t)arg, flag); 21453 break; 21454 21455 case DKIOCGGEOM: 21456 case DKIOCGVTOC: 21457 case DKIOCGEXTVTOC: 21458 case DKIOCGAPART: 21459 case DKIOCPARTINFO: 21460 case DKIOCEXTPARTINFO: 21461 case DKIOCSGEOM: 21462 case DKIOCSAPART: 21463 case DKIOCGETEFI: 21464 case DKIOCPARTITION: 21465 case DKIOCSVTOC: 21466 case DKIOCSEXTVTOC: 21467 case DKIOCSETEFI: 21468 case DKIOCGMBOOT: 21469 case DKIOCSMBOOT: 21470 case DKIOCG_PHYGEOM: 21471 case DKIOCG_VIRTGEOM: 21472 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd); 21473 21474 /* TUR should spin up */ 21475 21476 if (un->un_f_has_removable_media) 21477 err = sd_send_scsi_TEST_UNIT_READY(ssc, 21478 SD_CHECK_FOR_MEDIA); 21479 21480 else 21481 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 21482 21483 if (err != 0) 21484 goto done_with_assess; 21485 21486 err = cmlb_ioctl(un->un_cmlbhandle, dev, 21487 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT); 21488 21489 if ((err == 0) && 21490 ((cmd == DKIOCSETEFI) || 21491 ((un->un_f_pkstats_enabled) && 21492 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC || 21493 cmd == DKIOCSEXTVTOC)))) { 21494 21495 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT, 21496 (void *)SD_PATH_DIRECT); 21497 if ((tmprval == 0) && un->un_f_pkstats_enabled) { 21498 sd_set_pstats(un); 21499 SD_TRACE(SD_LOG_IO_PARTITION, un, 21500 "sd_ioctl: un:0x%p pstats created and " 21501 "set\n", un); 21502 } 21503 } 21504 21505 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) || 21506 ((cmd == DKIOCSETEFI) && (tmprval == 0))) { 21507 21508 mutex_enter(SD_MUTEX(un)); 21509 if (un->un_f_devid_supported && 21510 (un->un_f_opt_fab_devid == TRUE)) { 21511 if (un->un_devid == NULL) { 21512 sd_register_devid(ssc, SD_DEVINFO(un), 21513 SD_TARGET_IS_UNRESERVED); 21514 } else { 21515 /* 21516 * The device id for this disk 21517 * has been fabricated. The 21518 * device id must be preserved 21519 * by writing it back out to 21520 * disk. 21521 */ 21522 if (sd_write_deviceid(ssc) != 0) { 21523 ddi_devid_free(un->un_devid); 21524 un->un_devid = NULL; 21525 } 21526 } 21527 } 21528 mutex_exit(SD_MUTEX(un)); 21529 } 21530 21531 break; 21532 21533 case DKIOCLOCK: 21534 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 21535 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 21536 SD_PATH_STANDARD); 21537 goto done_with_assess; 21538 21539 case DKIOCUNLOCK: 21540 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 21541 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 21542 SD_PATH_STANDARD); 21543 goto done_with_assess; 21544 21545 case DKIOCSTATE: { 21546 enum dkio_state state; 21547 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 21548 21549 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 21550 err = EFAULT; 21551 } else { 21552 err = sd_check_media(dev, state); 21553 if (err == 0) { 21554 if (ddi_copyout(&un->un_mediastate, (void *)arg, 21555 sizeof (int), flag) != 0) 21556 err = EFAULT; 21557 } 21558 } 21559 break; 21560 } 21561 21562 case DKIOCREMOVABLE: 21563 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 21564 i = un->un_f_has_removable_media ? 1 : 0; 21565 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 21566 err = EFAULT; 21567 } else { 21568 err = 0; 21569 } 21570 break; 21571 21572 case DKIOCHOTPLUGGABLE: 21573 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n"); 21574 i = un->un_f_is_hotpluggable ?
1 : 0; 21575 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 21576 err = EFAULT; 21577 } else { 21578 err = 0; 21579 } 21580 break; 21581 21582 case DKIOCGTEMPERATURE: 21583 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 21584 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 21585 break; 21586 21587 case MHIOCENFAILFAST: 21588 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 21589 if ((err = drv_priv(cred_p)) == 0) { 21590 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 21591 } 21592 break; 21593 21594 case MHIOCTKOWN: 21595 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 21596 if ((err = drv_priv(cred_p)) == 0) { 21597 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 21598 } 21599 break; 21600 21601 case MHIOCRELEASE: 21602 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 21603 if ((err = drv_priv(cred_p)) == 0) { 21604 err = sd_mhdioc_release(dev); 21605 } 21606 break; 21607 21608 case MHIOCSTATUS: 21609 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 21610 if ((err = drv_priv(cred_p)) == 0) { 21611 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) { 21612 case 0: 21613 err = 0; 21614 break; 21615 case EACCES: 21616 *rval_p = 1; 21617 err = 0; 21618 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 21619 break; 21620 default: 21621 err = EIO; 21622 goto done_with_assess; 21623 } 21624 } 21625 break; 21626 21627 case MHIOCQRESERVE: 21628 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 21629 if ((err = drv_priv(cred_p)) == 0) { 21630 err = sd_reserve_release(dev, SD_RESERVE); 21631 } 21632 break; 21633 21634 case MHIOCREREGISTERDEVID: 21635 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 21636 if (drv_priv(cred_p) == EPERM) { 21637 err = EPERM; 21638 } else if (!un->un_f_devid_supported) { 21639 err = ENOTTY; 21640 } else { 21641 err = sd_mhdioc_register_devid(dev); 21642 } 21643 break; 21644 21645 case MHIOCGRP_INKEYS: 21646 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 21647 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 21648 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21649 err = ENOTSUP; 21650 } else { 21651 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 21652 flag); 21653 } 21654 } 21655 break; 21656 21657 case MHIOCGRP_INRESV: 21658 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 21659 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 21660 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21661 err = ENOTSUP; 21662 } else { 21663 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 21664 } 21665 } 21666 break; 21667 21668 case MHIOCGRP_REGISTER: 21669 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 21670 if ((err = drv_priv(cred_p)) != EPERM) { 21671 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21672 err = ENOTSUP; 21673 } else if (arg != NULL) { 21674 mhioc_register_t reg; 21675 if (ddi_copyin((void *)arg, ®, 21676 sizeof (mhioc_register_t), flag) != 0) { 21677 err = EFAULT; 21678 } else { 21679 err = 21680 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21681 ssc, SD_SCSI3_REGISTER, 21682 (uchar_t *)®); 21683 if (err != 0) 21684 goto done_with_assess; 21685 } 21686 } 21687 } 21688 break; 21689 21690 case MHIOCGRP_RESERVE: 21691 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 21692 if ((err = drv_priv(cred_p)) != EPERM) { 21693 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21694 err = ENOTSUP; 21695 } else if (arg != NULL) { 21696 mhioc_resv_desc_t resv_desc; 21697 if (ddi_copyin((void *)arg, &resv_desc, 21698 sizeof (mhioc_resv_desc_t), flag) != 0) { 21699 err = EFAULT; 21700 } else { 21701 err = 21702 
sd_send_scsi_PERSISTENT_RESERVE_OUT( 21703 ssc, SD_SCSI3_RESERVE, 21704 (uchar_t *)&resv_desc); 21705 if (err != 0) 21706 goto done_with_assess; 21707 } 21708 } 21709 } 21710 break; 21711 21712 case MHIOCGRP_PREEMPTANDABORT: 21713 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 21714 if ((err = drv_priv(cred_p)) != EPERM) { 21715 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21716 err = ENOTSUP; 21717 } else if (arg != NULL) { 21718 mhioc_preemptandabort_t preempt_abort; 21719 if (ddi_copyin((void *)arg, &preempt_abort, 21720 sizeof (mhioc_preemptandabort_t), 21721 flag) != 0) { 21722 err = EFAULT; 21723 } else { 21724 err = 21725 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21726 ssc, SD_SCSI3_PREEMPTANDABORT, 21727 (uchar_t *)&preempt_abort); 21728 if (err != 0) 21729 goto done_with_assess; 21730 } 21731 } 21732 } 21733 break; 21734 21735 case MHIOCGRP_REGISTERANDIGNOREKEY: 21736 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 21737 if ((err = drv_priv(cred_p)) != EPERM) { 21738 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 21739 err = ENOTSUP; 21740 } else if (arg != NULL) { 21741 mhioc_registerandignorekey_t r_and_i; 21742 if (ddi_copyin((void *)arg, (void *)&r_and_i, 21743 sizeof (mhioc_registerandignorekey_t), 21744 flag) != 0) { 21745 err = EFAULT; 21746 } else { 21747 err = 21748 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21749 ssc, SD_SCSI3_REGISTERANDIGNOREKEY, 21750 (uchar_t *)&r_and_i); 21751 if (err != 0) 21752 goto done_with_assess; 21753 } 21754 } 21755 } 21756 break; 21757 21758 case USCSICMD: 21759 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 21760 cr = ddi_get_cred(); 21761 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 21762 err = EPERM; 21763 } else { 21764 enum uio_seg uioseg; 21765 21766 uioseg = (flag & FKIOCTL) ? 
UIO_SYSSPACE : 21767 UIO_USERSPACE; 21768 if (un->un_f_format_in_progress == TRUE) { 21769 err = EAGAIN; 21770 break; 21771 } 21772 21773 err = sd_ssc_send(ssc, 21774 (struct uscsi_cmd *)arg, 21775 flag, uioseg, SD_PATH_STANDARD); 21776 if (err != 0) 21777 goto done_with_assess; 21778 else 21779 sd_ssc_assessment(ssc, SD_FMT_STANDARD); 21780 } 21781 break; 21782 21783 case CDROMPAUSE: 21784 case CDROMRESUME: 21785 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 21786 if (!ISCD(un)) { 21787 err = ENOTTY; 21788 } else { 21789 err = sr_pause_resume(dev, cmd); 21790 } 21791 break; 21792 21793 case CDROMPLAYMSF: 21794 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 21795 if (!ISCD(un)) { 21796 err = ENOTTY; 21797 } else { 21798 err = sr_play_msf(dev, (caddr_t)arg, flag); 21799 } 21800 break; 21801 21802 case CDROMPLAYTRKIND: 21803 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 21804 #if defined(__i386) || defined(__amd64) 21805 /* 21806 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 21807 */ 21808 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 21809 #else 21810 if (!ISCD(un)) { 21811 #endif 21812 err = ENOTTY; 21813 } else { 21814 err = sr_play_trkind(dev, (caddr_t)arg, flag); 21815 } 21816 break; 21817 21818 case CDROMREADTOCHDR: 21819 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 21820 if (!ISCD(un)) { 21821 err = ENOTTY; 21822 } else { 21823 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 21824 } 21825 break; 21826 21827 case CDROMREADTOCENTRY: 21828 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 21829 if (!ISCD(un)) { 21830 err = ENOTTY; 21831 } else { 21832 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 21833 } 21834 break; 21835 21836 case CDROMSTOP: 21837 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 21838 if (!ISCD(un)) { 21839 err = ENOTTY; 21840 } else { 21841 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_STOP, 21842 SD_PATH_STANDARD); 21843 goto done_with_assess; 21844 } 21845 break; 21846 21847 case CDROMSTART: 21848 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 21849 if (!ISCD(un)) { 21850 err = ENOTTY; 21851 } else { 21852 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_START, 21853 SD_PATH_STANDARD); 21854 goto done_with_assess; 21855 } 21856 break; 21857 21858 case CDROMCLOSETRAY: 21859 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 21860 if (!ISCD(un)) { 21861 err = ENOTTY; 21862 } else { 21863 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_CLOSE, 21864 SD_PATH_STANDARD); 21865 goto done_with_assess; 21866 } 21867 break; 21868 21869 case FDEJECT: /* for eject command */ 21870 case DKIOCEJECT: 21871 case CDROMEJECT: 21872 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 21873 if (!un->un_f_eject_media_supported) { 21874 err = ENOTTY; 21875 } else { 21876 err = sr_eject(dev); 21877 } 21878 break; 21879 21880 case CDROMVOLCTRL: 21881 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 21882 if (!ISCD(un)) { 21883 err = ENOTTY; 21884 } else { 21885 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 21886 } 21887 break; 21888 21889 case CDROMSUBCHNL: 21890 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 21891 if (!ISCD(un)) { 21892 err = ENOTTY; 21893 } else { 21894 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 21895 } 21896 break; 21897 21898 case CDROMREADMODE2: 21899 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 21900 if (!ISCD(un)) { 21901 err = ENOTTY; 21902 } else if (un->un_f_cfg_is_atapi == TRUE) { 21903 /* 21904 * If the drive supports READ CD, use that instead of 21905 * switching the LBA size via a MODE SELECT 21906 * Block Descriptor 21907 */ 21908 err = 
sr_read_cd_mode2(dev, (caddr_t)arg, flag); 21909 } else { 21910 err = sr_read_mode2(dev, (caddr_t)arg, flag); 21911 } 21912 break; 21913 21914 case CDROMREADMODE1: 21915 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 21916 if (!ISCD(un)) { 21917 err = ENOTTY; 21918 } else { 21919 err = sr_read_mode1(dev, (caddr_t)arg, flag); 21920 } 21921 break; 21922 21923 case CDROMREADOFFSET: 21924 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 21925 if (!ISCD(un)) { 21926 err = ENOTTY; 21927 } else { 21928 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 21929 flag); 21930 } 21931 break; 21932 21933 case CDROMSBLKMODE: 21934 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 21935 /* 21936 * There is no means of changing the block size on ATAPI 21937 * drives, thus return ENOTTY if the drive type is ATAPI 21938 */ 21939 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 21940 err = ENOTTY; 21941 } else if (un->un_f_mmc_cap == TRUE) { 21942 21943 /* 21944 * MMC Devices do not support changing the 21945 * logical block size 21946 * 21947 * Note: EINVAL is being returned instead of ENOTTY to 21948 * maintain consistency with the original mmc 21949 * driver update. 21950 */ 21951 err = EINVAL; 21952 } else { 21953 mutex_enter(SD_MUTEX(un)); 21954 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 21955 (un->un_ncmds_in_transport > 0)) { 21956 mutex_exit(SD_MUTEX(un)); 21957 err = EINVAL; 21958 } else { 21959 mutex_exit(SD_MUTEX(un)); 21960 err = sr_change_blkmode(dev, cmd, arg, flag); 21961 } 21962 } 21963 break; 21964 21965 case CDROMGBLKMODE: 21966 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 21967 if (!ISCD(un)) { 21968 err = ENOTTY; 21969 } else if ((un->un_f_cfg_is_atapi != FALSE) && 21970 (un->un_f_blockcount_is_valid != FALSE)) { 21971 /* 21972 * Drive is an ATAPI drive so return target block 21973 * size for ATAPI drives since we cannot change the 21974 * blocksize on ATAPI drives. Used primarily to detect 21975 * if an ATAPI cdrom is present. 21976 */ 21977 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 21978 sizeof (int), flag) != 0) { 21979 err = EFAULT; 21980 } else { 21981 err = 0; 21982 } 21983 21984 } else { 21985 /* 21986 * Drive supports changing block sizes via a Mode 21987 * Select. 21988 */ 21989 err = sr_change_blkmode(dev, cmd, arg, flag); 21990 } 21991 break; 21992 21993 case CDROMGDRVSPEED: 21994 case CDROMSDRVSPEED: 21995 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 21996 if (!ISCD(un)) { 21997 err = ENOTTY; 21998 } else if (un->un_f_mmc_cap == TRUE) { 21999 /* 22000 * Note: In the future the driver implementation 22001 * for getting and 22002 * setting cd speed should entail: 22003 * 1) If non-mmc try the Toshiba mode page 22004 * (sr_change_speed) 22005 * 2) If mmc but no support for Real Time Streaming try 22006 * the SET CD SPEED (0xBB) command 22007 * (sr_atapi_change_speed) 22008 * 3) If mmc and support for Real Time Streaming 22009 * try the GET PERFORMANCE and SET STREAMING 22010 * commands (not yet implemented, 4380808) 22011 */ 22012 /* 22013 * As per recent MMC spec, CD-ROM speed is variable 22014 * and changes with LBA. Since there is no such 22015 * thing as drive speed now, fail this ioctl. 22016 * 22017 * Note: EINVAL is returned for consistency with the original 22018 * implementation which included support for getting 22019 * the drive speed of mmc devices but not setting 22020 * the drive speed. Thus EINVAL would be returned 22021 * if a set request was made for an mmc device.
22022 * We no longer support get or set speed for 22023 * mmc but need to remain consistent with regard 22024 * to the error code returned. 22025 */ 22026 err = EINVAL; 22027 } else if (un->un_f_cfg_is_atapi == TRUE) { 22028 err = sr_atapi_change_speed(dev, cmd, arg, flag); 22029 } else { 22030 err = sr_change_speed(dev, cmd, arg, flag); 22031 } 22032 break; 22033 22034 case CDROMCDDA: 22035 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 22036 if (!ISCD(un)) { 22037 err = ENOTTY; 22038 } else { 22039 err = sr_read_cdda(dev, (void *)arg, flag); 22040 } 22041 break; 22042 22043 case CDROMCDXA: 22044 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 22045 if (!ISCD(un)) { 22046 err = ENOTTY; 22047 } else { 22048 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 22049 } 22050 break; 22051 22052 case CDROMSUBCODE: 22053 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 22054 if (!ISCD(un)) { 22055 err = ENOTTY; 22056 } else { 22057 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 22058 } 22059 break; 22060 22061 22062 #ifdef SDDEBUG 22063 /* RESET/ABORTS testing ioctls */ 22064 case DKIOCRESET: { 22065 int reset_level; 22066 22067 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 22068 err = EFAULT; 22069 } else { 22070 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 22071 "reset_level = 0x%x\n", reset_level); 22072 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 22073 err = 0; 22074 } else { 22075 err = EIO; 22076 } 22077 } 22078 break; 22079 } 22080 22081 case DKIOCABORT: 22082 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 22083 if (scsi_abort(SD_ADDRESS(un), NULL)) { 22084 err = 0; 22085 } else { 22086 err = EIO; 22087 } 22088 break; 22089 #endif 22090 22091 #ifdef SD_FAULT_INJECTION 22092 /* SDIOC FaultInjection testing ioctls */ 22093 case SDIOCSTART: 22094 case SDIOCSTOP: 22095 case SDIOCINSERTPKT: 22096 case SDIOCINSERTXB: 22097 case SDIOCINSERTUN: 22098 case SDIOCINSERTARQ: 22099 case SDIOCPUSH: 22100 case SDIOCRETRIEVE: 22101 case SDIOCRUN: 22102 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 22103 "SDIOC detected cmd:0x%X:\n", cmd); 22104 /* call error generator */ 22105 sd_faultinjection_ioctl(cmd, arg, un); 22106 err = 0; 22107 break; 22108 22109 #endif /* SD_FAULT_INJECTION */ 22110 22111 case DKIOCFLUSHWRITECACHE: 22112 { 22113 struct dk_callback *dkc = (struct dk_callback *)arg; 22114 22115 mutex_enter(SD_MUTEX(un)); 22116 if (!un->un_f_sync_cache_supported || 22117 !un->un_f_write_cache_enabled) { 22118 err = un->un_f_sync_cache_supported ? 22119 0 : ENOTSUP; 22120 mutex_exit(SD_MUTEX(un)); 22121 if ((flag & FKIOCTL) && dkc != NULL && 22122 dkc->dkc_callback != NULL) { 22123 (*dkc->dkc_callback)(dkc->dkc_cookie, 22124 err); 22125 /* 22126 * Did callback and reported error. 22127 * Since we did a callback, ioctl 22128 * should return 0.
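 *
 * For reference, a layered kernel consumer requests the
 * asynchronous form of DKIOCFLUSHWRITECACHE by issuing the
 * ioctl with FKIOCTL set and a dk_callback as the argument.
 * A minimal sketch, assuming an LDI handle "lh" and a
 * completion routine "my_flush_done" (both hypothetical
 * names):
 *
 *	struct dk_callback dkc;
 *
 *	dkc.dkc_callback = my_flush_done;
 *	dkc.dkc_cookie = my_cookie;
 *	dkc.dkc_flag = FLUSH_VOLATILE;
 *	err = ldi_ioctl(lh, DKIOCFLUSHWRITECACHE, (intptr_t)&dkc,
 *	    FKIOCTL, kcred, NULL);
 *
 * my_flush_done(my_cookie, error) is invoked once the
 * SYNCHRONIZE CACHE command completes (or, as in this error
 * path, immediately with the error noted above).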
22129 */ 22130 err = 0; 22131 } 22132 break; 22133 } 22134 mutex_exit(SD_MUTEX(un)); 22135 22136 if ((flag & FKIOCTL) && dkc != NULL && 22137 dkc->dkc_callback != NULL) { 22138 /* async SYNC CACHE request */ 22139 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc); 22140 } else { 22141 /* synchronous SYNC CACHE request */ 22142 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL); 22143 } 22144 } 22145 break; 22146 22147 case DKIOCGETWCE: { 22148 22149 int wce; 22150 22151 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) { 22152 break; 22153 } 22154 22155 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) { 22156 err = EFAULT; 22157 } 22158 break; 22159 } 22160 22161 case DKIOCSETWCE: { 22162 22163 int wce, sync_supported; 22164 22165 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) { 22166 err = EFAULT; 22167 break; 22168 } 22169 22170 /* 22171 * Synchronize multiple threads trying to enable 22172 * or disable the cache via the un_wcc_cv 22173 * condition variable. 22174 */ 22175 mutex_enter(SD_MUTEX(un)); 22176 22177 /* 22178 * Don't allow the cache to be enabled if the 22179 * config file has it disabled. 22180 */ 22181 if (un->un_f_opt_disable_cache && wce) { 22182 mutex_exit(SD_MUTEX(un)); 22183 err = EINVAL; 22184 break; 22185 } 22186 22187 /* 22188 * Wait for write cache change in progress 22189 * bit to be clear before proceeding. 22190 */ 22191 while (un->un_f_wcc_inprog) 22192 cv_wait(&un->un_wcc_cv, SD_MUTEX(un)); 22193 22194 un->un_f_wcc_inprog = 1; 22195 22196 if (un->un_f_write_cache_enabled && wce == 0) { 22197 /* 22198 * Disable the write cache. Don't clear 22199 * un_f_write_cache_enabled until after 22200 * the mode select and flush are complete. 22201 */ 22202 sync_supported = un->un_f_sync_cache_supported; 22203 22204 /* 22205 * If cache flush is suppressed, we assume that the 22206 * controller firmware will take care of managing the 22207 * write cache for us: no need to explicitly 22208 * disable it. 22209 */ 22210 if (!un->un_f_suppress_cache_flush) { 22211 mutex_exit(SD_MUTEX(un)); 22212 if ((err = sd_cache_control(ssc, 22213 SD_CACHE_NOCHANGE, 22214 SD_CACHE_DISABLE)) == 0 && 22215 sync_supported) { 22216 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, 22217 NULL); 22218 } 22219 } else { 22220 mutex_exit(SD_MUTEX(un)); 22221 } 22222 22223 mutex_enter(SD_MUTEX(un)); 22224 if (err == 0) { 22225 un->un_f_write_cache_enabled = 0; 22226 } 22227 22228 } else if (!un->un_f_write_cache_enabled && wce != 0) { 22229 /* 22230 * Set un_f_write_cache_enabled first, so there is 22231 * no window where the cache is enabled, but the 22232 * bit says it isn't. 22233 */ 22234 un->un_f_write_cache_enabled = 1; 22235 22236 /* 22237 * If cache flush is suppressed, we assume that the 22238 * controller firmware will take care of managing the 22239 * write cache for us: no need to explicitly 22240 * enable it.
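 *
 * For reference, a userland consumer toggles the write cache
 * through this ioctl by passing a pointer to an int. A minimal
 * sketch (illustrative only; the device path is an example):
 *
 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDWR);
 *	int wce = 1;	(0 disables the write cache)
 *
 *	if (ioctl(fd, DKIOCSETWCE, &wce) != 0)
 *		perror("DKIOCSETWCE");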
22241 */ 22242 if (!un->un_f_suppress_cache_flush) { 22243 mutex_exit(SD_MUTEX(un)); 22244 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE, 22245 SD_CACHE_ENABLE); 22246 } else { 22247 mutex_exit(SD_MUTEX(un)); 22248 } 22249 22250 mutex_enter(SD_MUTEX(un)); 22251 22252 if (err) { 22253 un->un_f_write_cache_enabled = 0; 22254 } 22255 } 22256 22257 un->un_f_wcc_inprog = 0; 22258 cv_broadcast(&un->un_wcc_cv); 22259 mutex_exit(SD_MUTEX(un)); 22260 break; 22261 } 22262 22263 default: 22264 err = ENOTTY; 22265 break; 22266 } 22267 mutex_enter(SD_MUTEX(un)); 22268 un->un_ncmds_in_driver--; 22269 ASSERT(un->un_ncmds_in_driver >= 0); 22270 mutex_exit(SD_MUTEX(un)); 22271 22272 22273 done_without_assess: 22274 sd_ssc_fini(ssc); 22275 22276 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22277 return (err); 22278 22279 done_with_assess: 22280 mutex_enter(SD_MUTEX(un)); 22281 un->un_ncmds_in_driver--; 22282 ASSERT(un->un_ncmds_in_driver >= 0); 22283 mutex_exit(SD_MUTEX(un)); 22284 22285 done_quick_assess: 22286 if (err != 0) 22287 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22288 /* Uninitialize sd_ssc_t pointer */ 22289 sd_ssc_fini(ssc); 22290 22291 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 22292 return (err); 22293 } 22294 22295 22296 /* 22297 * Function: sd_dkio_ctrl_info 22298 * 22299 * Description: This routine is the driver entry point for handling controller 22300 * information ioctl requests (DKIOCINFO). 22301 * 22302 * Arguments: dev - the device number 22303 * arg - pointer to user provided dk_cinfo structure 22304 * specifying the controller type and attributes. 22305 * flag - this argument is a pass through to ddi_copyxxx() 22306 * directly from the mode argument of ioctl(). 22307 * 22308 * Return Code: 0 22309 * EFAULT 22310 * ENXIO 22311 */ 22312 22313 static int 22314 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 22315 { 22316 struct sd_lun *un = NULL; 22317 struct dk_cinfo *info; 22318 dev_info_t *pdip; 22319 int lun, tgt; 22320 22321 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22322 return (ENXIO); 22323 } 22324 22325 info = (struct dk_cinfo *) 22326 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 22327 22328 switch (un->un_ctype) { 22329 case CTYPE_CDROM: 22330 info->dki_ctype = DKC_CDROM; 22331 break; 22332 default: 22333 info->dki_ctype = DKC_SCSI_CCS; 22334 break; 22335 } 22336 pdip = ddi_get_parent(SD_DEVINFO(un)); 22337 info->dki_cnum = ddi_get_instance(pdip); 22338 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 22339 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 22340 } else { 22341 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 22342 DK_DEVLEN - 1); 22343 } 22344 22345 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22346 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 22347 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 22348 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 22349 22350 /* Unit Information */ 22351 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 22352 info->dki_slave = ((tgt << 3) | lun); 22353 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 22354 DK_DEVLEN - 1); 22355 info->dki_flags = DKI_FMTVOL; 22356 info->dki_partition = SDPART(dev); 22357 22358 /* Max Transfer size of this device in blocks */ 22359 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 22360 info->dki_addr = 0; 22361 info->dki_space = 0; 22362 info->dki_prio = 0; 22363 info->dki_vec = 0; 22364 22365 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 22366 kmem_free(info, sizeof 
(struct dk_cinfo)); 22367 return (EFAULT); 22368 } else { 22369 kmem_free(info, sizeof (struct dk_cinfo)); 22370 return (0); 22371 } 22372 } 22373 22374 22375 /* 22376 * Function: sd_get_media_info 22377 * 22378 * Description: This routine is the driver entry point for handling ioctl 22379 * requests for the media type or command set profile used by the 22380 * drive to operate on the media (DKIOCGMEDIAINFO). 22381 * 22382 * Arguments: dev - the device number 22383 * arg - pointer to user provided dk_minfo structure 22384 * specifying the media type, logical block size and 22385 * drive capacity. 22386 * flag - this argument is a pass through to ddi_copyxxx() 22387 * directly from the mode argument of ioctl(). 22388 * 22389 * Return Code: 0 22390 * EACCES 22391 * EFAULT 22392 * ENXIO 22393 * EIO 22394 */ 22395 22396 static int 22397 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 22398 { 22399 struct sd_lun *un = NULL; 22400 struct uscsi_cmd com; 22401 struct scsi_inquiry *sinq; 22402 struct dk_minfo media_info; 22403 u_longlong_t media_capacity; 22404 uint64_t capacity; 22405 uint_t lbasize; 22406 uchar_t *out_data; 22407 uchar_t *rqbuf; 22408 int rval = 0; 22409 int rtn; 22410 sd_ssc_t *ssc; 22411 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 22412 (un->un_state == SD_STATE_OFFLINE)) { 22413 return (ENXIO); 22414 } 22415 22416 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 22417 22418 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 22419 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 22420 22421 /* Issue a TUR to determine if the drive is ready with media present */ 22422 ssc = sd_ssc_init(un); 22423 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA); 22424 if (rval == ENXIO) { 22425 goto done; 22426 } else if (rval != 0) { 22427 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22428 } 22429 22430 /* Now get configuration data */ 22431 if (ISCD(un)) { 22432 media_info.dki_media_type = DK_CDROM; 22433 22434 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 22435 if (un->un_f_mmc_cap == TRUE) { 22436 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, 22437 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN, 22438 SD_PATH_STANDARD); 22439 22440 if (rtn) { 22441 /* 22442 * We ignore all failures for CD and need to 22443 * make the assessment before the processing code 22444 * to avoid missing the assessment for FMA.
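 *
 * For reference, the data assembled by this routine reaches
 * userland through DKIOCGMEDIAINFO. A minimal sketch of a
 * caller (illustrative only; the device path is an example):
 *
 *	struct dk_minfo minfo;
 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY);
 *
 *	if (ioctl(fd, DKIOCGMEDIAINFO, &minfo) == 0)
 *		(void) printf("type %u lbsize %u capacity %llu\n",
 *		    minfo.dki_media_type, minfo.dki_lbsize,
 *		    (u_longlong_t)minfo.dki_capacity);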
22445 */ 22446 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22447 /* 22448 * Failed for other than an illegal request 22449 * or command not supported 22450 */ 22451 if ((com.uscsi_status == STATUS_CHECK) && 22452 (com.uscsi_rqstatus == STATUS_GOOD)) { 22453 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 22454 (rqbuf[12] != 0x20)) { 22455 rval = EIO; 22456 goto no_assessment; 22457 } 22458 } 22459 } else { 22460 /* 22461 * The GET CONFIGURATION command succeeded 22462 * so set the media type according to the 22463 * returned data 22464 */ 22465 media_info.dki_media_type = out_data[6]; 22466 media_info.dki_media_type <<= 8; 22467 media_info.dki_media_type |= out_data[7]; 22468 } 22469 } 22470 } else { 22471 /* 22472 * The profile list is not available, so we attempt to identify 22473 * the media type based on the inquiry data 22474 */ 22475 sinq = un->un_sd->sd_inq; 22476 if ((sinq->inq_dtype == DTYPE_DIRECT) || 22477 (sinq->inq_dtype == DTYPE_OPTICAL)) { 22478 /* This is a direct access device or optical disk */ 22479 media_info.dki_media_type = DK_FIXED_DISK; 22480 22481 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 22482 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 22483 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 22484 media_info.dki_media_type = DK_ZIP; 22485 } else if ( 22486 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 22487 media_info.dki_media_type = DK_JAZ; 22488 } 22489 } 22490 } else { 22491 /* 22492 * Not a CD, direct access or optical disk so return 22493 * unknown media 22494 */ 22495 media_info.dki_media_type = DK_UNKNOWN; 22496 } 22497 } 22498 22499 /* Now read the capacity so we can provide the lbasize and capacity */ 22500 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize, 22501 SD_PATH_DIRECT); 22502 switch (rval) { 22503 case 0: 22504 break; 22505 case EACCES: 22506 rval = EACCES; 22507 goto done; 22508 default: 22509 rval = EIO; 22510 goto done; 22511 } 22512 22513 /* 22514 * If the LUN is expanded dynamically, update the un structure. 22515 */ 22516 mutex_enter(SD_MUTEX(un)); 22517 if ((un->un_f_blockcount_is_valid == TRUE) && 22518 (un->un_f_tgt_blocksize_is_valid == TRUE) && 22519 (capacity > un->un_blockcount)) { 22520 sd_update_block_info(un, lbasize, capacity); 22521 } 22522 mutex_exit(SD_MUTEX(un)); 22523 22524 media_info.dki_lbsize = lbasize; 22525 media_capacity = capacity; 22526 22527 /* 22528 * sd_send_scsi_READ_CAPACITY() reports capacity in 22529 * un->un_sys_blocksize chunks. So we need to convert it into 22530 * cap.lbasize chunks. 22531 */ 22532 media_capacity *= un->un_sys_blocksize; 22533 media_capacity /= lbasize; 22534 media_info.dki_capacity = media_capacity; 22535 22536 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 22537 rval = EFAULT; 22538 /* Keep the goto; code might be added below in the future */ 22539 goto no_assessment; 22540 } 22541 done: 22542 if (rval != 0) { 22543 if (rval == EIO) 22544 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22545 else 22546 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22547 } 22548 no_assessment: 22549 sd_ssc_fini(ssc); 22550 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 22551 kmem_free(rqbuf, SENSE_LENGTH); 22552 return (rval); 22553 } 22554 22555 22556 /* 22557 * Function: sd_check_media 22558 * 22559 * Description: This utility routine implements the functionality for the 22560 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 22561 * driver state changes from that specified by the user 22562 * (inserted or ejected).
For example, if the user specifies 22563 * DKIO_EJECTED and the current media state is inserted this 22564 * routine will immediately return DKIO_INSERTED. However, if the 22565 * current media state is not inserted the user thread will be 22566 * blocked until the drive state changes. If DKIO_NONE is specified 22567 * the user thread will block until a drive state change occurs. 22568 * 22569 * Arguments: dev - the device number 22570 * state - user pointer to a dkio_state, updated with the current 22571 * drive state at return. 22572 * 22573 * Return Code: ENXIO 22574 * EIO 22575 * EAGAIN 22576 * EINTR 22577 */ 22578 22579 static int 22580 sd_check_media(dev_t dev, enum dkio_state state) 22581 { 22582 struct sd_lun *un = NULL; 22583 enum dkio_state prev_state; 22584 opaque_t token = NULL; 22585 int rval = 0; 22586 sd_ssc_t *ssc; 22587 dev_t sub_dev; 22588 22589 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22590 return (ENXIO); 22591 } 22592 22593 /* 22594 * sub_dev is used when submitting a request to the scsi watch service. 22595 * All submissions are unified to use the same device number. 22596 */ 22597 sub_dev = sd_make_device(SD_DEVINFO(un)); 22598 22599 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 22600 22601 ssc = sd_ssc_init(un); 22602 22603 mutex_enter(SD_MUTEX(un)); 22604 22605 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 22606 "state=%x, mediastate=%x\n", state, un->un_mediastate); 22607 22608 prev_state = un->un_mediastate; 22609 22610 /* is there anything to do? */ 22611 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 22612 /* 22613 * submit the request to the scsi_watch service; 22614 * sd_media_watch_cb() does the real work 22615 */ 22616 mutex_exit(SD_MUTEX(un)); 22617 22618 /* 22619 * This change handles the case where a scsi watch request is 22620 * added to a device that is powered down. To accomplish this 22621 * we power up the device before adding the scsi watch request, 22622 * since the scsi watch sends a TUR directly to the device 22623 * which the device cannot handle if it is powered down. 22624 */ 22625 if (sd_pm_entry(un) != DDI_SUCCESS) { 22626 mutex_enter(SD_MUTEX(un)); 22627 goto done; 22628 } 22629 22630 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 22631 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 22632 (caddr_t)sub_dev); 22633 22634 sd_pm_exit(un); 22635 22636 mutex_enter(SD_MUTEX(un)); 22637 if (token == NULL) { 22638 rval = EAGAIN; 22639 goto done; 22640 } 22641 22642 /* 22643 * This is a special case IOCTL that doesn't return 22644 * until the media state changes. Routine sdpower 22645 * knows about and handles this so don't count it 22646 * as an active cmd in the driver, which would 22647 * keep the device busy to the pm framework. 22648 * If the count isn't decremented the device can't 22649 * be powered down. 22650 */ 22651 un->un_ncmds_in_driver--; 22652 ASSERT(un->un_ncmds_in_driver >= 0); 22653 22654 /* 22655 * if a prior request had been made, this will be the same 22656 * token, as scsi_watch was designed that way.
22657 */ 22658 un->un_swr_token = token; 22659 un->un_specified_mediastate = state; 22660 22661 /* 22662 * now wait for media change 22663 * we will not be signalled until mediastate != state, but it is 22664 * still better to test for this condition, since there is a 22665 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED 22666 */ 22667 SD_TRACE(SD_LOG_COMMON, un, 22668 "sd_check_media: waiting for media state change\n"); 22669 while (un->un_mediastate == state) { 22670 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) { 22671 SD_TRACE(SD_LOG_COMMON, un, 22672 "sd_check_media: waiting for media state " 22673 "was interrupted\n"); 22674 un->un_ncmds_in_driver++; 22675 rval = EINTR; 22676 goto done; 22677 } 22678 SD_TRACE(SD_LOG_COMMON, un, 22679 "sd_check_media: received signal, state=%x\n", 22680 un->un_mediastate); 22681 } 22682 /* 22683 * Increment the counter to indicate the device once again 22684 * has an active outstanding cmd. 22685 */ 22686 un->un_ncmds_in_driver++; 22687 } 22688 22689 /* invalidate geometry */ 22690 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) { 22691 sr_ejected(un); 22692 } 22693 22694 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) { 22695 uint64_t capacity; 22696 uint_t lbasize; 22697 22698 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n"); 22699 mutex_exit(SD_MUTEX(un)); 22700 /* 22701 * Since the following routines use SD_PATH_DIRECT, we must 22702 * call PM directly before the upcoming disk accesses. This 22703 * may cause the disk to be powered up and spun up. 22704 */ 22705 22706 if (sd_pm_entry(un) == DDI_SUCCESS) { 22707 rval = sd_send_scsi_READ_CAPACITY(ssc, 22708 &capacity, &lbasize, SD_PATH_DIRECT); 22709 if (rval != 0) { 22710 sd_pm_exit(un); 22711 if (rval == EIO) 22712 sd_ssc_assessment(ssc, 22713 SD_FMT_STATUS_CHECK); 22714 else 22715 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22716 mutex_enter(SD_MUTEX(un)); 22717 goto done; 22718 } 22719 } else { 22720 rval = EIO; 22721 mutex_enter(SD_MUTEX(un)); 22722 goto done; 22723 } 22724 mutex_enter(SD_MUTEX(un)); 22725 22726 sd_update_block_info(un, lbasize, capacity); 22727 22728 /* 22729 * Check if the media in the device is writable or not 22730 */ 22731 if (ISCD(un)) { 22732 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT); 22733 } 22734 22735 mutex_exit(SD_MUTEX(un)); 22736 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT); 22737 if ((cmlb_validate(un->un_cmlbhandle, 0, 22738 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) { 22739 sd_set_pstats(un); 22740 SD_TRACE(SD_LOG_IO_PARTITION, un, 22741 "sd_check_media: un:0x%p pstats created and " 22742 "set\n", un); 22743 } 22744 22745 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT, 22746 SD_PATH_DIRECT); 22747 22748 sd_pm_exit(un); 22749 22750 if (rval != 0) { 22751 if (rval == EIO) 22752 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 22753 else 22754 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 22755 } 22756 22757 mutex_enter(SD_MUTEX(un)); 22758 } 22759 done: 22760 sd_ssc_fini(ssc); 22761 un->un_f_watcht_stopped = FALSE; 22762 if (token != NULL && un->un_swr_token != NULL) { 22763 /* 22764 * Use of this local token and the mutex ensures that we avoid 22765 * some race conditions associated with terminating the 22766 * scsi watch.
22767 */ 22768 token = un->un_swr_token; 22769 mutex_exit(SD_MUTEX(un)); 22770 (void) scsi_watch_request_terminate(token, 22771 SCSI_WATCH_TERMINATE_WAIT); 22772 if (scsi_watch_get_ref_count(token) == 0) { 22773 mutex_enter(SD_MUTEX(un)); 22774 un->un_swr_token = (opaque_t)NULL; 22775 } else { 22776 mutex_enter(SD_MUTEX(un)); 22777 } 22778 } 22779 22780 /* 22781 * Update the capacity kstat value, if no media previously 22782 * (capacity kstat is 0) and a media has been inserted 22783 * (un_f_blockcount_is_valid == TRUE) 22784 */ 22785 if (un->un_errstats) { 22786 struct sd_errstats *stp = NULL; 22787 22788 stp = (struct sd_errstats *)un->un_errstats->ks_data; 22789 if ((stp->sd_capacity.value.ui64 == 0) && 22790 (un->un_f_blockcount_is_valid == TRUE)) { 22791 stp->sd_capacity.value.ui64 = 22792 (uint64_t)((uint64_t)un->un_blockcount * 22793 un->un_sys_blocksize); 22794 } 22795 } 22796 mutex_exit(SD_MUTEX(un)); 22797 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n"); 22798 return (rval); 22799 } 22800 22801 22802 /* 22803 * Function: sd_delayed_cv_broadcast 22804 * 22805 * Description: Delayed cv_broadcast to allow for target to recover from media 22806 * insertion. 22807 * 22808 * Arguments: arg - driver soft state (unit) structure 22809 */ 22810 22811 static void 22812 sd_delayed_cv_broadcast(void *arg) 22813 { 22814 struct sd_lun *un = arg; 22815 22816 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n"); 22817 22818 mutex_enter(SD_MUTEX(un)); 22819 un->un_dcvb_timeid = NULL; 22820 cv_broadcast(&un->un_state_cv); 22821 mutex_exit(SD_MUTEX(un)); 22822 } 22823 22824 22825 /* 22826 * Function: sd_media_watch_cb 22827 * 22828 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This 22829 * routine processes the TUR sense data and updates the driver 22830 * state if a transition has occurred. The user thread 22831 * (sd_check_media) is then signalled. 
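 *
 * For reference, the DKIOCSTATE mechanism this callback supports
 * is typically consumed from userland as a blocking wait loop.
 * A minimal sketch (illustrative only; the device path is an
 * example; O_NDELAY allows the open to succeed with no media):
 *
 *	enum dkio_state state = DKIO_NONE;
 *	int fd = open("/dev/rdsk/c0t6d0s2", O_RDONLY | O_NDELAY);
 *
 *	while (ioctl(fd, DKIOCSTATE, &state) == 0) {
 *		if (state == DKIO_INSERTED)
 *			break;	(media has arrived)
 *	}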
22832 * 22833 * Arguments: arg - the device 'dev_t' is used for context to discriminate 22834 * among multiple watches that share this callback function 22835 * resultp - scsi watch facility result packet containing scsi 22836 * packet, status byte and sense data 22837 * 22838 * Return Code: 0 for success, -1 for failure 22839 */ 22840 22841 static int 22842 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 22843 { 22844 struct sd_lun *un; 22845 struct scsi_status *statusp = resultp->statusp; 22846 uint8_t *sensep = (uint8_t *)resultp->sensep; 22847 enum dkio_state state = DKIO_NONE; 22848 dev_t dev = (dev_t)arg; 22849 uchar_t actual_sense_length; 22850 uint8_t skey, asc, ascq; 22851 22852 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22853 return (-1); 22854 } 22855 actual_sense_length = resultp->actual_sense_length; 22856 22857 mutex_enter(SD_MUTEX(un)); 22858 SD_TRACE(SD_LOG_COMMON, un, 22859 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n", 22860 *((char *)statusp), (void *)sensep, actual_sense_length); 22861 22862 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) { 22863 un->un_mediastate = DKIO_DEV_GONE; 22864 cv_broadcast(&un->un_state_cv); 22865 mutex_exit(SD_MUTEX(un)); 22866 22867 return (0); 22868 } 22869 22870 /* 22871 * If there was a check condition then sensep points to valid sense data. 22872 * If the status was not a check condition but a reservation or busy status, 22873 * then the new state is DKIO_NONE. 22874 */ 22875 if (sensep != NULL) { 22876 skey = scsi_sense_key(sensep); 22877 asc = scsi_sense_asc(sensep); 22878 ascq = scsi_sense_ascq(sensep); 22879 22880 SD_INFO(SD_LOG_COMMON, un, 22881 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n", 22882 skey, asc, ascq); 22883 /* This routine only uses up to 13 bytes of sense data. */ 22884 if (actual_sense_length >= 13) { 22885 if (skey == KEY_UNIT_ATTENTION) { 22886 if (asc == 0x28) { 22887 state = DKIO_INSERTED; 22888 } 22889 } else if (skey == KEY_NOT_READY) { 22890 /* 22891 * Sense data of 02/06/00 means that the 22892 * drive could not read the media (No 22893 * reference position found). In this case 22894 * to prevent a hang on the DKIOCSTATE IOCTL 22895 * we set the media state to DKIO_INSERTED. 22896 */ 22897 if (asc == 0x06 && ascq == 0x00) 22898 state = DKIO_INSERTED; 22899 22900 /* 22901 * Sense data of 02/04/02 means that the host 22902 * should send a start command. Explicitly 22903 * leave the media state as is 22904 * (inserted), as the media is inserted 22905 * and the host has stopped the device for PM 22906 * reasons. The next real read/write 22907 * to this media will bring the 22908 * device to the right state for 22909 * media access. 22910 */ 22911 if (asc == 0x3a) { 22912 state = DKIO_EJECTED; 22913 } else { 22914 /* 22915 * If the drive is busy with an 22916 * operation or long write, keep the 22917 * media in an inserted state. 22918 */ 22919 22920 if ((asc == 0x04) && 22921 ((ascq == 0x02) || 22922 (ascq == 0x07) || 22923 (ascq == 0x08))) { 22924 state = DKIO_INSERTED; 22925 } 22926 } 22927 } else if (skey == KEY_NO_SENSE) { 22928 if ((asc == 0x00) && (ascq == 0x00)) { 22929 /* 22930 * Sense Data 00/00/00 does not provide 22931 * any information about the state of 22932 * the media. Ignore it.
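 *
 * To summarize, the sense-to-state mapping applied by this
 * callback is:
 *
 *	UNIT ATTENTION, ASC 0x28 (media changed)	DKIO_INSERTED
 *	NOT READY, 06/00 (no reference position)	DKIO_INSERTED
 *	NOT READY, ASC 0x3a (medium not present)	DKIO_EJECTED
 *	NOT READY, 04/02, 04/07 or 04/08 (busy)		DKIO_INSERTED
 *	NO SENSE, 00/00					ignored
 *	GOOD status with CMD_CMPLT			DKIO_INSERTED
 *	CMD_DEV_GONE					DKIO_DEV_GONE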
22933 */ 22934 mutex_exit(SD_MUTEX(un)); 22935 return (0); 22936 } 22937 } 22938 } 22939 } else if ((*((char *)statusp) == STATUS_GOOD) && 22940 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 22941 state = DKIO_INSERTED; 22942 } 22943 22944 SD_TRACE(SD_LOG_COMMON, un, 22945 "sd_media_watch_cb: state=%x, specified=%x\n", 22946 state, un->un_specified_mediastate); 22947 22948 /* 22949 * now signal the waiting thread if this is *not* the specified state; 22950 * delay the signal if the state is DKIO_INSERTED to allow the target 22951 * to recover 22952 */ 22953 if (state != un->un_specified_mediastate) { 22954 un->un_mediastate = state; 22955 if (state == DKIO_INSERTED) { 22956 /* 22957 * delay the signal to give the drive a chance 22958 * to do what it apparently needs to do 22959 */ 22960 SD_TRACE(SD_LOG_COMMON, un, 22961 "sd_media_watch_cb: delayed cv_broadcast\n"); 22962 if (un->un_dcvb_timeid == NULL) { 22963 un->un_dcvb_timeid = 22964 timeout(sd_delayed_cv_broadcast, un, 22965 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 22966 } 22967 } else { 22968 SD_TRACE(SD_LOG_COMMON, un, 22969 "sd_media_watch_cb: immediate cv_broadcast\n"); 22970 cv_broadcast(&un->un_state_cv); 22971 } 22972 } 22973 mutex_exit(SD_MUTEX(un)); 22974 return (0); 22975 } 22976 22977 22978 /* 22979 * Function: sd_dkio_get_temp 22980 * 22981 * Description: This routine is the driver entry point for handling ioctl 22982 * requests to get the disk temperature. 22983 * 22984 * Arguments: dev - the device number 22985 * arg - pointer to user provided dk_temperature structure. 22986 * flag - this argument is a pass through to ddi_copyxxx() 22987 * directly from the mode argument of ioctl(). 22988 * 22989 * Return Code: 0 22990 * EFAULT 22991 * ENXIO 22992 * EAGAIN 22993 */ 22994 22995 static int 22996 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 22997 { 22998 struct sd_lun *un = NULL; 22999 struct dk_temperature *dktemp = NULL; 23000 uchar_t *temperature_page; 23001 int rval = 0; 23002 int path_flag = SD_PATH_STANDARD; 23003 sd_ssc_t *ssc; 23004 23005 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23006 return (ENXIO); 23007 } 23008 23009 ssc = sd_ssc_init(un); 23010 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 23011 23012 /* copyin the disk temp argument to get the user flags */ 23013 if (ddi_copyin((void *)arg, dktemp, 23014 sizeof (struct dk_temperature), flag) != 0) { 23015 rval = EFAULT; 23016 goto done; 23017 } 23018 23019 /* Initialize the temperature to invalid. */ 23020 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 23021 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 23022 23023 /* 23024 * Note: Investigate removing the "bypass pm" semantic. 23025 * Can we just bypass PM always? 23026 */ 23027 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 23028 path_flag = SD_PATH_DIRECT; 23029 ASSERT(!mutex_owned(&un->un_pm_mutex)); 23030 mutex_enter(&un->un_pm_mutex); 23031 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 23032 /* 23033 * If DKT_BYPASS_PM is set, and the drive happens to be 23034 * in low power mode, we cannot wake it up; we need to 23035 * return EAGAIN. 23036 */ 23037 mutex_exit(&un->un_pm_mutex); 23038 rval = EAGAIN; 23039 goto done; 23040 } else { 23041 /* 23042 * Indicate to PM the device is busy. This is required 23043 * to avoid a race - i.e. the ioctl is issuing a 23044 * command and the pm framework brings down the device 23045 * to low power mode (possible power cut-off on some 23046 * platforms).
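 *
 * For reference, a userland caller reaches this code through
 * DKIOCGTEMPERATURE. A minimal sketch (illustrative only; the
 * device path is an example):
 *
 *	struct dk_temperature dkt;
 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY);
 *
 *	bzero(&dkt, sizeof (dkt));
 *	dkt.dkt_flags = DKT_BYPASS_PM;
 *	if (ioctl(fd, DKIOCGTEMPERATURE, &dkt) == 0 &&
 *	    dkt.dkt_cur_temp != DKT_INVALID_TEMP)
 *		(void) printf("%d C\n", dkt.dkt_cur_temp);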
23047 */ 23048 mutex_exit(&un->un_pm_mutex); 23049 if (sd_pm_entry(un) != DDI_SUCCESS) { 23050 rval = EAGAIN; 23051 goto done; 23052 } 23053 } 23054 } 23055 23056 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 23057 23058 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page, 23059 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag); 23060 if (rval != 0) 23061 goto done2; 23062 23063 /* 23064 * For the current temperature verify that the parameter length is 0x02 23065 * and the parameter code is 0x00 23066 */ 23067 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 23068 (temperature_page[5] == 0x00)) { 23069 if (temperature_page[9] == 0xFF) { 23070 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 23071 } else { 23072 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 23073 } 23074 } 23075 23076 /* 23077 * For the reference temperature verify that the parameter 23078 * length is 0x02 and the parameter code is 0x01 23079 */ 23080 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 23081 (temperature_page[11] == 0x01)) { 23082 if (temperature_page[15] == 0xFF) { 23083 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 23084 } else { 23085 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 23086 } 23087 } 23088 23089 /* Do the copyout regardless of the temperature commands status. */ 23090 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 23091 flag) != 0) { 23092 rval = EFAULT; 23093 goto done1; 23094 } 23095 23096 done2: 23097 if (rval != 0) { 23098 if (rval == EIO) 23099 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23100 else 23101 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23102 } 23103 done1: 23104 if (path_flag == SD_PATH_DIRECT) { 23105 sd_pm_exit(un); 23106 } 23107 23108 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 23109 done: 23110 sd_ssc_fini(ssc); 23111 if (dktemp != NULL) { 23112 kmem_free(dktemp, sizeof (struct dk_temperature)); 23113 } 23114 23115 return (rval); 23116 } 23117 23118 23119 /* 23120 * Function: sd_log_page_supported 23121 * 23122 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 23123 * supported log pages. 23124 * 23125 * Arguments: ssc - ssc contains pointer to driver soft state (unit) 23126 * structure for this target. 23127 * log_page - 23128 * 23129 * Return Code: -1 - on error (log sense is optional and may not be supported). 23130 * 0 - log page not found. 23131 * 1 - log page found. 23132 */ 23133 23134 static int 23135 sd_log_page_supported(sd_ssc_t *ssc, int log_page) 23136 { 23137 uchar_t *log_page_data; 23138 int i; 23139 int match = 0; 23140 int log_size; 23141 int status = 0; 23142 struct sd_lun *un; 23143 23144 ASSERT(ssc != NULL); 23145 un = ssc->ssc_un; 23146 ASSERT(un != NULL); 23147 23148 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 23149 23150 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0, 23151 SD_PATH_DIRECT); 23152 23153 if (status != 0) { 23154 if (status == EIO) { 23155 /* 23156 * Some disks do not support log sense, we 23157 * should ignore this kind of error(sense key is 23158 * 0x5 - illegal request). 
23159 */ 23160 uint8_t *sensep; 23161 int senlen; 23162 23163 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 23164 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 23165 ssc->ssc_uscsi_cmd->uscsi_rqresid); 23166 23167 if (senlen > 0 && 23168 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) { 23169 sd_ssc_assessment(ssc, 23170 SD_FMT_IGNORE_COMPROMISE); 23171 } else { 23172 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23173 } 23174 } else { 23175 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23176 } 23177 23178 SD_ERROR(SD_LOG_COMMON, un, 23179 "sd_log_page_supported: failed log page retrieval\n"); 23180 kmem_free(log_page_data, 0xFF); 23181 return (-1); 23182 } 23183 23184 log_size = log_page_data[3]; 23185 23186 /* 23187 * The list of supported log pages starts at the fourth byte. Check 23188 * until we run out of log pages or a match is found. 23189 */ 23190 for (i = 4; (i < (log_size + 4)) && !match; i++) { 23191 if (log_page_data[i] == log_page) { 23192 match++; 23193 } 23194 } 23195 kmem_free(log_page_data, 0xFF); 23196 return (match); 23197 } 23198 23199 23200 /* 23201 * Function: sd_mhdioc_failfast 23202 * 23203 * Description: This routine is the driver entry point for handling ioctl 23204 * requests to enable/disable the multihost failfast option. 23205 * (MHIOCENFAILFAST) 23206 * 23207 * Arguments: dev - the device number 23208 * arg - user specified probing interval. 23209 * flag - this argument is a pass through to ddi_copyxxx() 23210 * directly from the mode argument of ioctl(). 23211 * 23212 * Return Code: 0 23213 * EFAULT 23214 * ENXIO 23215 */ 23216 23217 static int 23218 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 23219 { 23220 struct sd_lun *un = NULL; 23221 int mh_time; 23222 int rval = 0; 23223 23224 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23225 return (ENXIO); 23226 } 23227 23228 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 23229 return (EFAULT); 23230 23231 if (mh_time) { 23232 mutex_enter(SD_MUTEX(un)); 23233 un->un_resvd_status |= SD_FAILFAST; 23234 mutex_exit(SD_MUTEX(un)); 23235 /* 23236 * If mh_time is INT_MAX, then this ioctl is being used for 23237 * SCSI-3 PGR purposes, and we don't need to spawn the watch 23238 * thread. 23239 */ 23239 if (mh_time != INT_MAX) { 23240 rval = sd_check_mhd(dev, mh_time); 23241 } 23242 } else { 23243 (void) sd_check_mhd(dev, 0); 23244 mutex_enter(SD_MUTEX(un)); 23245 un->un_resvd_status &= ~SD_FAILFAST; 23246 mutex_exit(SD_MUTEX(un)); 23247 } 23248 return (rval); 23249 } 23250 23251 23252 /* 23253 * Function: sd_mhdioc_takeown 23254 * 23255 * Description: This routine is the driver entry point for handling ioctl 23256 * requests to forcefully acquire exclusive access rights to the 23257 * multihost disk (MHIOCTKOWN). 23258 * 23259 * Arguments: dev - the device number 23260 * arg - user provided structure specifying the delay 23261 * parameters in milliseconds 23262 * flag - this argument is a pass through to ddi_copyxxx() 23263 * directly from the mode argument of ioctl().
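 *
 * For reference, a clustering consumer typically takes
 * ownership and then enables failfast. A minimal sketch
 * (illustrative only; the delay and interval values are
 * arbitrary examples, in milliseconds):
 *
 *	struct mhioctkown tkown;
 *	int mh_time = 1000;
 *
 *	bzero(&tkown, sizeof (tkown));
 *	tkown.reinstate_resv_delay = 6000;
 *	if (ioctl(fd, MHIOCTKOWN, &tkown) == 0)
 *		(void) ioctl(fd, MHIOCENFAILFAST, &mh_time);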
23264 * 23265 * Return Code: 0 23266 * EFAULT 23267 * ENXIO 23268 */ 23269 23270 static int 23271 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 23272 { 23273 struct sd_lun *un = NULL; 23274 struct mhioctkown *tkown = NULL; 23275 int rval = 0; 23276 23277 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23278 return (ENXIO); 23279 } 23280 23281 if (arg != NULL) { 23282 tkown = (struct mhioctkown *) 23283 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 23284 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 23285 if (rval != 0) { 23286 rval = EFAULT; 23287 goto error; 23288 } 23289 } 23290 23291 rval = sd_take_ownership(dev, tkown); 23292 mutex_enter(SD_MUTEX(un)); 23293 if (rval == 0) { 23294 un->un_resvd_status |= SD_RESERVE; 23295 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 23296 sd_reinstate_resv_delay = 23297 tkown->reinstate_resv_delay * 1000; 23298 } else { 23299 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 23300 } 23301 /* 23302 * Give the scsi_watch routine interval set by 23303 * the MHIOCENFAILFAST ioctl precedence here. 23304 */ 23305 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 23306 mutex_exit(SD_MUTEX(un)); 23307 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 23308 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23309 "sd_mhdioc_takeown : %d\n", 23310 sd_reinstate_resv_delay); 23311 } else { 23312 mutex_exit(SD_MUTEX(un)); 23313 } 23314 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 23315 sd_mhd_reset_notify_cb, (caddr_t)un); 23316 } else { 23317 un->un_resvd_status &= ~SD_RESERVE; 23318 mutex_exit(SD_MUTEX(un)); 23319 } 23320 23321 error: 23322 if (tkown != NULL) { 23323 kmem_free(tkown, sizeof (struct mhioctkown)); 23324 } 23325 return (rval); 23326 } 23327 23328 23329 /* 23330 * Function: sd_mhdioc_release 23331 * 23332 * Description: This routine is the driver entry point for handling ioctl 23333 * requests to release exclusive access rights to the multihost 23334 * disk (MHIOCRELEASE). 23335 * 23336 * Arguments: dev - the device number 23337 * 23338 * Return Code: 0 23339 * ENXIO 23340 */ 23341 23342 static int 23343 sd_mhdioc_release(dev_t dev) 23344 { 23345 struct sd_lun *un = NULL; 23346 timeout_id_t resvd_timeid_save; 23347 int resvd_status_save; 23348 int rval = 0; 23349 23350 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23351 return (ENXIO); 23352 } 23353 23354 mutex_enter(SD_MUTEX(un)); 23355 resvd_status_save = un->un_resvd_status; 23356 un->un_resvd_status &= 23357 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 23358 if (un->un_resvd_timeid) { 23359 resvd_timeid_save = un->un_resvd_timeid; 23360 un->un_resvd_timeid = NULL; 23361 mutex_exit(SD_MUTEX(un)); 23362 (void) untimeout(resvd_timeid_save); 23363 } else { 23364 mutex_exit(SD_MUTEX(un)); 23365 } 23366 23367 /* 23368 * destroy any pending timeout thread that may be attempting to 23369 * reinstate reservation on this device. 
*/ 23371 sd_rmv_resv_reclaim_req(dev); 23372 23373 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 23374 mutex_enter(SD_MUTEX(un)); 23375 if ((un->un_mhd_token) && 23376 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 23377 mutex_exit(SD_MUTEX(un)); 23378 (void) sd_check_mhd(dev, 0); 23379 } else { 23380 mutex_exit(SD_MUTEX(un)); 23381 } 23382 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 23383 sd_mhd_reset_notify_cb, (caddr_t)un); 23384 } else { 23385 /* 23386 * sd_mhd_watch_cb will restart the resvd recover timeout thread 23387 */ 23388 mutex_enter(SD_MUTEX(un)); 23389 un->un_resvd_status = resvd_status_save; 23390 mutex_exit(SD_MUTEX(un)); 23391 } 23392 return (rval); 23393 } 23394 23395 23396 /* 23397 * Function: sd_mhdioc_register_devid 23398 * 23399 * Description: This routine is the driver entry point for handling ioctl 23400 * requests to register the device id (MHIOCREREGISTERDEVID). 23401 * 23402 * Note: The implementation for this ioctl has been updated to 23403 * be consistent with the original PSARC case (1999/357) 23404 * (4375899, 4241671, 4220005) 23405 * 23406 * Arguments: dev - the device number 23407 * 23408 * Return Code: 0 23409 * ENXIO 23410 */ 23411 23412 static int 23413 sd_mhdioc_register_devid(dev_t dev) 23414 { 23415 struct sd_lun *un = NULL; 23416 int rval = 0; 23417 sd_ssc_t *ssc; 23418 23419 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23420 return (ENXIO); 23421 } 23422 23423 ASSERT(!mutex_owned(SD_MUTEX(un))); 23424 23425 mutex_enter(SD_MUTEX(un)); 23426 23427 /* If a devid already exists, de-register it */ 23428 if (un->un_devid != NULL) { 23429 ddi_devid_unregister(SD_DEVINFO(un)); 23430 /* 23431 * After unregistering the devid, we need to free the devid memory 23432 */ 23433 ddi_devid_free(un->un_devid); 23434 un->un_devid = NULL; 23435 } 23436 23437 /* Check for reservation conflict */ 23438 mutex_exit(SD_MUTEX(un)); 23439 ssc = sd_ssc_init(un); 23440 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0); 23441 mutex_enter(SD_MUTEX(un)); 23442 23443 switch (rval) { 23444 case 0: 23445 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 23446 break; 23447 case EACCES: 23448 break; 23449 default: 23450 rval = EIO; 23451 } 23452 23453 mutex_exit(SD_MUTEX(un)); 23454 if (rval != 0) { 23455 if (rval == EIO) 23456 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 23457 else 23458 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 23459 } 23460 sd_ssc_fini(ssc); 23461 return (rval); 23462 } 23463 23464 23465 /* 23466 * Function: sd_mhdioc_inkeys 23467 * 23468 * Description: This routine is the driver entry point for handling ioctl 23469 * requests to issue the SCSI-3 Persistent In Read Keys command 23470 * to the device (MHIOCGRP_INKEYS). 23471 * 23472 * Arguments: dev - the device number 23473 * arg - user provided in_keys structure 23474 * flag - this argument is a pass through to ddi_copyxxx() 23475 * directly from the mode argument of ioctl().
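 *
 * For reference, a minimal userland sketch of the key listing
 * this ioctl performs (illustrative only; the list size of 8
 * is an arbitrary example):
 *
 *	mhioc_resv_key_t keys[8];
 *	mhioc_key_list_t kl;
 *	mhioc_inkeys_t ik;
 *
 *	kl.listsize = 8;
 *	kl.list = keys;
 *	ik.li = &kl;
 *	(void) ioctl(fd, MHIOCGRP_INKEYS, &ik);
 *
 * On return, kl.listlen should hold the number of registered
 * keys; if it exceeds kl.listsize, the call should be repeated
 * with a larger buffer.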
23476 * 23477 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 23478 * ENXIO 23479 * EFAULT 23480 */ 23481 23482 static int 23483 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 23484 { 23485 struct sd_lun *un; 23486 mhioc_inkeys_t inkeys; 23487 int rval = 0; 23488 23489 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23490 return (ENXIO); 23491 } 23492 23493 #ifdef _MULTI_DATAMODEL 23494 switch (ddi_model_convert_from(flag & FMODELS)) { 23495 case DDI_MODEL_ILP32: { 23496 struct mhioc_inkeys32 inkeys32; 23497 23498 if (ddi_copyin(arg, &inkeys32, 23499 sizeof (struct mhioc_inkeys32), flag) != 0) { 23500 return (EFAULT); 23501 } 23502 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 23503 if ((rval = sd_persistent_reservation_in_read_keys(un, 23504 &inkeys, flag)) != 0) { 23505 return (rval); 23506 } 23507 inkeys32.generation = inkeys.generation; 23508 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 23509 flag) != 0) { 23510 return (EFAULT); 23511 } 23512 break; 23513 } 23514 case DDI_MODEL_NONE: 23515 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 23516 flag) != 0) { 23517 return (EFAULT); 23518 } 23519 if ((rval = sd_persistent_reservation_in_read_keys(un, 23520 &inkeys, flag)) != 0) { 23521 return (rval); 23522 } 23523 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 23524 flag) != 0) { 23525 return (EFAULT); 23526 } 23527 break; 23528 } 23529 23530 #else /* ! _MULTI_DATAMODEL */ 23531 23532 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 23533 return (EFAULT); 23534 } 23535 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 23536 if (rval != 0) { 23537 return (rval); 23538 } 23539 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 23540 return (EFAULT); 23541 } 23542 23543 #endif /* _MULTI_DATAMODEL */ 23544 23545 return (rval); 23546 } 23547 23548 23549 /* 23550 * Function: sd_mhdioc_inresv 23551 * 23552 * Description: This routine is the driver entry point for handling ioctl 23553 * requests to issue the SCSI-3 Persistent In Read Reservations 23554 * command to the device (MHIOCGRP_INRESV). 23555 * 23556 * Arguments: dev - the device number 23557 * arg - user provided in_resv structure 23558 * flag - this argument is a pass through to ddi_copyxxx() 23559 * directly from the mode argument of ioctl().
23560 * 23561 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 23562 * ENXIO 23563 * EFAULT 23564 */ 23565 23566 static int 23567 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 23568 { 23569 struct sd_lun *un; 23570 mhioc_inresvs_t inresvs; 23571 int rval = 0; 23572 23573 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23574 return (ENXIO); 23575 } 23576 23577 #ifdef _MULTI_DATAMODEL 23578 23579 switch (ddi_model_convert_from(flag & FMODELS)) { 23580 case DDI_MODEL_ILP32: { 23581 struct mhioc_inresvs32 inresvs32; 23582 23583 if (ddi_copyin(arg, &inresvs32, 23584 sizeof (struct mhioc_inresvs32), flag) != 0) { 23585 return (EFAULT); 23586 } 23587 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 23588 if ((rval = sd_persistent_reservation_in_read_resv(un, 23589 &inresvs, flag)) != 0) { 23590 return (rval); 23591 } 23592 inresvs32.generation = inresvs.generation; 23593 if (ddi_copyout(&inresvs32, arg, 23594 sizeof (struct mhioc_inresvs32), flag) != 0) { 23595 return (EFAULT); 23596 } 23597 break; 23598 } 23599 case DDI_MODEL_NONE: 23600 if (ddi_copyin(arg, &inresvs, 23601 sizeof (mhioc_inresvs_t), flag) != 0) { 23602 return (EFAULT); 23603 } 23604 if ((rval = sd_persistent_reservation_in_read_resv(un, 23605 &inresvs, flag)) != 0) { 23606 return (rval); 23607 } 23608 if (ddi_copyout(&inresvs, arg, 23609 sizeof (mhioc_inresvs_t), flag) != 0) { 23610 return (EFAULT); 23611 } 23612 break; 23613 } 23614 23615 #else /* ! _MULTI_DATAMODEL */ 23616 23617 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 23618 return (EFAULT); 23619 } 23620 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 23621 if (rval != 0) { 23622 return (rval); 23623 } 23624 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 23625 return (EFAULT); 23626 } 23627 23628 #endif /* ! _MULTI_DATAMODEL */ 23629 23630 return (rval); 23631 } 23632 23633 23634 /* 23635 * The following routines support the clustering functionality described below 23636 * and implement lost reservation reclaim functionality. 23637 * 23638 * Clustering 23639 * ---------- 23640 * The clustering code uses two different, independent forms of SCSI 23641 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 23642 * Persistent Group Reservations. For any particular disk, it will use either 23643 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 23644 * 23645 * SCSI-2 23646 * The cluster software takes ownership of a multi-hosted disk by issuing the 23647 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 23648 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a 23649 * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl 23650 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 23651 * driver. The meaning of failfast is that if the driver (on this host) ever 23652 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 23653 * it should immediately panic the host. The motivation for this ioctl is that 23654 * if this host does encounter reservation conflict, the underlying cause is 23655 * that some other host of the cluster has decided that this host is no longer 23656 * in the cluster and has seized control of the disks for itself. Since this 23657 * host is no longer in the cluster, it ought to panic itself. 
The 23658 * MHIOCENFAILFAST ioctl does two things: 23659 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 23660 * error to panic the host 23661 * (b) it sets up a periodic timer to test whether this host still has 23662 * "access" (in that no other host has reserved the device): if the 23663 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 23664 * purpose of that periodic timer is to handle scenarios where the host is 23665 * otherwise temporarily quiescent, temporarily doing no real i/o. 23666 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 23667 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 23668 * the device itself. 23669 * 23670 * SCSI-3 PGR 23671 * A direct semantic implementation of the SCSI-3 Persistent Reservation 23672 * facility is supported through the shared multihost disk ioctls 23673 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 23674 * MHIOCGRP_PREEMPTANDABORT, MHIOCGRP_REGISTERANDIGNOREKEY) 23675 * 23676 * Reservation Reclaim: 23677 * -------------------- 23678 * To support the lost reservation reclaim operations this driver creates a 23679 * single thread to handle reinstating reservations on all devices that have 23680 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 23681 * have LOST RESERVATIONS when the scsi watch facility calls back sd_mhd_watch_cb, 23682 * and the reservation reclaim thread loops through the requests to regain the 23683 * lost reservations. 23684 */ 23685 23686 /* 23687 * Function: sd_check_mhd() 23688 * 23689 * Description: This function sets up and submits a scsi watch request or 23690 * terminates an existing watch request. This routine is used in 23691 * support of reservation reclaim. 23692 * 23693 * Arguments: dev - the device 'dev_t' is used for context to discriminate 23694 * among multiple watches that share the callback function 23695 * interval - the number of milliseconds specifying the watch 23696 * interval for issuing TEST UNIT READY commands. If 23697 * set to 0 the watch should be terminated. If the 23698 * interval is set to 0 and if the device is required 23699 * to hold reservation while disabling failfast, the 23700 * watch is restarted with an interval of 23701 * reinstate_resv_delay. 23702 * 23703 * Return Code: 0 - Successful submit/terminate of scsi watch request 23704 * ENXIO - Indicates an invalid device was specified 23705 * EAGAIN - Unable to submit the scsi watch request 23706 */ 23707 23708 static int 23709 sd_check_mhd(dev_t dev, int interval) 23710 { 23711 struct sd_lun *un; 23712 opaque_t token; 23713 23714 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23715 return (ENXIO); 23716 } 23717 23718 /* is this a watch termination request? */ 23719 if (interval == 0) { 23720 mutex_enter(SD_MUTEX(un)); 23721 /* if there is an existing watch task then terminate it */ 23722 if (un->un_mhd_token) { 23723 token = un->un_mhd_token; 23724 un->un_mhd_token = NULL; 23725 mutex_exit(SD_MUTEX(un)); 23726 (void) scsi_watch_request_terminate(token, 23727 SCSI_WATCH_TERMINATE_ALL_WAIT); 23728 mutex_enter(SD_MUTEX(un)); 23729 } else { 23730 mutex_exit(SD_MUTEX(un)); 23731 /* 23732 * Note: If we return here we don't check for the 23733 * failfast case. This is the original legacy 23734 * implementation but perhaps we should be checking 23735 * the failfast case.
23736 */ 23737 return (0); 23738 } 23739 /* 23740 * If the device is required to hold reservation while 23741 * disabling failfast, we need to restart the scsi_watch 23742 * routine with an interval of reinstate_resv_delay. 23743 */ 23744 if (un->un_resvd_status & SD_RESERVE) { 23745 interval = sd_reinstate_resv_delay/1000; 23746 } else { 23747 /* no failfast so bail */ 23748 mutex_exit(SD_MUTEX(un)); 23749 return (0); 23750 } 23751 mutex_exit(SD_MUTEX(un)); 23752 } 23753 23754 /* 23755 * adjust minimum time interval to 1 second, 23756 * and convert from msecs to usecs 23757 */ 23758 if (interval > 0 && interval < 1000) { 23759 interval = 1000; 23760 } 23761 interval *= 1000; 23762 23763 /* 23764 * submit the request to the scsi_watch service 23765 */ 23766 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 23767 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 23768 if (token == NULL) { 23769 return (EAGAIN); 23770 } 23771 23772 /* 23773 * save token for termination later on 23774 */ 23775 mutex_enter(SD_MUTEX(un)); 23776 un->un_mhd_token = token; 23777 mutex_exit(SD_MUTEX(un)); 23778 return (0); 23779 } 23780 23781 23782 /* 23783 * Function: sd_mhd_watch_cb() 23784 * 23785 * Description: This function is the call back function used by the scsi watch 23786 * facility. The scsi watch facility sends the "Test Unit Ready" 23787 * and processes the status. If applicable (i.e. a "Unit Attention" 23788 * status and automatic "Request Sense" not used) the scsi watch 23789 * facility will send a "Request Sense" and retrieve the sense data 23790 * to be passed to this callback function. In either case, whether the 23791 * "Request Sense" was automatic or submitted by the facility, this 23792 * callback is passed the status and sense data. 23793 * 23794 * Arguments: arg - the device 'dev_t' is used for context to discriminate 23795 * among multiple watches that share this callback function 23796 * resultp - scsi watch facility result packet containing scsi 23797 * packet, status byte and sense data 23798 * 23799 * Return Code: 0 - continue the watch task 23800 * non-zero - terminate the watch task 23801 */ 23802 23803 static int 23804 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 23805 { 23806 struct sd_lun *un; 23807 struct scsi_status *statusp; 23808 uint8_t *sensep; 23809 struct scsi_pkt *pkt; 23810 uchar_t actual_sense_length; 23811 dev_t dev = (dev_t)arg; 23812 23813 ASSERT(resultp != NULL); 23814 statusp = resultp->statusp; 23815 sensep = (uint8_t *)resultp->sensep; 23816 pkt = resultp->pkt; 23817 actual_sense_length = resultp->actual_sense_length; 23818 23819 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23820 return (ENXIO); 23821 } 23822 23823 SD_TRACE(SD_LOG_IOCTL_MHD, un, 23824 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 23825 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 23826 23827 /* Begin processing of the status and/or sense data */ 23828 if (pkt->pkt_reason != CMD_CMPLT) { 23829 /* Handle the incomplete packet */ 23830 sd_mhd_watch_incomplete(un, pkt); 23831 return (0); 23832 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 23833 if (*((unsigned char *)statusp) 23834 == STATUS_RESERVATION_CONFLICT) { 23835 /* 23836 * Handle a reservation conflict by panicking if 23837 * configured for failfast or by logging the conflict 23838 * and updating the reservation status 23839 */ 23840 mutex_enter(SD_MUTEX(un)); 23841 if ((un->un_resvd_status & SD_FAILFAST) && 23842 (sd_failfast_enable)) {
sd_panic_for_res_conflict(un); 23844 /*NOTREACHED*/ 23845 } 23846 SD_INFO(SD_LOG_IOCTL_MHD, un, 23847 "sd_mhd_watch_cb: Reservation Conflict\n"); 23848 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 23849 mutex_exit(SD_MUTEX(un)); 23850 } 23851 } 23852 23853 if (sensep != NULL) { 23854 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 23855 mutex_enter(SD_MUTEX(un)); 23856 if ((scsi_sense_asc(sensep) == 23857 SD_SCSI_RESET_SENSE_CODE) && 23858 (un->un_resvd_status & SD_RESERVE)) { 23859 /* 23860 * The additional sense code indicates a power 23861 * on or bus device reset has occurred; update 23862 * the reservation status. 23863 */ 23864 un->un_resvd_status |= 23865 (SD_LOST_RESERVE | SD_WANT_RESERVE); 23866 SD_INFO(SD_LOG_IOCTL_MHD, un, 23867 "sd_mhd_watch_cb: Lost Reservation\n"); 23868 } 23869 } else { 23870 return (0); 23871 } 23872 } else { 23873 mutex_enter(SD_MUTEX(un)); 23874 } 23875 23876 if ((un->un_resvd_status & SD_RESERVE) && 23877 (un->un_resvd_status & SD_LOST_RESERVE)) { 23878 if (un->un_resvd_status & SD_WANT_RESERVE) { 23879 /* 23880 * A reset occurred in between the last probe and this 23881 * one so if a timeout is pending cancel it. 23882 */ 23883 if (un->un_resvd_timeid) { 23884 timeout_id_t temp_id = un->un_resvd_timeid; 23885 un->un_resvd_timeid = NULL; 23886 mutex_exit(SD_MUTEX(un)); 23887 (void) untimeout(temp_id); 23888 mutex_enter(SD_MUTEX(un)); 23889 } 23890 un->un_resvd_status &= ~SD_WANT_RESERVE; 23891 } 23892 if (un->un_resvd_timeid == 0) { 23893 /* Schedule a timeout to handle the lost reservation */ 23894 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 23895 (void *)dev, 23896 drv_usectohz(sd_reinstate_resv_delay)); 23897 } 23898 } 23899 mutex_exit(SD_MUTEX(un)); 23900 return (0); 23901 } 23902 23903 23904 /* 23905 * Function: sd_mhd_watch_incomplete() 23906 * 23907 * Description: This function is used to find out why a scsi pkt sent by the 23908 * scsi watch facility was not completed. Under some scenarios this 23909 * routine will return. Otherwise it will send a bus reset to see 23910 * if the drive is still online. 23911 * 23912 * Arguments: un - driver soft state (unit) structure 23913 * pkt - incomplete scsi pkt 23914 */ 23915 23916 static void 23917 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 23918 { 23919 int be_chatty; 23920 int perr; 23921 23922 ASSERT(pkt != NULL); 23923 ASSERT(un != NULL); 23924 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 23925 perr = (pkt->pkt_statistics & STAT_PERR); 23926 23927 mutex_enter(SD_MUTEX(un)); 23928 if (un->un_state == SD_STATE_DUMPING) { 23929 mutex_exit(SD_MUTEX(un)); 23930 return; 23931 } 23932 23933 switch (pkt->pkt_reason) { 23934 case CMD_UNX_BUS_FREE: 23935 /* 23936 * If we had a parity error that caused the target to drop BSY*, 23937 * don't be chatty about it. 23938 */ 23939 if (perr && be_chatty) { 23940 be_chatty = 0; 23941 } 23942 break; 23943 case CMD_TAG_REJECT: 23944 /* 23945 * The SCSI-2 spec states that a tag reject will be sent by the 23946 * target if tagged queuing is not supported. A tag reject may 23947 * also be sent during certain initialization periods or to 23948 * control internal resources. For the latter case the target 23949 * may also return Queue Full. 23950 * 23951 * If this driver receives a tag reject from a target that is 23952 * going through an init period or controlling internal 23953 * resources tagged queuing will be disabled. 
This is less 23954 * than optimal behavior, but the driver is unable to determine 23955 * the target state and assumes tagged queueing is not supported. 23956 */ 23957 pkt->pkt_flags = 0; 23958 un->un_tagflags = 0; 23959 23960 if (un->un_f_opt_queueing == TRUE) { 23961 un->un_throttle = min(un->un_throttle, 3); 23962 } else { 23963 un->un_throttle = 1; 23964 } 23965 mutex_exit(SD_MUTEX(un)); 23966 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 23967 mutex_enter(SD_MUTEX(un)); 23968 break; 23969 case CMD_INCOMPLETE: 23970 /* 23971 * The transport stopped with an abnormal state; fall through and 23972 * reset the target and/or bus unless selection did not complete 23973 * (indicated by STATE_GOT_BUS), in which case we don't want to 23974 * go through a target/bus reset. 23975 */ 23976 if (pkt->pkt_state == STATE_GOT_BUS) { 23977 break; 23978 } 23979 /*FALLTHROUGH*/ 23980 23981 case CMD_TIMEOUT: 23982 default: 23983 /* 23984 * The lun may still be running the command, so a lun reset 23985 * should be attempted. If the lun reset fails or cannot be 23986 * issued, then try a target reset. Lastly try a bus reset. 23987 */ 23988 if ((pkt->pkt_statistics & 23989 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 23990 int reset_retval = 0; 23991 mutex_exit(SD_MUTEX(un)); 23992 if (un->un_f_allow_bus_device_reset == TRUE) { 23993 if (un->un_f_lun_reset_enabled == TRUE) { 23994 reset_retval = 23995 scsi_reset(SD_ADDRESS(un), 23996 RESET_LUN); 23997 } 23998 if (reset_retval == 0) { 23999 reset_retval = 24000 scsi_reset(SD_ADDRESS(un), 24001 RESET_TARGET); 24002 } 24003 } 24004 if (reset_retval == 0) { 24005 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 24006 } 24007 mutex_enter(SD_MUTEX(un)); 24008 } 24009 break; 24010 } 24011 24012 /* A device/bus reset has occurred; update the reservation status. */ 24013 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 24014 (STAT_BUS_RESET | STAT_DEV_RESET))) { 24015 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24016 un->un_resvd_status |= 24017 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24018 SD_INFO(SD_LOG_IOCTL_MHD, un, 24019 "sd_mhd_watch_incomplete: Lost Reservation\n"); 24020 } 24021 } 24022 24023 /* 24024 * The disk has been turned off; update the device state. 24025 * 24026 * Note: Should we be offlining the disk here? 24027 */ 24028 if (pkt->pkt_state == STATE_GOT_BUS) { 24029 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 24030 "Disk not responding to selection\n"); 24031 if (un->un_state != SD_STATE_OFFLINE) { 24032 New_state(un, SD_STATE_OFFLINE); 24033 } 24034 } else if (be_chatty) { 24035 /* 24036 * Suppress messages if they are all the same pkt_reason; 24037 * with TQ, many (up to 256) are returned with the same 24038 * pkt_reason. 24039 */ 24040 if (pkt->pkt_reason != un->un_last_pkt_reason) { 24041 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24042 "sd_mhd_watch_incomplete: " 24043 "SCSI transport failed: reason '%s'\n", 24044 scsi_rname(pkt->pkt_reason)); 24045 } 24046 } 24047 un->un_last_pkt_reason = pkt->pkt_reason; 24048 mutex_exit(SD_MUTEX(un)); 24049 } 24050 24051 24052 /* 24053 * Function: sd_sname() 24054 * 24055 * Description: This is a simple little routine to return a string containing 24056 * a printable description of the command status byte for use in 24057 * logging. 24058 * 24059 * Arguments: status - the command status byte 24060 * 24061 * Return Code: char * - string containing status description.
24062 */ 24063 24064 static char * 24065 sd_sname(uchar_t status) 24066 { 24067 switch (status & STATUS_MASK) { 24068 case STATUS_GOOD: 24069 return ("good status"); 24070 case STATUS_CHECK: 24071 return ("check condition"); 24072 case STATUS_MET: 24073 return ("condition met"); 24074 case STATUS_BUSY: 24075 return ("busy"); 24076 case STATUS_INTERMEDIATE: 24077 return ("intermediate"); 24078 case STATUS_INTERMEDIATE_MET: 24079 return ("intermediate - condition met"); 24080 case STATUS_RESERVATION_CONFLICT: 24081 return ("reservation_conflict"); 24082 case STATUS_TERMINATED: 24083 return ("command terminated"); 24084 case STATUS_QFULL: 24085 return ("queue full"); 24086 default: 24087 return ("<unknown status>"); 24088 } 24089 } 24090 24091 24092 /* 24093 * Function: sd_mhd_resvd_recover() 24094 * 24095 * Description: This function adds a reservation entry to the 24096 * sd_resv_reclaim_request list and signals the reservation 24097 * reclaim thread that there is work pending. If the reservation 24098 * reclaim thread has not been previously created this function 24099 * will kick it off. 24100 * 24101 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24102 * among multiple watches that share this callback function 24103 * 24104 * Context: This routine is called by timeout() and is run in interrupt 24105 * context. It must not sleep or call other functions which may 24106 * sleep. 24107 */ 24108 24109 static void 24110 sd_mhd_resvd_recover(void *arg) 24111 { 24112 dev_t dev = (dev_t)arg; 24113 struct sd_lun *un; 24114 struct sd_thr_request *sd_treq = NULL; 24115 struct sd_thr_request *sd_cur = NULL; 24116 struct sd_thr_request *sd_prev = NULL; 24117 int already_there = 0; 24118 24119 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24120 return; 24121 } 24122 24123 mutex_enter(SD_MUTEX(un)); 24124 un->un_resvd_timeid = NULL; 24125 if (un->un_resvd_status & SD_WANT_RESERVE) { 24126 /* 24127 * There was a reset so don't issue the reserve, allow the 24128 * sd_mhd_watch_cb callback function to notice this and 24129 * reschedule the timeout for reservation. 24130 */ 24131 mutex_exit(SD_MUTEX(un)); 24132 return; 24133 } 24134 mutex_exit(SD_MUTEX(un)); 24135 24136 /* 24137 * Add this device to the sd_resv_reclaim_request list and the 24138 * sd_resv_reclaim_thread should take care of the rest. 24139 * 24140 * Note: We can't sleep in this context so if the memory allocation 24141 * fails allow the sd_mhd_watch_cb callback function to notice this and 24142 * reschedule the timeout for reservation. 
(4378460) */ 24144 sd_treq = (struct sd_thr_request *) 24145 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 24146 if (sd_treq == NULL) { 24147 return; 24148 } 24149 24150 sd_treq->sd_thr_req_next = NULL; 24151 sd_treq->dev = dev; 24152 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24153 if (sd_tr.srq_thr_req_head == NULL) { 24154 sd_tr.srq_thr_req_head = sd_treq; 24155 } else { 24156 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 24157 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 24158 if (sd_cur->dev == dev) { 24159 /* 24160 * already in queue, so don't log 24161 * another request for the device 24162 */ 24163 already_there = 1; 24164 break; 24165 } 24166 sd_prev = sd_cur; 24167 } 24168 if (!already_there) { 24169 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 24170 "logging request for %lx\n", dev); 24171 sd_prev->sd_thr_req_next = sd_treq; 24172 } else { 24173 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 24174 } 24175 } 24176 24177 /* 24178 * Create a kernel thread to do the reservation reclaim if one does not 24179 * already exist, then return; we cannot block this (timeout) thread 24180 * while the reservation reclaim is performed. 24181 */ 24182 if (sd_tr.srq_resv_reclaim_thread == NULL) 24183 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 24184 sd_resv_reclaim_thread, NULL, 24185 0, &p0, TS_RUN, v.v_maxsyspri - 2); 24186 24187 /* Tell the reservation reclaim thread that it has work to do */ 24188 cv_signal(&sd_tr.srq_resv_reclaim_cv); 24189 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24190 } 24191 24192 /* 24193 * Function: sd_resv_reclaim_thread() 24194 * 24195 * Description: This function implements the reservation reclaim operations. 24196 * 24197 * Arguments: none; the requests to be processed are taken from the 24198 * sd_resv_reclaim_request queue 24199 */ 24200 24201 static void 24202 sd_resv_reclaim_thread() 24203 { 24204 struct sd_lun *un; 24205 struct sd_thr_request *sd_mhreq; 24206 24207 /* Wait for work */ 24208 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24209 if (sd_tr.srq_thr_req_head == NULL) { 24210 cv_wait(&sd_tr.srq_resv_reclaim_cv, 24211 &sd_tr.srq_resv_reclaim_mutex); 24212 } 24213 24214 /* Loop while we have work */ 24215 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 24216 un = ddi_get_soft_state(sd_state, 24217 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 24218 if (un == NULL) { 24219 /* 24220 * softstate structure is NULL so just 24221 * dequeue the request and continue 24222 */ 24223 sd_tr.srq_thr_req_head = 24224 sd_tr.srq_thr_cur_req->sd_thr_req_next; 24225 kmem_free(sd_tr.srq_thr_cur_req, 24226 sizeof (struct sd_thr_request)); 24227 continue; 24228 } 24229 24230 /* dequeue the request */ 24231 sd_mhreq = sd_tr.srq_thr_cur_req; 24232 sd_tr.srq_thr_req_head = 24233 sd_tr.srq_thr_cur_req->sd_thr_req_next; 24234 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24235 24236 /* 24237 * Reclaim reservation only if SD_RESERVE is still set. There 24238 * may have been a call to MHIOCRELEASE before we got here. 24239 */ 24240 mutex_enter(SD_MUTEX(un)); 24241 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24242 /* 24243 * Note: The SD_LOST_RESERVE flag is cleared before 24244 * reclaiming the reservation.
If this is done after the 24245 * call to sd_reserve_release a reservation loss in the 24246 * window between pkt completion of reserve cmd and 24247 * mutex_enter below may not be recognized 24248 */ 24249 un->un_resvd_status &= ~SD_LOST_RESERVE; 24250 mutex_exit(SD_MUTEX(un)); 24251 24252 if (sd_reserve_release(sd_mhreq->dev, 24253 SD_RESERVE) == 0) { 24254 mutex_enter(SD_MUTEX(un)); 24255 un->un_resvd_status |= SD_RESERVE; 24256 mutex_exit(SD_MUTEX(un)); 24257 SD_INFO(SD_LOG_IOCTL_MHD, un, 24258 "sd_resv_reclaim_thread: " 24259 "Reservation Recovered\n"); 24260 } else { 24261 mutex_enter(SD_MUTEX(un)); 24262 un->un_resvd_status |= SD_LOST_RESERVE; 24263 mutex_exit(SD_MUTEX(un)); 24264 SD_INFO(SD_LOG_IOCTL_MHD, un, 24265 "sd_resv_reclaim_thread: Failed " 24266 "Reservation Recovery\n"); 24267 } 24268 } else { 24269 mutex_exit(SD_MUTEX(un)); 24270 } 24271 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24272 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 24273 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24274 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 24275 /* 24276 * wakeup the destroy thread if anyone is waiting on 24277 * us to complete. 24278 */ 24279 cv_signal(&sd_tr.srq_inprocess_cv); 24280 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24281 "sd_resv_reclaim_thread: cv_signalling current request \n"); 24282 } 24283 24284 /* 24285 * cleanup the sd_tr structure now that this thread will not exist 24286 */ 24287 ASSERT(sd_tr.srq_thr_req_head == NULL); 24288 ASSERT(sd_tr.srq_thr_cur_req == NULL); 24289 sd_tr.srq_resv_reclaim_thread = NULL; 24290 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24291 thread_exit(); 24292 } 24293 24294 24295 /* 24296 * Function: sd_rmv_resv_reclaim_req() 24297 * 24298 * Description: This function removes any pending reservation reclaim requests 24299 * for the specified device. 24300 * 24301 * Arguments: dev - the device 'dev_t' 24302 */ 24303 24304 static void 24305 sd_rmv_resv_reclaim_req(dev_t dev) 24306 { 24307 struct sd_thr_request *sd_mhreq; 24308 struct sd_thr_request *sd_prev; 24309 24310 /* Remove a reservation reclaim request from the list */ 24311 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24312 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 24313 /* 24314 * We are attempting to reinstate reservation for 24315 * this device. We wait for sd_reserve_release() 24316 * to return before we return. 24317 */ 24318 cv_wait(&sd_tr.srq_inprocess_cv, 24319 &sd_tr.srq_resv_reclaim_mutex); 24320 } else { 24321 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 24322 if (sd_mhreq && sd_mhreq->dev == dev) { 24323 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 24324 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24325 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24326 return; 24327 } 24328 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 24329 if (sd_mhreq && sd_mhreq->dev == dev) { 24330 break; 24331 } 24332 sd_prev = sd_mhreq; 24333 } 24334 if (sd_mhreq != NULL) { 24335 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 24336 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 24337 } 24338 } 24339 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24340 } 24341 24342 24343 /* 24344 * Function: sd_mhd_reset_notify_cb() 24345 * 24346 * Description: This is a call back function for scsi_reset_notify. This 24347 * function updates the softstate reserved status and logs the 24348 * reset. The driver scsi watch facility callback function 24349 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 24350 * will reclaim the reservation. 
24351 * 24352 * Arguments: arg - driver soft state (unit) structure 24353 */ 24354 24355 static void 24356 sd_mhd_reset_notify_cb(caddr_t arg) 24357 { 24358 struct sd_lun *un = (struct sd_lun *)arg; 24359 24360 mutex_enter(SD_MUTEX(un)); 24361 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24362 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 24363 SD_INFO(SD_LOG_IOCTL_MHD, un, 24364 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 24365 } 24366 mutex_exit(SD_MUTEX(un)); 24367 } 24368 24369 24370 /* 24371 * Function: sd_take_ownership() 24372 * 24373 * Description: This routine implements an algorithm to achieve a stable 24374 * reservation on disks which don't implement priority reserve, 24375 * and makes sure that other hosts lose any re-reservation attempts. 24376 * This algorithm consists of a loop that keeps issuing the RESERVE 24377 * for some period of time (min_ownership_delay, default 6 seconds). 24378 * During that loop, it looks to see if there has been a bus device 24379 * reset or bus reset (both of which cause an existing reservation 24380 * to be lost). If the reservation is lost, it reissues the RESERVE 24381 * until a period of min_ownership_delay with no resets has gone by, or 24382 * until max_ownership_delay has expired. This loop ensures that 24383 * the host really did manage to reserve the device, in spite of 24384 * resets. The looping for min_ownership_delay (default six 24385 * seconds) is important to early generation clustering products, 24386 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 24387 * MHIOCENFAILFAST periodic timer of two seconds. By having 24388 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 24389 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 24390 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 24391 * have already noticed, via the MHIOCENFAILFAST polling, that it 24392 * no longer "owns" the disk and will have panicked itself. Thus, 24393 * the host issuing the MHIOCTKOWN is assured (with timing 24394 * dependencies) that by the time it actually starts to use the 24395 * disk for real work, the old owner is no longer accessing it. 24396 * 24397 * min_ownership_delay is the minimum amount of time for which the 24398 * disk must be reserved continuously devoid of resets before the 24399 * MHIOCTKOWN ioctl will return success. 24400 * 24401 * max_ownership_delay indicates the amount of time by which the 24402 * take ownership should succeed or time out with an error. 24403 * 24404 * Arguments: dev - the device 'dev_t' 24405 * p - pointer to a struct containing timing info. 24406 * 24407 * Return Code: 0 for success or error code 24408 */ 24409 24410 static int 24411 sd_take_ownership(dev_t dev, struct mhioctkown *p) 24412 { 24413 struct sd_lun *un; 24414 int rval; 24415 int err; 24416 int reservation_count = 0; 24417 int min_ownership_delay = 6000000; /* in usec */ 24418 int max_ownership_delay = 30000000; /* in usec */ 24419 clock_t start_time; /* starting time of this algorithm */ 24420 clock_t end_time; /* time limit for giving up */ 24421 clock_t ownership_time; /* time limit for stable ownership */ 24422 clock_t current_time; 24423 clock_t previous_current_time; 24424 24425 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24426 return (ENXIO); 24427 } 24428 24429 /* 24430 * Attempt a device reservation. A priority reservation is requested.
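 *
 * For context, below is a minimal user-land sketch of how a clustering
 * agent might drive this routine (illustrative only: the device path is
 * hypothetical, error handling is elided, and the mhioctkown layout is
 * assumed to be the one in <sys/mhd.h>).
 */
#if 0	/* illustrative sketch, not compiled with the driver */
#include <sys/mhd.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

static void
example_take_ownership(void)
{
	struct mhioctkown tk;
	/* hypothetical device path */
	int fd = open("/dev/rdsk/c0t0d0s2", O_RDWR);

	tk.reinstate_resv_delay = 0;	/* 0 keeps the driver default */
	tk.min_ownership_delay = 6000;	/* msec; converted to usec below */
	tk.max_ownership_delay = 30000;	/* msec */

	/* loops issuing RESERVE until ownership is stable or we time out */
	(void) ioctl(fd, MHIOCTKOWN, &tk);
	(void) close(fd);
}
#endif
	/*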
24431 */ 24432 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 24433 != SD_SUCCESS) { 24434 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24435 "sd_take_ownership: return(1)=%d\n", rval); 24436 return (rval); 24437 } 24438 24439 /* Update the softstate reserved status to indicate the reservation */ 24440 mutex_enter(SD_MUTEX(un)); 24441 un->un_resvd_status |= SD_RESERVE; 24442 un->un_resvd_status &= 24443 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 24444 mutex_exit(SD_MUTEX(un)); 24445 24446 if (p != NULL) { 24447 if (p->min_ownership_delay != 0) { 24448 min_ownership_delay = p->min_ownership_delay * 1000; 24449 } 24450 if (p->max_ownership_delay != 0) { 24451 max_ownership_delay = p->max_ownership_delay * 1000; 24452 } 24453 } 24454 SD_INFO(SD_LOG_IOCTL_MHD, un, 24455 "sd_take_ownership: min, max delays: %d, %d\n", 24456 min_ownership_delay, max_ownership_delay); 24457 24458 start_time = ddi_get_lbolt(); 24459 current_time = start_time; 24460 ownership_time = current_time + drv_usectohz(min_ownership_delay); 24461 end_time = start_time + drv_usectohz(max_ownership_delay); 24462 24463 while (current_time - end_time < 0) { 24464 delay(drv_usectohz(500000)); 24465 24466 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 24467 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 24468 mutex_enter(SD_MUTEX(un)); 24469 rval = (un->un_resvd_status & 24470 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 24471 mutex_exit(SD_MUTEX(un)); 24472 break; 24473 } 24474 } 24475 previous_current_time = current_time; 24476 current_time = ddi_get_lbolt(); 24477 mutex_enter(SD_MUTEX(un)); 24478 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 24479 ownership_time = ddi_get_lbolt() + 24480 drv_usectohz(min_ownership_delay); 24481 reservation_count = 0; 24482 } else { 24483 reservation_count++; 24484 } 24485 un->un_resvd_status |= SD_RESERVE; 24486 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 24487 mutex_exit(SD_MUTEX(un)); 24488 24489 SD_INFO(SD_LOG_IOCTL_MHD, un, 24490 "sd_take_ownership: ticks for loop iteration=%ld, " 24491 "reservation=%s\n", (current_time - previous_current_time), 24492 reservation_count ? 
"ok" : "reclaimed"); 24493 24494 if (current_time - ownership_time >= 0 && 24495 reservation_count >= 4) { 24496 rval = 0; /* Achieved a stable ownership */ 24497 break; 24498 } 24499 if (current_time - end_time >= 0) { 24500 rval = EACCES; /* No ownership in max possible time */ 24501 break; 24502 } 24503 } 24504 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24505 "sd_take_ownership: return(2)=%d\n", rval); 24506 return (rval); 24507 } 24508 24509 24510 /* 24511 * Function: sd_reserve_release() 24512 * 24513 * Description: This function builds and sends scsi RESERVE, RELEASE, and 24514 * PRIORITY RESERVE commands based on a user specified command type 24515 * 24516 * Arguments: dev - the device 'dev_t' 24517 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 24518 * SD_RESERVE, SD_RELEASE 24519 * 24520 * Return Code: 0 or Error Code 24521 */ 24522 24523 static int 24524 sd_reserve_release(dev_t dev, int cmd) 24525 { 24526 struct uscsi_cmd *com = NULL; 24527 struct sd_lun *un = NULL; 24528 char cdb[CDB_GROUP0]; 24529 int rval; 24530 24531 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 24532 (cmd == SD_PRIORITY_RESERVE)); 24533 24534 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24535 return (ENXIO); 24536 } 24537 24538 /* instantiate and initialize the command and cdb */ 24539 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 24540 bzero(cdb, CDB_GROUP0); 24541 com->uscsi_flags = USCSI_SILENT; 24542 com->uscsi_timeout = un->un_reserve_release_time; 24543 com->uscsi_cdblen = CDB_GROUP0; 24544 com->uscsi_cdb = cdb; 24545 if (cmd == SD_RELEASE) { 24546 cdb[0] = SCMD_RELEASE; 24547 } else { 24548 cdb[0] = SCMD_RESERVE; 24549 } 24550 24551 /* Send the command. */ 24552 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24553 SD_PATH_STANDARD); 24554 24555 /* 24556 * "break" a reservation that is held by another host, by issuing a 24557 * reset if priority reserve is desired, and we could not get the 24558 * device. 24559 */ 24560 if ((cmd == SD_PRIORITY_RESERVE) && 24561 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 24562 /* 24563 * First try to reset the LUN. If we cannot, then try a target 24564 * reset, followed by a bus reset if the target reset fails. 24565 */ 24566 int reset_retval = 0; 24567 if (un->un_f_lun_reset_enabled == TRUE) { 24568 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 24569 } 24570 if (reset_retval == 0) { 24571 /* The LUN reset either failed or was not issued */ 24572 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 24573 } 24574 if ((reset_retval == 0) && 24575 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 24576 rval = EIO; 24577 kmem_free(com, sizeof (*com)); 24578 return (rval); 24579 } 24580 24581 bzero(com, sizeof (struct uscsi_cmd)); 24582 com->uscsi_flags = USCSI_SILENT; 24583 com->uscsi_cdb = cdb; 24584 com->uscsi_cdblen = CDB_GROUP0; 24585 com->uscsi_timeout = 5; 24586 24587 /* 24588 * Reissue the last reserve command, this time without request 24589 * sense. Assume that it is just a regular reserve command. 24590 */ 24591 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 24592 SD_PATH_STANDARD); 24593 } 24594 24595 /* Return an error if still getting a reservation conflict. 
*/ 24596 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 24597 rval = EACCES; 24598 } 24599 24600 kmem_free(com, sizeof (*com)); 24601 return (rval); 24602 } 24603 24604 24605 #define SD_NDUMP_RETRIES 12 24606 /* 24607 * System Crash Dump routine 24608 */ 24609 24610 static int 24611 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 24612 { 24613 int instance; 24614 int partition; 24615 int i; 24616 int err; 24617 struct sd_lun *un; 24618 struct scsi_pkt *wr_pktp; 24619 struct buf *wr_bp; 24620 struct buf wr_buf; 24621 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 24622 daddr_t tgt_blkno; /* rmw - blkno for target */ 24623 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 24624 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 24625 size_t io_start_offset; 24626 int doing_rmw = FALSE; 24627 int rval; 24628 ssize_t dma_resid; 24629 daddr_t oblkno; 24630 diskaddr_t nblks = 0; 24631 diskaddr_t start_block; 24632 24633 instance = SDUNIT(dev); 24634 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 24635 !SD_IS_VALID_LABEL(un) || ISCD(un)) { 24636 return (ENXIO); 24637 } 24638 24639 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 24640 24641 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 24642 24643 partition = SDPART(dev); 24644 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 24645 24646 /* Validate the blocks to dump against the partition size. */ 24647 24648 (void) cmlb_partinfo(un->un_cmlbhandle, partition, 24649 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT); 24650 24651 if ((blkno + nblk) > nblks) { 24652 SD_TRACE(SD_LOG_DUMP, un, 24653 "sddump: dump range larger than partition: " 24654 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 24655 blkno, nblk, nblks); 24656 return (EINVAL); 24657 } 24658 24659 mutex_enter(&un->un_pm_mutex); 24660 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 24661 struct scsi_pkt *start_pktp; 24662 24663 mutex_exit(&un->un_pm_mutex); 24664 24665 /* 24666 * use pm framework to power on the HBA first 24667 */ 24668 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 24669 24670 /* 24671 * Dump no longer uses sdpower to power on a device; it's 24672 * done in-line here so it can be done in polled mode. 24673 */ 24674 24675 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 24676 24677 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 24678 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 24679 24680 if (start_pktp == NULL) { 24681 /* We were not given a SCSI packet, fail. */ 24682 return (EIO); 24683 } 24684 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 24685 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 24686 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 24687 start_pktp->pkt_flags = FLAG_NOINTR; 24688 24689 mutex_enter(SD_MUTEX(un)); 24690 SD_FILL_SCSI1_LUN(un, start_pktp); 24691 mutex_exit(SD_MUTEX(un)); 24692 /* 24693 * Scsi_poll returns 0 (success) if the command completes and 24694 * the status block is STATUS_GOOD. 24695 */ 24696 if (sd_scsi_poll(un, start_pktp) != 0) { 24697 scsi_destroy_pkt(start_pktp); 24698 return (EIO); 24699 } 24700 scsi_destroy_pkt(start_pktp); 24701 (void) sd_ddi_pm_resume(un); 24702 } else { 24703 mutex_exit(&un->un_pm_mutex); 24704 } 24705 24706 mutex_enter(SD_MUTEX(un)); 24707 un->un_throttle = 0; 24708 24709 /* 24710 * The first time through, reset the specific target device. 24711 * However, when cpr calls sddump we know that sd is in 24712 * a good state, so no bus reset is required. 24713 * Clear sense data via Request Sense cmd.
24714 * In sddump we don't care about allow_bus_device_reset anymore 24715 */ 24716 24717 if ((un->un_state != SD_STATE_SUSPENDED) && 24718 (un->un_state != SD_STATE_DUMPING)) { 24719 24720 New_state(un, SD_STATE_DUMPING); 24721 24722 if (un->un_f_is_fibre == FALSE) { 24723 mutex_exit(SD_MUTEX(un)); 24724 /* 24725 * Attempt a bus reset for parallel scsi. 24726 * 24727 * Note: A bus reset is required because on some host 24728 * systems (e.g. E420R) a bus device reset is 24729 * insufficient to reset the state of the target. 24730 * 24731 * Note: Don't issue the reset for fibre-channel, 24732 * because this tends to hang the bus (loop) for 24733 * too long while everyone is logging out and in 24734 * and the deadman timer for dumping will fire 24735 * before the dump is complete. 24736 */ 24737 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 24738 mutex_enter(SD_MUTEX(un)); 24739 Restore_state(un); 24740 mutex_exit(SD_MUTEX(un)); 24741 return (EIO); 24742 } 24743 24744 /* Delay to give the device some recovery time. */ 24745 drv_usecwait(10000); 24746 24747 if (sd_send_polled_RQS(un) == SD_FAILURE) { 24748 SD_INFO(SD_LOG_DUMP, un, 24749 "sddump: sd_send_polled_RQS failed\n"); 24750 } 24751 mutex_enter(SD_MUTEX(un)); 24752 } 24753 } 24754 24755 /* 24756 * Convert the partition-relative block number to a 24757 * disk physical block number. 24758 */ 24759 blkno += start_block; 24760 24761 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 24762 24763 24764 /* 24765 * Check if the device has a non-512 block size. 24766 */ 24767 wr_bp = NULL; 24768 if (NOT_DEVBSIZE(un)) { 24769 tgt_byte_offset = blkno * un->un_sys_blocksize; 24770 tgt_byte_count = nblk * un->un_sys_blocksize; 24771 if ((tgt_byte_offset % un->un_tgt_blocksize) || 24772 (tgt_byte_count % un->un_tgt_blocksize)) { 24773 doing_rmw = TRUE; 24774 /* 24775 * Calculate the block number and number of blocks 24776 * in terms of the media block size. 24777 */ 24778 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 24779 tgt_nblk = 24780 ((tgt_byte_offset + tgt_byte_count + 24781 (un->un_tgt_blocksize - 1)) / 24782 un->un_tgt_blocksize) - tgt_blkno; 24783 24784 /* 24785 * Invoke the routine which is going to do the read part 24786 * of the read-modify-write. 24787 * Note that this routine returns a pointer to 24788 * a valid bp in wr_bp. 24789 */ 24790 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 24791 &wr_bp); 24792 if (err) { 24793 mutex_exit(SD_MUTEX(un)); 24794 return (err); 24795 } 24796 /* 24797 * Offset is being calculated as - 24798 * (original block # * system block size) - 24799 * (new block # * target block size) 24800 */ 24801 io_start_offset = 24802 ((uint64_t)(blkno * un->un_sys_blocksize)) - 24803 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 24804 24805 ASSERT((io_start_offset >= 0) && 24806 (io_start_offset < un->un_tgt_blocksize)); 24807 /* 24808 * Do the modify portion of read modify write.
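 *
 * Worked example with hypothetical sizes: for 512-byte system blocks
 * and 2048-byte media blocks, blkno = 7 and nblk = 3 give
 * tgt_byte_offset = 3584 and tgt_byte_count = 1536, so
 * tgt_blkno = 3584 / 2048 = 1,
 * tgt_nblk = ((3584 + 1536 + 2047) / 2048) - 1 = 2, and
 * io_start_offset = 3584 - (1 * 2048) = 1536; the bcopy below then
 * places the 1536 bytes of dump data at byte 1536 of the
 * 2 x 2048-byte buffer read by sddump_do_read_of_rmw().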
24809 */ 24810 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 24811 (size_t)nblk * un->un_sys_blocksize); 24812 } else { 24813 doing_rmw = FALSE; 24814 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 24815 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 24816 } 24817 24818 /* Convert blkno and nblk to target blocks */ 24819 blkno = tgt_blkno; 24820 nblk = tgt_nblk; 24821 } else { 24822 wr_bp = &wr_buf; 24823 bzero(wr_bp, sizeof (struct buf)); 24824 wr_bp->b_flags = B_BUSY; 24825 wr_bp->b_un.b_addr = addr; 24826 wr_bp->b_bcount = nblk << DEV_BSHIFT; 24827 wr_bp->b_resid = 0; 24828 } 24829 24830 mutex_exit(SD_MUTEX(un)); 24831 24832 /* 24833 * Obtain a SCSI packet for the write command. 24834 * It should be safe to call the allocator here without 24835 * worrying about being locked for DVMA mapping because 24836 * the address we're passed is already a DVMA mapping 24837 * 24838 * We are also not going to worry about semaphore ownership 24839 * in the dump buffer. Dumping is single threaded at present. 24840 */ 24841 24842 wr_pktp = NULL; 24843 24844 dma_resid = wr_bp->b_bcount; 24845 oblkno = blkno; 24846 24847 while (dma_resid != 0) { 24848 24849 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 24850 wr_bp->b_flags &= ~B_ERROR; 24851 24852 if (un->un_partial_dma_supported == 1) { 24853 blkno = oblkno + 24854 ((wr_bp->b_bcount - dma_resid) / 24855 un->un_tgt_blocksize); 24856 nblk = dma_resid / un->un_tgt_blocksize; 24857 24858 if (wr_pktp) { 24859 /* 24860 * Partial DMA transfers after initial transfer 24861 */ 24862 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 24863 blkno, nblk); 24864 } else { 24865 /* Initial transfer */ 24866 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 24867 un->un_pkt_flags, NULL_FUNC, NULL, 24868 blkno, nblk); 24869 } 24870 } else { 24871 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 24872 0, NULL_FUNC, NULL, blkno, nblk); 24873 } 24874 24875 if (rval == 0) { 24876 /* We were given a SCSI packet, continue. 
*/ 24877 break; 24878 } 24879 24880 if (i == 0) { 24881 if (wr_bp->b_flags & B_ERROR) { 24882 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24883 "no resources for dumping; " 24884 "error code: 0x%x, retrying", 24885 geterror(wr_bp)); 24886 } else { 24887 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24888 "no resources for dumping; retrying"); 24889 } 24890 } else if (i != (SD_NDUMP_RETRIES - 1)) { 24891 if (wr_bp->b_flags & B_ERROR) { 24892 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24893 "no resources for dumping; error code: " 24894 "0x%x, retrying\n", geterror(wr_bp)); 24895 } 24896 } else { 24897 if (wr_bp->b_flags & B_ERROR) { 24898 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24899 "no resources for dumping; " 24900 "error code: 0x%x, retries failed, " 24901 "giving up.\n", geterror(wr_bp)); 24902 } else { 24903 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 24904 "no resources for dumping; " 24905 "retries failed, giving up.\n"); 24906 } 24907 mutex_enter(SD_MUTEX(un)); 24908 Restore_state(un); 24909 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 24910 mutex_exit(SD_MUTEX(un)); 24911 scsi_free_consistent_buf(wr_bp); 24912 } else { 24913 mutex_exit(SD_MUTEX(un)); 24914 } 24915 return (EIO); 24916 } 24917 drv_usecwait(10000); 24918 } 24919 24920 if (un->un_partial_dma_supported == 1) { 24921 /* 24922 * save the resid from PARTIAL_DMA 24923 */ 24924 dma_resid = wr_pktp->pkt_resid; 24925 if (dma_resid != 0) 24926 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 24927 wr_pktp->pkt_resid = 0; 24928 } else { 24929 dma_resid = 0; 24930 } 24931 24932 /* SunBug 1222170 */ 24933 wr_pktp->pkt_flags = FLAG_NOINTR; 24934 24935 err = EIO; 24936 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 24937 24938 /* 24939 * Scsi_poll returns 0 (success) if the command completes and 24940 * the status block is STATUS_GOOD. We should only check 24941 * errors if this condition is not true. Even then we should 24942 * send our own request sense packet only if we have a check 24943 * condition and auto request sense has not been performed by 24944 * the hba. 24945 */ 24946 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 24947 24948 if ((sd_scsi_poll(un, wr_pktp) == 0) && 24949 (wr_pktp->pkt_resid == 0)) { 24950 err = SD_SUCCESS; 24951 break; 24952 } 24953 24954 /* 24955 * Check CMD_DEV_GONE 1st, give up if device is gone. 
24956 */ 24957 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 24958 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 24959 "Error while dumping state...Device is gone\n"); 24960 break; 24961 } 24962 24963 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 24964 SD_INFO(SD_LOG_DUMP, un, 24965 "sddump: write failed with CHECK, try # %d\n", i); 24966 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 24967 (void) sd_send_polled_RQS(un); 24968 } 24969 24970 continue; 24971 } 24972 24973 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 24974 int reset_retval = 0; 24975 24976 SD_INFO(SD_LOG_DUMP, un, 24977 "sddump: write failed with BUSY, try # %d\n", i); 24978 24979 if (un->un_f_lun_reset_enabled == TRUE) { 24980 reset_retval = scsi_reset(SD_ADDRESS(un), 24981 RESET_LUN); 24982 } 24983 if (reset_retval == 0) { 24984 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 24985 } 24986 (void) sd_send_polled_RQS(un); 24987 24988 } else { 24989 SD_INFO(SD_LOG_DUMP, un, 24990 "sddump: write failed with 0x%x, try # %d\n", 24991 SD_GET_PKT_STATUS(wr_pktp), i); 24992 mutex_enter(SD_MUTEX(un)); 24993 sd_reset_target(un, wr_pktp); 24994 mutex_exit(SD_MUTEX(un)); 24995 } 24996 24997 /* 24998 * If we are not getting anywhere with lun/target resets, 24999 * let's reset the bus. 25000 */ 25001 if (i == SD_NDUMP_RETRIES/2) { 25002 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 25003 (void) sd_send_polled_RQS(un); 25004 } 25005 } 25006 } 25007 25008 scsi_destroy_pkt(wr_pktp); 25009 mutex_enter(SD_MUTEX(un)); 25010 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 25011 mutex_exit(SD_MUTEX(un)); 25012 scsi_free_consistent_buf(wr_bp); 25013 } else { 25014 mutex_exit(SD_MUTEX(un)); 25015 } 25016 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 25017 return (err); 25018 } 25019 25020 /* 25021 * Function: sd_scsi_poll() 25022 * 25023 * Description: This is a wrapper for the scsi_poll call. 25024 * 25025 * Arguments: sd_lun - The unit structure 25026 * scsi_pkt - The scsi packet being sent to the device. 25027 * 25028 * Return Code: 0 - Command completed successfully with good status 25029 * -1 - Command failed. This could indicate a check condition 25030 * or other status value requiring recovery action. 25031 * 25032 * NOTE: This code is only called off sddump(). 25033 */ 25034 25035 static int 25036 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 25037 { 25038 int status; 25039 25040 ASSERT(un != NULL); 25041 ASSERT(!mutex_owned(SD_MUTEX(un))); 25042 ASSERT(pktp != NULL); 25043 25044 status = SD_SUCCESS; 25045 25046 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 25047 pktp->pkt_flags |= un->un_tagflags; 25048 pktp->pkt_flags &= ~FLAG_NODISCON; 25049 } 25050 25051 status = sd_ddi_scsi_poll(pktp); 25052 /* 25053 * Scsi_poll returns 0 (success) if the command completes and the 25054 * status block is STATUS_GOOD. We should only check errors if this 25055 * condition is not true. Even then we should send our own request 25056 * sense packet only if we have a check condition and auto 25057 * request sense has not been performed by the hba. 25058 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 25059 */ 25060 if ((status != SD_SUCCESS) && 25061 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 25062 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 25063 (pktp->pkt_reason != CMD_DEV_GONE)) 25064 (void) sd_send_polled_RQS(un); 25065 25066 return (status); 25067 } 25068 25069 /* 25070 * Function: sd_send_polled_RQS() 25071 * 25072 * Description: This sends the request sense command to a device. 
25073 * 25074 * Arguments: sd_lun - The unit structure 25075 * 25076 * Return Code: 0 - Command completed successfully with good status 25077 * -1 - Command failed. 25078 * 25079 */ 25080 25081 static int 25082 sd_send_polled_RQS(struct sd_lun *un) 25083 { 25084 int ret_val; 25085 struct scsi_pkt *rqs_pktp; 25086 struct buf *rqs_bp; 25087 25088 ASSERT(un != NULL); 25089 ASSERT(!mutex_owned(SD_MUTEX(un))); 25090 25091 ret_val = SD_SUCCESS; 25092 25093 rqs_pktp = un->un_rqs_pktp; 25094 rqs_bp = un->un_rqs_bp; 25095 25096 mutex_enter(SD_MUTEX(un)); 25097 25098 if (un->un_sense_isbusy) { 25099 ret_val = SD_FAILURE; 25100 mutex_exit(SD_MUTEX(un)); 25101 return (ret_val); 25102 } 25103 25104 /* 25105 * If the request sense buffer (and packet) is not in use, 25106 * let's set the un_sense_isbusy and send our packet 25107 */ 25108 un->un_sense_isbusy = 1; 25109 rqs_pktp->pkt_resid = 0; 25110 rqs_pktp->pkt_reason = 0; 25111 rqs_pktp->pkt_flags |= FLAG_NOINTR; 25112 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 25113 25114 mutex_exit(SD_MUTEX(un)); 25115 25116 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 25117 " 0x%p\n", rqs_bp->b_un.b_addr); 25118 25119 /* 25120 * Can't send this to sd_scsi_poll, we wrap ourselves around the 25121 * axle - it has a call into us! 25122 */ 25123 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 25124 SD_INFO(SD_LOG_COMMON, un, 25125 "sd_send_polled_RQS: RQS failed\n"); 25126 } 25127 25128 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 25129 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 25130 25131 mutex_enter(SD_MUTEX(un)); 25132 un->un_sense_isbusy = 0; 25133 mutex_exit(SD_MUTEX(un)); 25134 25135 return (ret_val); 25136 } 25137 25138 /* 25139 * Defines needed for localized version of the scsi_poll routine. 25140 */ 25141 #define CSEC 10000 /* usecs */ 25142 #define SEC_TO_CSEC (1000000/CSEC) 25143 25144 /* 25145 * Function: sd_ddi_scsi_poll() 25146 * 25147 * Description: Localized version of the scsi_poll routine. The purpose is to 25148 * send a scsi_pkt to a device as a polled command. This version 25149 * is to ensure more robust handling of transport errors. 25150 * Specifically this routine cures not ready, coming ready 25151 * transition for power up and reset of sonoma's. This can take 25152 * up to 45 seconds for power-on and 20 seconds for reset of a 25153 * sonoma lun. 25154 * 25155 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 25156 * 25157 * Return Code: 0 - Command completed successfully with good status 25158 * -1 - Command failed. 25159 * 25160 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can 25161 * be fixed (removing this code), we need to determine how to handle the 25162 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump(). 25163 * 25164 * NOTE: This code is only called off sddump(). 25165 */ 25166 static int 25167 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 25168 { 25169 int rval = -1; 25170 int savef; 25171 long savet; 25172 void (*savec)(); 25173 int timeout; 25174 int busy_count; 25175 int poll_delay; 25176 int rc; 25177 uint8_t *sensep; 25178 struct scsi_arq_status *arqstat; 25179 extern int do_polled_io; 25180 25181 ASSERT(pkt->pkt_scbp); 25182 25183 /* 25184 * save old flags.. 
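 *
 * Retry-budget arithmetic for the loop below (worked example): with
 * CSEC = 10000 usec, SEC_TO_CSEC is 100, so the SCSI_POLL_TIMEOUT
 * default of 60 seconds yields timeout = 60 * 100 = 6000 iterations
 * of nominally 10 msec each. Conditions retried once per second
 * (BUSY, NOT READY) add SEC_TO_CSEC - 1 = 99 to busy_count, so each
 * such retry consumes a full second of the budget.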
25185 */ 25186 savef = pkt->pkt_flags; 25187 savec = pkt->pkt_comp; 25188 savet = pkt->pkt_time; 25189 25190 pkt->pkt_flags |= FLAG_NOINTR; 25191 25192 /* 25193 * XXX there is nothing in the SCSA spec that states that we should not 25194 * do a callback for polled cmds; however, removing this will break sd 25195 * and probably other target drivers 25196 */ 25197 pkt->pkt_comp = NULL; 25198 25199 /* 25200 * we don't like a polled command without timeout. 25201 * 60 seconds seems long enough. 25202 */ 25203 if (pkt->pkt_time == 0) 25204 pkt->pkt_time = SCSI_POLL_TIMEOUT; 25205 25206 /* 25207 * Send polled cmd. 25208 * 25209 * We do some error recovery for various errors. Tran_busy, 25210 * queue full, and non-dispatched commands are retried every 10 msec. 25211 * as they are typically transient failures. Busy status and Not 25212 * Ready are retried every second as this status takes a while to 25213 * change. 25214 */ 25215 timeout = pkt->pkt_time * SEC_TO_CSEC; 25216 25217 for (busy_count = 0; busy_count < timeout; busy_count++) { 25218 /* 25219 * Initialize pkt status variables. 25220 */ 25221 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 25222 25223 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 25224 if (rc != TRAN_BUSY) { 25225 /* Transport failed - give up. */ 25226 break; 25227 } else { 25228 /* Transport busy - try again. */ 25229 poll_delay = 1 * CSEC; /* 10 msec. */ 25230 } 25231 } else { 25232 /* 25233 * Transport accepted - check pkt status. 25234 */ 25235 rc = (*pkt->pkt_scbp) & STATUS_MASK; 25236 if ((pkt->pkt_reason == CMD_CMPLT) && 25237 (rc == STATUS_CHECK) && 25238 (pkt->pkt_state & STATE_ARQ_DONE)) { 25239 arqstat = 25240 (struct scsi_arq_status *)(pkt->pkt_scbp); 25241 sensep = (uint8_t *)&arqstat->sts_sensedata; 25242 } else { 25243 sensep = NULL; 25244 } 25245 25246 if ((pkt->pkt_reason == CMD_CMPLT) && 25247 (rc == STATUS_GOOD)) { 25248 /* No error - we're done */ 25249 rval = 0; 25250 break; 25251 25252 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 25253 /* Lost connection - give up */ 25254 break; 25255 25256 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 25257 (pkt->pkt_state == 0)) { 25258 /* Pkt not dispatched - try again. */ 25259 poll_delay = 1 * CSEC; /* 10 msec. */ 25260 25261 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25262 (rc == STATUS_QFULL)) { 25263 /* Queue full - try again. */ 25264 poll_delay = 1 * CSEC; /* 10 msec. */ 25265 25266 } else if ((pkt->pkt_reason == CMD_CMPLT) && 25267 (rc == STATUS_BUSY)) { 25268 /* Busy - try again. */ 25269 poll_delay = 100 * CSEC; /* 1 sec. */ 25270 busy_count += (SEC_TO_CSEC - 1); 25271 25272 } else if ((sensep != NULL) && 25273 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) { 25274 /* 25275 * Unit Attention - try again. 25276 * Pretend it took 1 sec. 25277 * NOTE: 'continue' avoids poll_delay 25278 */ 25279 busy_count += (SEC_TO_CSEC - 1); 25280 continue; 25281 25282 } else if ((sensep != NULL) && 25283 (scsi_sense_key(sensep) == KEY_NOT_READY) && 25284 (scsi_sense_asc(sensep) == 0x04) && 25285 (scsi_sense_ascq(sensep) == 0x01)) { 25286 /* 25287 * Not ready -> ready - try again. 25288 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY 25289 * ...same as STATUS_BUSY 25290 */ 25291 poll_delay = 100 * CSEC; /* 1 sec. */ 25292 busy_count += (SEC_TO_CSEC - 1); 25293 25294 } else { 25295 /* BAD status - give up. 
*/ 25296 break; 25297 } 25298 } 25299 25300 if (((curthread->t_flag & T_INTR_THREAD) == 0) && 25301 !do_polled_io) { 25302 delay(drv_usectohz(poll_delay)); 25303 } else { 25304 /* we busy wait during cpr_dump or interrupt threads */ 25305 drv_usecwait(poll_delay); 25306 } 25307 } 25308 25309 pkt->pkt_flags = savef; 25310 pkt->pkt_comp = savec; 25311 pkt->pkt_time = savet; 25312 25313 /* return on error */ 25314 if (rval) 25315 return (rval); 25316 25317 /* 25318 * This is not a performance critical code path. 25319 * 25320 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync() 25321 * issues associated with looking at DMA memory prior to 25322 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return. 25323 */ 25324 scsi_sync_pkt(pkt); 25325 return (0); 25326 } 25327 25328 25329 25330 /* 25331 * Function: sd_persistent_reservation_in_read_keys 25332 * 25333 * Description: This routine is the driver entry point for handling CD-ROM 25334 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 25335 * by sending the SCSI-3 PRIN commands to the device. 25336 * Processes the read keys command response by copying the 25337 * reservation key information into the user provided buffer. 25338 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 25339 * 25340 * Arguments: un - Pointer to soft state struct for the target. 25341 * usrp - user provided pointer to multihost Persistent In Read 25342 * Keys structure (mhioc_inkeys_t) 25343 * flag - this argument is a pass through to ddi_copyxxx() 25344 * directly from the mode argument of ioctl(). 25345 * 25346 * Return Code: 0 - Success 25347 * EACCES 25348 * ENOTSUP 25349 * errno return code from sd_send_scsi_cmd() 25350 * 25351 * Context: Can sleep. Does not return until command is completed. 25352 */ 25353 25354 static int 25355 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 25356 mhioc_inkeys_t *usrp, int flag) 25357 { 25358 #ifdef _MULTI_DATAMODEL 25359 struct mhioc_key_list32 li32; 25360 #endif 25361 sd_prin_readkeys_t *in; 25362 mhioc_inkeys_t *ptr; 25363 mhioc_key_list_t li; 25364 uchar_t *data_bufp; 25365 int data_len; 25366 int rval = 0; 25367 size_t copysz; 25368 sd_ssc_t *ssc; 25369 25370 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 25371 return (EINVAL); 25372 } 25373 bzero(&li, sizeof (mhioc_key_list_t)); 25374 25375 ssc = sd_ssc_init(un); 25376 25377 /* 25378 * Get the listsize from user 25379 */ 25380 #ifdef _MULTI_DATAMODEL 25381 25382 switch (ddi_model_convert_from(flag & FMODELS)) { 25383 case DDI_MODEL_ILP32: 25384 copysz = sizeof (struct mhioc_key_list32); 25385 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 25386 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25387 "sd_persistent_reservation_in_read_keys: " 25388 "failed ddi_copyin: mhioc_key_list32_t\n"); 25389 rval = EFAULT; 25390 goto done; 25391 } 25392 li.listsize = li32.listsize; 25393 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 25394 break; 25395 25396 case DDI_MODEL_NONE: 25397 copysz = sizeof (mhioc_key_list_t); 25398 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 25399 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25400 "sd_persistent_reservation_in_read_keys: " 25401 "failed ddi_copyin: mhioc_key_list_t\n"); 25402 rval = EFAULT; 25403 goto done; 25404 } 25405 break; 25406 } 25407 25408 #else /* ! 
_MULTI_DATAMODEL */ 25409 copysz = sizeof (mhioc_key_list_t); 25410 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 25411 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25412 "sd_persistent_reservation_in_read_keys: " 25413 "failed ddi_copyin: mhioc_key_list_t\n"); 25414 rval = EFAULT; 25415 goto done; 25416 } 25417 #endif 25418 25419 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 25420 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 25421 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 25422 25423 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 25424 data_len, data_bufp); 25425 if (rval != 0) { 25426 if (rval == EIO) 25427 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 25428 else 25429 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 25430 goto done; 25431 } 25432 in = (sd_prin_readkeys_t *)data_bufp; 25433 ptr->generation = BE_32(in->generation); 25434 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 25435 25436 /* 25437 * Return the min(listsize, listlen) keys 25438 */ 25439 #ifdef _MULTI_DATAMODEL 25440 25441 switch (ddi_model_convert_from(flag & FMODELS)) { 25442 case DDI_MODEL_ILP32: 25443 li32.listlen = li.listlen; 25444 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 25445 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25446 "sd_persistent_reservation_in_read_keys: " 25447 "failed ddi_copyout: mhioc_key_list32_t\n"); 25448 rval = EFAULT; 25449 goto done; 25450 } 25451 break; 25452 25453 case DDI_MODEL_NONE: 25454 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 25455 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25456 "sd_persistent_reservation_in_read_keys: " 25457 "failed ddi_copyout: mhioc_key_list_t\n"); 25458 rval = EFAULT; 25459 goto done; 25460 } 25461 break; 25462 } 25463 25464 #else /* ! _MULTI_DATAMODEL */ 25465 25466 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 25467 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25468 "sd_persistent_reservation_in_read_keys: " 25469 "failed ddi_copyout: mhioc_key_list_t\n"); 25470 rval = EFAULT; 25471 goto done; 25472 } 25473 25474 #endif /* _MULTI_DATAMODEL */ 25475 25476 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 25477 li.listsize * MHIOC_RESV_KEY_SIZE); 25478 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 25479 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25480 "sd_persistent_reservation_in_read_keys: " 25481 "failed ddi_copyout: keylist\n"); 25482 rval = EFAULT; 25483 } 25484 done: 25485 sd_ssc_fini(ssc); 25486 kmem_free(data_bufp, data_len); 25487 return (rval); 25488 } 25489 25490 25491 /* 25492 * Function: sd_persistent_reservation_in_read_resv 25493 * 25494 * Description: This routine is the driver entry point for handling CD-ROM 25495 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 25496 * by sending the SCSI-3 PRIN commands to the device. 25497 * Process the read persistent reservations command response by 25498 * copying the reservation information into the user provided 25499 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 25500 * 25501 * Arguments: un - Pointer to soft state struct for the target. 25502 * usrp - user provided pointer to multihost Persistent In Read 25503 * Reservations structure (mhioc_inresvs_t) 25504 * flag - this argument is a pass through to ddi_copyxxx() 25505 * directly from the mode argument of ioctl(). 25506 * 25507 * Return Code: 0 - Success 25508 * EACCES 25509 * ENOTSUP 25510 * errno return code from sd_send_scsi_cmd() 25511 * 25512 * Context: Can sleep. Does not return until command is completed.
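 *
 * A minimal user-land sketch of the calling convention (illustrative
 * only: assumes the mhioc_inresvs_t and mhioc_resv_desc_list_t layouts
 * from <sys/mhd.h>, and error handling is elided):
 */
#if 0	/* illustrative sketch, not compiled with the driver */
#include <sys/mhd.h>
#include <sys/ioctl.h>

static void
example_read_reservations(int fd)
{
	mhioc_resv_desc_t descs[4];
	mhioc_resv_desc_list_t rlist;
	mhioc_inresvs_t inresv;

	rlist.listsize = 4;	/* capacity of descs[] */
	rlist.listlen = 0;	/* filled in by the driver */
	rlist.list = descs;
	inresv.li = &rlist;

	(void) ioctl(fd, MHIOCGRP_INRESV, &inresv);
	/* on return, min(listsize, listlen) descriptors are in descs[] */
}
#endif
	/*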
25513 */ 25514 25515 static int 25516 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 25517 mhioc_inresvs_t *usrp, int flag) 25518 { 25519 #ifdef _MULTI_DATAMODEL 25520 struct mhioc_resv_desc_list32 resvlist32; 25521 #endif 25522 sd_prin_readresv_t *in; 25523 mhioc_inresvs_t *ptr; 25524 sd_readresv_desc_t *readresv_ptr; 25525 mhioc_resv_desc_list_t resvlist; 25526 mhioc_resv_desc_t resvdesc; 25527 uchar_t *data_bufp = NULL; 25528 int data_len; 25529 int rval = 0; 25530 int i; 25531 size_t copysz; 25532 mhioc_resv_desc_t *bufp; 25533 sd_ssc_t *ssc; 25534 25535 if ((ptr = usrp) == NULL) { 25536 return (EINVAL); 25537 } 25538 25539 ssc = sd_ssc_init(un); 25540 25541 /* 25542 * Get the listsize from user 25543 */ 25544 #ifdef _MULTI_DATAMODEL 25545 switch (ddi_model_convert_from(flag & FMODELS)) { 25546 case DDI_MODEL_ILP32: 25547 copysz = sizeof (struct mhioc_resv_desc_list32); 25548 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 25549 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25550 "sd_persistent_reservation_in_read_resv: " 25551 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 25552 rval = EFAULT; 25553 goto done; 25554 } 25555 resvlist.listsize = resvlist32.listsize; 25556 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 25557 break; 25558 25559 case DDI_MODEL_NONE: 25560 copysz = sizeof (mhioc_resv_desc_list_t); 25561 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 25562 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25563 "sd_persistent_reservation_in_read_resv: " 25564 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 25565 rval = EFAULT; 25566 goto done; 25567 } 25568 break; 25569 } 25570 #else /* ! _MULTI_DATAMODEL */ 25571 copysz = sizeof (mhioc_resv_desc_list_t); 25572 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 25573 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25574 "sd_persistent_reservation_in_read_resv: " 25575 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 25576 rval = EFAULT; 25577 goto done; 25578 } 25579 #endif /* ! _MULTI_DATAMODEL */ 25580 25581 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 25582 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 25583 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 25584 25585 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV, 25586 data_len, data_bufp); 25587 if (rval != 0) { 25588 if (rval == EIO) 25589 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE); 25590 else 25591 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 25592 goto done; 25593 } 25594 in = (sd_prin_readresv_t *)data_bufp; 25595 ptr->generation = BE_32(in->generation); 25596 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 25597 25598 /* 25599 * Return the min(listsize, listlen) keys 25600 */ 25601 #ifdef _MULTI_DATAMODEL 25602 25603 switch (ddi_model_convert_from(flag & FMODELS)) { 25604 case DDI_MODEL_ILP32: 25605 resvlist32.listlen = resvlist.listlen; 25606 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 25607 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25608 "sd_persistent_reservation_in_read_resv: " 25609 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 25610 rval = EFAULT; 25611 goto done; 25612 } 25613 break; 25614 25615 case DDI_MODEL_NONE: 25616 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 25617 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25618 "sd_persistent_reservation_in_read_resv: " 25619 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 25620 rval = EFAULT; 25621 goto done; 25622 } 25623 break; 25624 } 25625 25626 #else /* !
_MULTI_DATAMODEL */ 25627 25628 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 25629 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25630 "sd_persistent_reservation_in_read_resv: " 25631 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 25632 rval = EFAULT; 25633 goto done; 25634 } 25635 25636 #endif /* ! _MULTI_DATAMODEL */ 25637 25638 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 25639 bufp = resvlist.list; 25640 copysz = sizeof (mhioc_resv_desc_t); 25641 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 25642 i++, readresv_ptr++, bufp++) { 25643 25644 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 25645 MHIOC_RESV_KEY_SIZE); 25646 resvdesc.type = readresv_ptr->type; 25647 resvdesc.scope = readresv_ptr->scope; 25648 resvdesc.scope_specific_addr = 25649 BE_32(readresv_ptr->scope_specific_addr); 25650 25651 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 25652 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25653 "sd_persistent_reservation_in_read_resv: " 25654 "failed ddi_copyout: resvlist\n"); 25655 rval = EFAULT; 25656 goto done; 25657 } 25658 } 25659 done: 25660 sd_ssc_fini(ssc); 25661 /* free data_bufp only if it was allocated */ 25662 if (data_bufp) { 25663 kmem_free(data_bufp, data_len); 25664 } 25665 return (rval); 25666 } 25667 25668 25669 /* 25670 * Function: sr_change_blkmode() 25671 * 25672 * Description: This routine is the driver entry point for handling CD-ROM 25673 * block mode ioctl requests. Support for returning and changing 25674 * the current block size in use by the device is implemented. The 25675 * LBA size is changed via a MODE SELECT Block Descriptor. 25676 * 25677 * This routine issues a mode sense with an allocation length of 25678 * 12 bytes for the mode page header and a single block descriptor. 25679 * 25680 * Arguments: dev - the device 'dev_t' 25681 * cmd - the request type; one of CDROMGBLKMODE (get) or 25682 * CDROMSBLKMODE (set) 25683 * data - current block size or requested block size 25684 * flag - this argument is a pass through to ddi_copyxxx() directly 25685 * from the mode argument of ioctl(). 25686 * 25687 * Return Code: the code returned by sd_send_scsi_cmd() 25688 * EINVAL if invalid arguments are provided 25689 * EFAULT if ddi_copyxxx() fails 25690 * ENXIO if ddi_get_soft_state fails 25691 * EIO if invalid mode sense block descriptor length 25692 * 25693 */ 25694 25695 static int 25696 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 25697 { 25698 struct sd_lun *un = NULL; 25699 struct mode_header *sense_mhp, *select_mhp; 25700 struct block_descriptor *sense_desc, *select_desc; 25701 int current_bsize; 25702 int rval = EINVAL; 25703 uchar_t *sense = NULL; 25704 uchar_t *select = NULL; 25705 sd_ssc_t *ssc; 25706 25707 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 25708 25709 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25710 return (ENXIO); 25711 } 25712 25713 /* 25714 * The block length is changed via the Mode Select block descriptor; the 25715 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 25716 * required as part of this routine. Therefore the mode sense allocation 25717 * length is specified to be the length of a mode page header and a 25718 * block descriptor.
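 *
 * That is, BUFLEN_CHG_BLK_MODE = MODE_HEADER_LENGTH +
 * MODE_BLK_DESC_LENGTH (4 + 8 = 12 bytes), with the 24-bit block
 * length in the last three bytes of the block descriptor
 * (blksize_hi/mid/lo below). For context, a minimal user-land sketch
 * of the ioctl calling convention handled here (illustrative only:
 * assumes an already-open CD-ROM fd, and error handling is elided):
 */
#if 0	/* illustrative sketch, not compiled with the driver */
#include <sys/cdio.h>
#include <sys/ioctl.h>

static void
example_blkmode(int fd)
{
	int cur;

	/* CDROMGBLKMODE copies the current size out through the arg */
	(void) ioctl(fd, CDROMGBLKMODE, &cur);

	/* CDROMSBLKMODE passes the requested size as the arg value */
	(void) ioctl(fd, CDROMSBLKMODE, CDROM_BLK_2048);
}
#endif
	/*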
25719 */ 25720 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 25721 25722 ssc = sd_ssc_init(un); 25723 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 25724 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 25725 sd_ssc_fini(ssc); 25726 if (rval != 0) { 25727 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25728 "sr_change_blkmode: Mode Sense Failed\n"); 25729 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 25730 return (rval); 25731 } 25732 25733 /* Check the block descriptor len to handle only 1 block descriptor */ 25734 sense_mhp = (struct mode_header *)sense; 25735 if ((sense_mhp->bdesc_length == 0) || 25736 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 25737 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25738 "sr_change_blkmode: Mode Sense returned invalid block" 25739 " descriptor length\n"); 25740 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 25741 return (EIO); 25742 } 25743 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 25744 current_bsize = ((sense_desc->blksize_hi << 16) | 25745 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 25746 25747 /* Process command */ 25748 switch (cmd) { 25749 case CDROMGBLKMODE: 25750 /* Return the block size obtained during the mode sense */ 25751 if (ddi_copyout(&current_bsize, (void *)data, 25752 sizeof (int), flag) != 0) 25753 rval = EFAULT; 25754 break; 25755 case CDROMSBLKMODE: 25756 /* Validate the requested block size */ 25757 switch (data) { 25758 case CDROM_BLK_512: 25759 case CDROM_BLK_1024: 25760 case CDROM_BLK_2048: 25761 case CDROM_BLK_2056: 25762 case CDROM_BLK_2336: 25763 case CDROM_BLK_2340: 25764 case CDROM_BLK_2352: 25765 case CDROM_BLK_2368: 25766 case CDROM_BLK_2448: 25767 case CDROM_BLK_2646: 25768 case CDROM_BLK_2647: 25769 break; 25770 default: 25771 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25772 "sr_change_blkmode: " 25773 "Block Size '%ld' Not Supported\n", data); 25774 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 25775 return (EINVAL); 25776 } 25777 25778 /* 25779 * The current block size matches the requested block size so 25780 * there is no need to send the mode select to change the size 25781 */ 25782 if (current_bsize == data) { 25783 break; 25784 } 25785 25786 /* Build the select data for the requested block size */ 25787 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 25788 select_mhp = (struct mode_header *)select; 25789 select_desc = 25790 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 25791 /* 25792 * The LBA size is changed via the block descriptor, so the 25793 * descriptor is built according to the user data 25794 */ 25795 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 25796 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 25797 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 25798 select_desc->blksize_lo = (char)((data) & 0x000000ff); 25799 25800 /* Send the mode select for the requested block size */ 25801 ssc = sd_ssc_init(un); 25802 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 25803 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 25804 SD_PATH_STANDARD); 25805 sd_ssc_fini(ssc); 25806 if (rval != 0) { 25807 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25808 "sr_change_blkmode: Mode Select Failed\n"); 25809 /* 25810 * The mode select failed for the requested block size, 25811 * so reset the data for the original block size and 25812 * send it to the target. The error is indicated by the 25813 * return value for the failed mode select.
25814 */ 25815 select_desc->blksize_hi = sense_desc->blksize_hi; 25816 select_desc->blksize_mid = sense_desc->blksize_mid; 25817 select_desc->blksize_lo = sense_desc->blksize_lo; 25818 ssc = sd_ssc_init(un); 25819 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, 25820 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 25821 SD_PATH_STANDARD); 25822 sd_ssc_fini(ssc); 25823 } else { 25824 ASSERT(!mutex_owned(SD_MUTEX(un))); 25825 mutex_enter(SD_MUTEX(un)); 25826 sd_update_block_info(un, (uint32_t)data, 0); 25827 mutex_exit(SD_MUTEX(un)); 25828 } 25829 break; 25830 default: 25831 /* should not reach here, but check anyway */ 25832 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25833 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 25834 rval = EINVAL; 25835 break; 25836 } 25837 25838 if (select) { 25839 kmem_free(select, BUFLEN_CHG_BLK_MODE); 25840 } 25841 if (sense) { 25842 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 25843 } 25844 return (rval); 25845 } 25846 25847 25848 /* 25849 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 25850 * implement driver support for getting and setting the CD speed. The command 25851 * set used will be based on the device type. If the device has not been 25852 * identified as MMC the Toshiba vendor specific mode page will be used. If 25853 * the device is MMC but does not support the Real Time Streaming feature 25854 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 25855 * be used to read the speed. 25856 */ 25857 25858 /* 25859 * Function: sr_change_speed() 25860 * 25861 * Description: This routine is the driver entry point for handling CD-ROM 25862 * drive speed ioctl requests for devices supporting the Toshiba 25863 * vendor specific drive speed mode page. Support for returning 25864 * and changing the current drive speed in use by the device is 25865 * implemented. 25866 * 25867 * Arguments: dev - the device 'dev_t' 25868 * cmd - the request type; one of CDROMGDRVSPEED (get) or 25869 * CDROMSDRVSPEED (set) 25870 * data - current drive speed or requested drive speed 25871 * flag - this argument is a pass through to ddi_copyxxx() directly 25872 * from the mode argument of ioctl(). 25873 * 25874 * Return Code: the code returned by sd_send_scsi_cmd() 25875 * EINVAL if invalid arguments are provided 25876 * EFAULT if ddi_copyxxx() fails 25877 * ENXIO if fail ddi_get_soft_state 25878 * EIO if invalid mode sense block descriptor length 25879 */ 25880 25881 static int 25882 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 25883 { 25884 struct sd_lun *un = NULL; 25885 struct mode_header *sense_mhp, *select_mhp; 25886 struct mode_speed *sense_page, *select_page; 25887 int current_speed; 25888 int rval = EINVAL; 25889 int bd_len; 25890 uchar_t *sense = NULL; 25891 uchar_t *select = NULL; 25892 sd_ssc_t *ssc; 25893 25894 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 25895 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25896 return (ENXIO); 25897 } 25898 25899 /* 25900 * Note: The drive speed is being modified here according to a Toshiba 25901 * vendor specific mode page (0x31). 
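 *
 * A minimal sketch of the page as it is built below (field names from
 * struct mode_speed; the layout is vendor defined, so this is
 * descriptive rather than normative):
 *
 *	mode_page.code   = CDROM_MODE_SPEED	(page 0x31)
 *	mode_page.length = 2
 *	speed            = one of the CDROM_*_SPEED codes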
25902 */ 25903 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 25904 25905 ssc = sd_ssc_init(un); 25906 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 25907 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 25908 SD_PATH_STANDARD); 25909 sd_ssc_fini(ssc); 25910 if (rval != 0) { 25911 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25912 "sr_change_speed: Mode Sense Failed\n"); 25913 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 25914 return (rval); 25915 } 25916 sense_mhp = (struct mode_header *)sense; 25917 25918 /* Check the block descriptor len to handle only 1 block descriptor */ 25919 bd_len = sense_mhp->bdesc_length; 25920 if (bd_len > MODE_BLK_DESC_LENGTH) { 25921 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25922 "sr_change_speed: Mode Sense returned invalid block " 25923 "descriptor length\n"); 25924 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 25925 return (EIO); 25926 } 25927 25928 sense_page = (struct mode_speed *) 25929 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 25930 current_speed = sense_page->speed; 25931 25932 /* Process command */ 25933 switch (cmd) { 25934 case CDROMGDRVSPEED: 25935 /* Return the drive speed obtained during the mode sense */ 25936 if (current_speed == 0x2) { 25937 current_speed = CDROM_TWELVE_SPEED; 25938 } 25939 if (ddi_copyout(&current_speed, (void *)data, 25940 sizeof (int), flag) != 0) { 25941 rval = EFAULT; 25942 } 25943 break; 25944 case CDROMSDRVSPEED: 25945 /* Validate the requested drive speed */ 25946 switch ((uchar_t)data) { 25947 case CDROM_TWELVE_SPEED: 25948 data = 0x2; 25949 /*FALLTHROUGH*/ 25950 case CDROM_NORMAL_SPEED: 25951 case CDROM_DOUBLE_SPEED: 25952 case CDROM_QUAD_SPEED: 25953 case CDROM_MAXIMUM_SPEED: 25954 break; 25955 default: 25956 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25957 "sr_change_speed: " 25958 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 25959 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 25960 return (EINVAL); 25961 } 25962 25963 /* 25964 * The current drive speed matches the requested drive speed so 25965 * there is no need to send the mode select to change the speed 25966 */ 25967 if (current_speed == data) { 25968 break; 25969 } 25970 25971 /* Build the select data for the requested drive speed */ 25972 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 25973 select_mhp = (struct mode_header *)select; 25974 select_mhp->bdesc_length = 0; 25975 select_page = 25976 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 25979 select_page->mode_page.code = CDROM_MODE_SPEED; 25980 select_page->mode_page.length = 2; 25981 select_page->speed = (uchar_t)data; 25982 25983 /* Send the mode select for the requested drive speed */ 25984 ssc = sd_ssc_init(un); 25985 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 25986 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 25987 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 25988 sd_ssc_fini(ssc); 25989 if (rval != 0) { 25990 /* 25991 * The mode select failed for the requested drive speed, 25992 * so reset the data for the original drive speed and 25993 * send it to the target. The error is indicated by the 25994 * return value for the failed mode select.
25995 */ 25996 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25997 "sr_change_speed: Mode Select Failed\n"); 25998 select_page->speed = sense_page->speed; 25999 ssc = sd_ssc_init(un); 26000 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 26001 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26002 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26003 sd_ssc_fini(ssc); 26004 } 26005 break; 26006 default: 26007 /* should not reach here, but check anyway */ 26008 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26009 "sr_change_speed: Command '%x' Not Supported\n", cmd); 26010 rval = EINVAL; 26011 break; 26012 } 26013 26014 if (select) { 26015 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 26016 } 26017 if (sense) { 26018 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26019 } 26020 26021 return (rval); 26022 } 26023 26024 26025 /* 26026 * Function: sr_atapi_change_speed() 26027 * 26028 * Description: This routine is the driver entry point for handling CD-ROM 26029 * drive speed ioctl requests for MMC devices that do not support 26030 * the Real Time Streaming feature (0x107). 26031 * 26032 * Note: This routine will use the SET SPEED command which may not 26033 * be supported by all devices. 26034 * 26035 * Arguments: dev - the device 'dev_t' 26036 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26037 * CDROMSDRVSPEED (set) 26038 * data - current drive speed or requested drive speed 26039 * flag - this argument is a pass through to ddi_copyxxx() directly 26040 * from the mode argument of ioctl(). 26041 * 26042 * Return Code: the code returned by sd_send_scsi_cmd() 26043 * EINVAL if invalid arguments are provided 26044 * EFAULT if ddi_copyxxx() fails 26045 * ENXIO if fail ddi_get_soft_state 26046 * EIO if invalid mode sense block descriptor length 26047 */ 26048 26049 static int 26050 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26051 { 26052 struct sd_lun *un; 26053 struct uscsi_cmd *com = NULL; 26054 struct mode_header_grp2 *sense_mhp; 26055 uchar_t *sense_page; 26056 uchar_t *sense = NULL; 26057 char cdb[CDB_GROUP5]; 26058 int bd_len; 26059 int current_speed = 0; 26060 int max_speed = 0; 26061 int rval; 26062 sd_ssc_t *ssc; 26063 26064 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26065 26066 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26067 return (ENXIO); 26068 } 26069 26070 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 26071 26072 ssc = sd_ssc_init(un); 26073 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 26074 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 26075 SD_PATH_STANDARD); 26076 sd_ssc_fini(ssc); 26077 if (rval != 0) { 26078 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26079 "sr_atapi_change_speed: Mode Sense Failed\n"); 26080 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26081 return (rval); 26082 } 26083 26084 /* Check the block descriptor len to handle only 1 block descriptor */ 26085 sense_mhp = (struct mode_header_grp2 *)sense; 26086 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 26087 if (bd_len > MODE_BLK_DESC_LENGTH) { 26088 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26089 "sr_atapi_change_speed: Mode Sense returned invalid " 26090 "block descriptor length\n"); 26091 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26092 return (EIO); 26093 } 26094 26095 /* Calculate the current and maximum drive speeds */ 26096 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26097 current_speed = (sense_page[14] << 8) | sense_page[15]; 26098 max_speed = (sense_page[8] << 8) | sense_page[9]; 26099 26100
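	/*
	 * Illustrative arithmetic, assuming SD_SPEED_1X is the nominal
	 * 1x CD-ROM data rate of roughly 176 KB/sec: a drive reporting
	 * a current speed of 352 KB/sec in bytes 14-15 of the
	 * capabilities page yields 352 / SD_SPEED_1X == 2, so
	 * CDROMGDRVSPEED would report a "2x" drive.
	 */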
/* Process the command */ 26101 switch (cmd) { 26102 case CDROMGDRVSPEED: 26103 current_speed /= SD_SPEED_1X; 26104 if (ddi_copyout(&current_speed, (void *)data, 26105 sizeof (int), flag) != 0) 26106 rval = EFAULT; 26107 break; 26108 case CDROMSDRVSPEED: 26109 /* Convert the speed code to KB/sec */ 26110 switch ((uchar_t)data) { 26111 case CDROM_NORMAL_SPEED: 26112 current_speed = SD_SPEED_1X; 26113 break; 26114 case CDROM_DOUBLE_SPEED: 26115 current_speed = 2 * SD_SPEED_1X; 26116 break; 26117 case CDROM_QUAD_SPEED: 26118 current_speed = 4 * SD_SPEED_1X; 26119 break; 26120 case CDROM_TWELVE_SPEED: 26121 current_speed = 12 * SD_SPEED_1X; 26122 break; 26123 case CDROM_MAXIMUM_SPEED: 26124 current_speed = 0xffff; 26125 break; 26126 default: 26127 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26128 "sr_atapi_change_speed: invalid drive speed %d\n", 26129 (uchar_t)data); 26130 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26131 return (EINVAL); 26132 } 26133 26134 /* Check the request against the drive's max speed. */ 26135 if (current_speed != 0xffff) { 26136 if (current_speed > max_speed) { 26137 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26138 return (EINVAL); 26139 } 26140 } 26141 26142 /* 26143 * Build and send the SET SPEED command 26144 * 26145 * Note: The SET SPEED (0xBB) command used in this routine is 26146 * obsolete per the SCSI MMC spec but still supported in the 26147 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 26148 therefore the command is still implemented in this routine. 26149 */ 26150 bzero(cdb, sizeof (cdb)); 26151 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 26152 cdb[2] = (uchar_t)(current_speed >> 8); 26153 cdb[3] = (uchar_t)current_speed; 26154 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26155 com->uscsi_cdb = (caddr_t)cdb; 26156 com->uscsi_cdblen = CDB_GROUP5; 26157 com->uscsi_bufaddr = NULL; 26158 com->uscsi_buflen = 0; 26159 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26160 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD); 26161 break; 26162 default: 26163 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26164 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 26165 rval = EINVAL; 26166 } 26167 26168 if (sense) { 26169 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26170 } 26171 if (com) { 26172 kmem_free(com, sizeof (*com)); 26173 } 26174 return (rval); 26175 } 26176 26177 26178 /* 26179 * Function: sr_pause_resume() 26180 * 26181 * Description: This routine is the driver entry point for handling CD-ROM 26182 * pause/resume ioctl requests. This only affects the audio play 26183 * operation. 26184 * 26185 * Arguments: dev - the device 'dev_t' 26186 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 26187 * for setting the resume bit of the cdb.
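 *
 *		A minimal userland sketch (illustrative only; the device
 *		path is hypothetical and error handling is omitted):
 *
 *			int fd = open("/dev/rdsk/c0t2d0s2", O_RDONLY);
 *			(void) ioctl(fd, CDROMPAUSE, 0);
 *			(void) ioctl(fd, CDROMRESUME, 0);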
26188 * 26189 * Return Code: the code returned by sd_send_scsi_cmd() 26190 * EINVAL if invalid mode specified 26191 * 26192 */ 26193 26194 static int 26195 sr_pause_resume(dev_t dev, int cmd) 26196 { 26197 struct sd_lun *un; 26198 struct uscsi_cmd *com; 26199 char cdb[CDB_GROUP1]; 26200 int rval; 26201 26202 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26203 return (ENXIO); 26204 } 26205 26206 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26207 bzero(cdb, CDB_GROUP1); 26208 cdb[0] = SCMD_PAUSE_RESUME; 26209 switch (cmd) { 26210 case CDROMRESUME: 26211 cdb[8] = 1; 26212 break; 26213 case CDROMPAUSE: 26214 cdb[8] = 0; 26215 break; 26216 default: 26217 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 26218 " Command '%x' Not Supported\n", cmd); 26219 rval = EINVAL; 26220 goto done; 26221 } 26222 26223 com->uscsi_cdb = cdb; 26224 com->uscsi_cdblen = CDB_GROUP1; 26225 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26226 26227 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26228 SD_PATH_STANDARD); 26229 26230 done: 26231 kmem_free(com, sizeof (*com)); 26232 return (rval); 26233 } 26234 26235 26236 /* 26237 * Function: sr_play_msf() 26238 * 26239 * Description: This routine is the driver entry point for handling CD-ROM 26240 * ioctl requests to output the audio signals at the specified 26241 * starting address and continue the audio play until the specified 26242 * ending address (CDROMPLAYMSF). The address is in Minute Second 26243 * Frame (MSF) format. 26244 * 26245 * Arguments: dev - the device 'dev_t' 26246 * data - pointer to user provided audio msf structure, 26247 * specifying start/end addresses. 26248 * flag - this argument is a pass through to ddi_copyxxx() 26249 * directly from the mode argument of ioctl().
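 *
 *		A minimal userland sketch (addresses illustrative; fd is
 *		an open descriptor for the CD device): play from
 *		00:02:00 to 05:30:00 MSF:
 *
 *			struct cdrom_msf msf;
 *			msf.cdmsf_min0 = 0;   msf.cdmsf_sec0 = 2;
 *			msf.cdmsf_frame0 = 0;
 *			msf.cdmsf_min1 = 5;   msf.cdmsf_sec1 = 30;
 *			msf.cdmsf_frame1 = 0;
 *			(void) ioctl(fd, CDROMPLAYMSF, &msf);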
26250 * 26251 * Return Code: the code returned by sd_send_scsi_cmd() 26252 * EFAULT if ddi_copyxxx() fails 26253 * ENXIO if fail ddi_get_soft_state 26254 * EINVAL if data pointer is NULL 26255 */ 26256 26257 static int 26258 sr_play_msf(dev_t dev, caddr_t data, int flag) 26259 { 26260 struct sd_lun *un; 26261 struct uscsi_cmd *com; 26262 struct cdrom_msf msf_struct; 26263 struct cdrom_msf *msf = &msf_struct; 26264 char cdb[CDB_GROUP1]; 26265 int rval; 26266 26267 if (data == NULL) { 26268 return (EINVAL); 26269 } 26270 26271 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26272 return (ENXIO); 26273 } 26274 26275 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 26276 return (EFAULT); 26277 } 26278 26279 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26280 bzero(cdb, CDB_GROUP1); 26281 cdb[0] = SCMD_PLAYAUDIO_MSF; 26282 if (un->un_f_cfg_playmsf_bcd == TRUE) { 26283 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 26284 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 26285 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 26286 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 26287 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 26288 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 26289 } else { 26290 cdb[3] = msf->cdmsf_min0; 26291 cdb[4] = msf->cdmsf_sec0; 26292 cdb[5] = msf->cdmsf_frame0; 26293 cdb[6] = msf->cdmsf_min1; 26294 cdb[7] = msf->cdmsf_sec1; 26295 cdb[8] = msf->cdmsf_frame1; 26296 } 26297 com->uscsi_cdb = cdb; 26298 com->uscsi_cdblen = CDB_GROUP1; 26299 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26300 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26301 SD_PATH_STANDARD); 26302 kmem_free(com, sizeof (*com)); 26303 return (rval); 26304 } 26305 26306 26307 /* 26308 * Function: sr_play_trkind() 26309 * 26310 * Description: This routine is the driver entry point for handling CD-ROM 26311 * ioctl requests to output the audio signals at the specified 26312 * starting address and continue the audio play until the specified 26313 * ending address (CDROMPLAYTRKIND). The address is in Track Index 26314 * format. 26315 * 26316 * Arguments: dev - the device 'dev_t' 26317 * data - pointer to user provided audio track/index structure, 26318 * specifying start/end addresses. 26319 * flag - this argument is a pass through to ddi_copyxxx() 26320 * directly from the mode argument of ioctl(). 
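 *
 *		A minimal userland sketch (track/index values
 *		illustrative; fd is an open descriptor for the CD
 *		device): play tracks 1 through 3:
 *
 *			struct cdrom_ti ti;
 *			ti.cdti_trk0 = 1;  ti.cdti_ind0 = 1;
 *			ti.cdti_trk1 = 3;  ti.cdti_ind1 = 1;
 *			(void) ioctl(fd, CDROMPLAYTRKIND, &ti);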
26321 * 26322 * Return Code: the code returned by sd_send_scsi_cmd() 26323 * EFAULT if ddi_copyxxx() fails 26324 * ENXIO if fail ddi_get_soft_state 26325 * EINVAL if data pointer is NULL 26326 */ 26327 26328 static int 26329 sr_play_trkind(dev_t dev, caddr_t data, int flag) 26330 { 26331 struct cdrom_ti ti_struct; 26332 struct cdrom_ti *ti = &ti_struct; 26333 struct uscsi_cmd *com = NULL; 26334 char cdb[CDB_GROUP1]; 26335 int rval; 26336 26337 if (data == NULL) { 26338 return (EINVAL); 26339 } 26340 26341 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 26342 return (EFAULT); 26343 } 26344 26345 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26346 bzero(cdb, CDB_GROUP1); 26347 cdb[0] = SCMD_PLAYAUDIO_TI; 26348 cdb[4] = ti->cdti_trk0; 26349 cdb[5] = ti->cdti_ind0; 26350 cdb[7] = ti->cdti_trk1; 26351 cdb[8] = ti->cdti_ind1; 26352 com->uscsi_cdb = cdb; 26353 com->uscsi_cdblen = CDB_GROUP1; 26354 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26355 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26356 SD_PATH_STANDARD); 26357 kmem_free(com, sizeof (*com)); 26358 return (rval); 26359 } 26360 26361 26362 /* 26363 * Function: sr_read_all_subcodes() 26364 * 26365 * Description: This routine is the driver entry point for handling CD-ROM 26366 * ioctl requests to return raw subcode data while the target is 26367 * playing audio (CDROMSUBCODE). 26368 * 26369 * Arguments: dev - the device 'dev_t' 26370 * data - pointer to user provided cdrom subcode structure, 26371 * specifying the transfer length and address. 26372 * flag - this argument is a pass through to ddi_copyxxx() 26373 * directly from the mode argument of ioctl(). 26374 * 26375 * Return Code: the code returned by sd_send_scsi_cmd() 26376 * EFAULT if ddi_copyxxx() fails 26377 * ENXIO if fail ddi_get_soft_state 26378 * EINVAL if data pointer is NULL 26379 */ 26380 26381 static int 26382 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 26383 { 26384 struct sd_lun *un = NULL; 26385 struct uscsi_cmd *com = NULL; 26386 struct cdrom_subcode *subcode = NULL; 26387 int rval; 26388 size_t buflen; 26389 char cdb[CDB_GROUP5]; 26390 26391 #ifdef _MULTI_DATAMODEL 26392 /* To support ILP32 applications in an LP64 world */ 26393 struct cdrom_subcode32 cdrom_subcode32; 26394 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 26395 #endif 26396 if (data == NULL) { 26397 return (EINVAL); 26398 } 26399 26400 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26401 return (ENXIO); 26402 } 26403 26404 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 26405 26406 #ifdef _MULTI_DATAMODEL 26407 switch (ddi_model_convert_from(flag & FMODELS)) { 26408 case DDI_MODEL_ILP32: 26409 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 26410 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26411 "sr_read_all_subcodes: ddi_copyin Failed\n"); 26412 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26413 return (EFAULT); 26414 } 26415 /* Convert the ILP32 uscsi data from the application to LP64 */ 26416 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 26417 break; 26418 case DDI_MODEL_NONE: 26419 if (ddi_copyin(data, subcode, 26420 sizeof (struct cdrom_subcode), flag)) { 26421 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26422 "sr_read_all_subcodes: ddi_copyin Failed\n"); 26423 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26424 return (EFAULT); 26425 } 26426 break; 26427 } 26428 #else /* ! 
_MULTI_DATAMODEL */ 26429 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 26430 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26431 "sr_read_all_subcodes: ddi_copyin Failed\n"); 26432 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26433 return (EFAULT); 26434 } 26435 #endif /* _MULTI_DATAMODEL */ 26436 26437 /* 26438 * Since MMC-2 expects max 3 bytes for length, check if the 26439 * length input is greater than 3 bytes 26440 */ 26441 if ((subcode->cdsc_length & 0xFF000000) != 0) { 26442 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26443 "sr_read_all_subcodes: " 26444 "cdrom transfer length too large: %d (limit %d)\n", 26445 subcode->cdsc_length, 0xFFFFFF); 26446 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26447 return (EINVAL); 26448 } 26449 26450 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 26451 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26452 bzero(cdb, CDB_GROUP5); 26453 26454 if (un->un_f_mmc_cap == TRUE) { 26455 cdb[0] = (char)SCMD_READ_CD; 26456 cdb[2] = (char)0xff; 26457 cdb[3] = (char)0xff; 26458 cdb[4] = (char)0xff; 26459 cdb[5] = (char)0xff; 26460 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 26461 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 26462 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 26463 cdb[10] = 1; 26464 } else { 26465 /* 26466 * Note: A vendor specific command (0xDF) is being used here to 26467 * request a read of all subcodes. 26468 */ 26469 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 26470 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 26471 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 26472 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 26473 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 26474 } 26475 com->uscsi_cdb = cdb; 26476 com->uscsi_cdblen = CDB_GROUP5; 26477 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 26478 com->uscsi_buflen = buflen; 26479 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26480 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 26481 SD_PATH_STANDARD); 26482 kmem_free(subcode, sizeof (struct cdrom_subcode)); 26483 kmem_free(com, sizeof (*com)); 26484 return (rval); 26485 } 26486 26487 26488 /* 26489 * Function: sr_read_subchannel() 26490 * 26491 * Description: This routine is the driver entry point for handling CD-ROM 26492 * ioctl requests to return the Q sub-channel data of the CD 26493 * current position block (CDROMSUBCHNL). The data includes the 26494 * track number, index number, absolute CD-ROM address (LBA or MSF 26495 * format per the user), track relative CD-ROM address (LBA or MSF 26496 * format per the user), control data and audio status. 26497 * 26498 * Arguments: dev - the device 'dev_t' 26499 * data - pointer to user provided cdrom sub-channel structure 26500 * flag - this argument is a pass through to ddi_copyxxx() 26501 * directly from the mode argument of ioctl().
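 *
 *		A minimal userland sketch (format choice illustrative;
 *		fd is an open descriptor for the CD device):
 *
 *			struct cdrom_subchnl sc;
 *			sc.cdsc_format = CDROM_MSF;
 *			if (ioctl(fd, CDROMSUBCHNL, &sc) == 0) {
 *				... use sc.cdsc_audiostatus, sc.cdsc_trk,
 *				sc.cdsc_absaddr.msf, etc. ...
 *			}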
26502 * 26503 * Return Code: the code returned by sd_send_scsi_cmd() 26504 * EFAULT if ddi_copyxxx() fails 26505 * ENXIO if fail ddi_get_soft_state 26506 * EINVAL if data pointer is NULL 26507 */ 26508 26509 static int 26510 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 26511 { 26512 struct sd_lun *un; 26513 struct uscsi_cmd *com; 26514 struct cdrom_subchnl subchannel; 26515 struct cdrom_subchnl *subchnl = &subchannel; 26516 char cdb[CDB_GROUP1]; 26517 caddr_t buffer; 26518 int rval; 26519 26520 if (data == NULL) { 26521 return (EINVAL); 26522 } 26523 26524 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26525 (un->un_state == SD_STATE_OFFLINE)) { 26526 return (ENXIO); 26527 } 26528 26529 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 26530 return (EFAULT); 26531 } 26532 26533 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 26534 bzero(cdb, CDB_GROUP1); 26535 cdb[0] = SCMD_READ_SUBCHANNEL; 26536 /* Set the MSF bit based on the user requested address format */ 26537 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 26538 /* 26539 * Set the Q bit in byte 2 to indicate that Q sub-channel data should 26540 * be returned 26541 */ 26542 cdb[2] = 0x40; 26543 /* 26544 * Set byte 3 to specify the return data format. A value of 0x01 26545 * indicates that the CD-ROM current position should be returned. 26546 */ 26547 cdb[3] = 0x01; 26548 cdb[8] = 0x10; 26549 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26550 com->uscsi_cdb = cdb; 26551 com->uscsi_cdblen = CDB_GROUP1; 26552 com->uscsi_bufaddr = buffer; 26553 com->uscsi_buflen = 16; 26554 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26555 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26556 SD_PATH_STANDARD); 26557 if (rval != 0) { 26558 kmem_free(buffer, 16); 26559 kmem_free(com, sizeof (*com)); 26560 return (rval); 26561 } 26562 26563 /* Process the returned Q sub-channel data */ 26564 subchnl->cdsc_audiostatus = buffer[1]; 26565 subchnl->cdsc_adr = (buffer[5] & 0xF0); 26566 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 26567 subchnl->cdsc_trk = buffer[6]; 26568 subchnl->cdsc_ind = buffer[7]; 26569 if (subchnl->cdsc_format & CDROM_LBA) { 26570 subchnl->cdsc_absaddr.lba = 26571 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 26572 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 26573 subchnl->cdsc_reladdr.lba = 26574 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 26575 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 26576 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 26577 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 26578 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 26579 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 26580 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 26581 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 26582 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 26583 } else { 26584 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 26585 subchnl->cdsc_absaddr.msf.second = buffer[10]; 26586 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 26587 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 26588 subchnl->cdsc_reladdr.msf.second = buffer[14]; 26589 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 26590 } 26591 kmem_free(buffer, 16); 26592 kmem_free(com, sizeof (*com)); 26593 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 26594 != 0) { 26595 return (EFAULT); 26596 } 26597 return (rval); 26598 } 26599 26600 26601 /* 26602 * Function: sr_read_tocentry() 26603 *
26604 * Description: This routine is the driver entry point for handling CD-ROM 26605 * ioctl requests to read from the Table of Contents (TOC) 26606 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 26607 * fields, the starting address (LBA or MSF format per the user) 26608 * and the data mode if the user specified track is a data track. 26609 * 26610 * Note: The READ HEADER (0x44) command used in this routine is 26611 * obsolete per the SCSI MMC spec but still supported in the 26612 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 26613 * therefore the command is still implemented in this routine. 26614 * 26615 * Arguments: dev - the device 'dev_t' 26616 * data - pointer to user provided toc entry structure, 26617 * specifying the track # and the address format 26618 * (LBA or MSF). 26619 * flag - this argument is a pass through to ddi_copyxxx() 26620 * directly from the mode argument of ioctl(). 26621 * 26622 * Return Code: the code returned by sd_send_scsi_cmd() 26623 * EFAULT if ddi_copyxxx() fails 26624 * ENXIO if fail ddi_get_soft_state 26625 * EINVAL if data pointer is NULL 26626 */ 26627 26628 static int 26629 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 26630 { 26631 struct sd_lun *un = NULL; 26632 struct uscsi_cmd *com; 26633 struct cdrom_tocentry toc_entry; 26634 struct cdrom_tocentry *entry = &toc_entry; 26635 caddr_t buffer; 26636 int rval; 26637 char cdb[CDB_GROUP1]; 26638 26639 if (data == NULL) { 26640 return (EINVAL); 26641 } 26642 26643 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26644 (un->un_state == SD_STATE_OFFLINE)) { 26645 return (ENXIO); 26646 } 26647 26648 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 26649 return (EFAULT); 26650 } 26651 26652 /* Validate the requested track and address format */ 26653 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 26654 return (EINVAL); 26655 } 26656 26657 if (entry->cdte_track == 0) { 26658 return (EINVAL); 26659 } 26660 26661 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 26662 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26663 bzero(cdb, CDB_GROUP1); 26664 26665 cdb[0] = SCMD_READ_TOC; 26666 /* Set the MSF bit based on the user requested address format */ 26667 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 26668 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 26669 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 26670 } else { 26671 cdb[6] = entry->cdte_track; 26672 } 26673 26674 /* 26675 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
(4 byte TOC response header + 8 byte track descriptor) 26677 */ 26678 cdb[8] = 12; 26679 com->uscsi_cdb = cdb; 26680 com->uscsi_cdblen = CDB_GROUP1; 26681 com->uscsi_bufaddr = buffer; 26682 com->uscsi_buflen = 0x0C; 26683 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 26684 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26685 SD_PATH_STANDARD); 26686 if (rval != 0) { 26687 kmem_free(buffer, 12); 26688 kmem_free(com, sizeof (*com)); 26689 return (rval); 26690 } 26691 26692 /* Process the toc entry */ 26693 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 26694 entry->cdte_ctrl = (buffer[5] & 0x0F); 26695 if (entry->cdte_format & CDROM_LBA) { 26696 entry->cdte_addr.lba = 26697 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 26698 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 26699 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 26700 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 26701 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 26702 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 26703 /* 26704 * Send a READ TOC command using the LBA address format to get 26705 * the LBA for the track requested so it can be used in the 26706 * READ HEADER request 26707 * 26708 * Note: The MSF bit of the READ HEADER command specifies the 26709 * output format. The block address specified in that command 26710 * must be in LBA format. 26711 */ 26712 cdb[1] = 0; 26713 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26714 SD_PATH_STANDARD); 26715 if (rval != 0) { 26716 kmem_free(buffer, 12); 26717 kmem_free(com, sizeof (*com)); 26718 return (rval); 26719 } 26720 } else { 26721 entry->cdte_addr.msf.minute = buffer[9]; 26722 entry->cdte_addr.msf.second = buffer[10]; 26723 entry->cdte_addr.msf.frame = buffer[11]; 26724 /* 26725 * Send a READ TOC command using the LBA address format to get 26726 * the LBA for the track requested so it can be used in the 26727 * READ HEADER request 26728 * 26729 * Note: The MSF bit of the READ HEADER command specifies the 26730 * output format. The block address specified in that command 26731 * must be in LBA format. 26732 */ 26733 cdb[1] = 0; 26734 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26735 SD_PATH_STANDARD); 26736 if (rval != 0) { 26737 kmem_free(buffer, 12); 26738 kmem_free(com, sizeof (*com)); 26739 return (rval); 26740 } 26741 } 26742 26743 /* 26744 * Build and send the READ HEADER command to determine the data mode of 26745 * the user specified track. 26746 */ 26747 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 26748 (entry->cdte_track != CDROM_LEADOUT)) { 26749 bzero(cdb, CDB_GROUP1); 26750 cdb[0] = SCMD_READ_HEADER; 26751 cdb[2] = buffer[8]; 26752 cdb[3] = buffer[9]; 26753 cdb[4] = buffer[10]; 26754 cdb[5] = buffer[11]; 26755 cdb[8] = 0x08; 26756 com->uscsi_buflen = 0x08; 26757 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26758 SD_PATH_STANDARD); 26759 if (rval == 0) { 26760 entry->cdte_datamode = buffer[0]; 26761 } else { 26762 /* 26763 * The READ HEADER command failed; since it is 26764 * obsolete in one spec, it's better to return 26765 * -1 for an invalid track so that we can still 26766 * receive the rest of the TOC data.
26767 */ 26768 entry->cdte_datamode = (uchar_t)-1; 26769 } 26770 } else { 26771 entry->cdte_datamode = (uchar_t)-1; 26772 } 26773 26774 kmem_free(buffer, 12); 26775 kmem_free(com, sizeof (*com)); 26776 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 26777 return (EFAULT); 26778 26779 return (rval); 26780 } 26781 26782 26783 /* 26784 * Function: sr_read_tochdr() 26785 * 26786 * Description: This routine is the driver entry point for handling CD-ROM 26787 * ioctl requests to read the Table of Contents (TOC) header 26788 * (CDROMREADTOCHDR). The TOC header consists of the disk starting 26789 * and ending track numbers. 26790 * 26791 * Arguments: dev - the device 'dev_t' 26792 * data - pointer to user provided toc header structure, 26793 * specifying the starting and ending track numbers. 26794 * flag - this argument is a pass through to ddi_copyxxx() 26795 * directly from the mode argument of ioctl(). 26796 * 26797 * Return Code: the code returned by sd_send_scsi_cmd() 26798 * EFAULT if ddi_copyxxx() fails 26799 * ENXIO if fail ddi_get_soft_state 26800 * EINVAL if data pointer is NULL 26801 */ 26802 26803 static int 26804 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 26805 { 26806 struct sd_lun *un; 26807 struct uscsi_cmd *com; 26808 struct cdrom_tochdr toc_header; 26809 struct cdrom_tochdr *hdr = &toc_header; 26810 char cdb[CDB_GROUP1]; 26811 int rval; 26812 caddr_t buffer; 26813 26814 if (data == NULL) { 26815 return (EINVAL); 26816 } 26817 26818 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26819 (un->un_state == SD_STATE_OFFLINE)) { 26820 return (ENXIO); 26821 } 26822 26823 buffer = kmem_zalloc(4, KM_SLEEP); 26824 bzero(cdb, CDB_GROUP1); 26825 cdb[0] = SCMD_READ_TOC; 26826 /* 26827 * Specifying a track number of 0x00 in the READ TOC command indicates 26828 * that the TOC header should be returned 26829 */ 26830 cdb[6] = 0x00; 26831 /* 26832 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 26833 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 26834 */ 26835 cdb[8] = 0x04; 26836 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26837 com->uscsi_cdb = cdb; 26838 com->uscsi_cdblen = CDB_GROUP1; 26839 com->uscsi_bufaddr = buffer; 26840 com->uscsi_buflen = 0x04; 26841 com->uscsi_timeout = 300; 26842 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 26843 26844 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE, 26845 SD_PATH_STANDARD); 26846 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 26847 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 26848 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 26849 } else { 26850 hdr->cdth_trk0 = buffer[2]; 26851 hdr->cdth_trk1 = buffer[3]; 26852 } 26853 kmem_free(buffer, 4); 26854 kmem_free(com, sizeof (*com)); 26855 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 26856 return (EFAULT); 26857 } 26858 return (rval); 26859 } 26860 26861 26862 /* 26863 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 26864 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for 26865 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 26866 * digital audio and extended architecture digital audio. These modes are 26867 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 26868 * MMC specs.
26869 * 26870 * In addition to support for the various data formats, these routines also 26871 * include support for devices that implement only the direct access READ 26872 * commands (0x08, 0x28), devices that implement the READ_CD commands 26873 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 26874 * READ CDXA commands (0xD8, 0xDB). 26875 */ 26876 26877 /* 26878 * Function: sr_read_mode1() 26879 * 26880 * Description: This routine is the driver entry point for handling CD-ROM 26881 * ioctl read mode1 requests (CDROMREADMODE1). 26882 * 26883 * Arguments: dev - the device 'dev_t' 26884 * data - pointer to user provided cd read structure specifying 26885 * the lba buffer address and length. 26886 * flag - this argument is a pass through to ddi_copyxxx() 26887 * directly from the mode argument of ioctl(). 26888 * 26889 * Return Code: the code returned by sd_send_scsi_cmd() 26890 * EFAULT if ddi_copyxxx() fails 26891 * ENXIO if fail ddi_get_soft_state 26892 * EINVAL if data pointer is NULL 26893 */ 26894 26895 static int 26896 sr_read_mode1(dev_t dev, caddr_t data, int flag) 26897 { 26898 struct sd_lun *un; 26899 struct cdrom_read mode1_struct; 26900 struct cdrom_read *mode1 = &mode1_struct; 26901 int rval; 26902 sd_ssc_t *ssc; 26903 26904 #ifdef _MULTI_DATAMODEL 26905 /* To support ILP32 applications in an LP64 world */ 26906 struct cdrom_read32 cdrom_read32; 26907 struct cdrom_read32 *cdrd32 = &cdrom_read32; 26908 #endif /* _MULTI_DATAMODEL */ 26909 26910 if (data == NULL) { 26911 return (EINVAL); 26912 } 26913 26914 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26915 (un->un_state == SD_STATE_OFFLINE)) { 26916 return (ENXIO); 26917 } 26918 26919 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26920 "sd_read_mode1: entry: un:0x%p\n", un); 26921 26922 #ifdef _MULTI_DATAMODEL 26923 switch (ddi_model_convert_from(flag & FMODELS)) { 26924 case DDI_MODEL_ILP32: 26925 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 26926 return (EFAULT); 26927 } 26928 /* Convert the ILP32 uscsi data from the application to LP64 */ 26929 cdrom_read32tocdrom_read(cdrd32, mode1); 26930 break; 26931 case DDI_MODEL_NONE: 26932 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 26933 return (EFAULT); 26934 } 26935 } 26936 #else /* ! _MULTI_DATAMODEL */ 26937 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 26938 return (EFAULT); 26939 } 26940 #endif /* _MULTI_DATAMODEL */ 26941 26942 ssc = sd_ssc_init(un); 26943 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr, 26944 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 26945 sd_ssc_fini(ssc); 26946 26947 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 26948 "sd_read_mode1: exit: un:0x%p\n", un); 26949 26950 return (rval); 26951 } 26952 26953 26954 /* 26955 * Function: sr_read_cd_mode2() 26956 * 26957 * Description: This routine is the driver entry point for handling CD-ROM 26958 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 26959 * support the READ CD (0xBE) command or the 1st generation 26960 * READ CD (0xD4) command. 26961 * 26962 * Arguments: dev - the device 'dev_t' 26963 * data - pointer to user provided cd read structure specifying 26964 * the lba buffer address and length. 26965 * flag - this argument is a pass through to ddi_copyxxx() 26966 * directly from the mode argument of ioctl().
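 *
 *		A minimal userland sketch (LBA and length illustrative;
 *		the buffer must hold whole 2336-byte mode 2 sectors and
 *		fd is an open descriptor for the CD device):
 *
 *			char buf[2336];
 *			struct cdrom_read cr;
 *			cr.cdread_lba = 16;
 *			cr.cdread_bufaddr = buf;
 *			cr.cdread_buflen = sizeof (buf);
 *			(void) ioctl(fd, CDROMREADMODE2, &cr);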
26967 * 26968 * Return Code: the code returned by sd_send_scsi_cmd() 26969 * EFAULT if ddi_copyxxx() fails 26970 * ENXIO if fail ddi_get_soft_state 26971 * EINVAL if data pointer is NULL 26972 */ 26973 26974 static int 26975 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 26976 { 26977 struct sd_lun *un; 26978 struct uscsi_cmd *com; 26979 struct cdrom_read mode2_struct; 26980 struct cdrom_read *mode2 = &mode2_struct; 26981 uchar_t cdb[CDB_GROUP5]; 26982 int nblocks; 26983 int rval; 26984 #ifdef _MULTI_DATAMODEL 26985 /* To support ILP32 applications in an LP64 world */ 26986 struct cdrom_read32 cdrom_read32; 26987 struct cdrom_read32 *cdrd32 = &cdrom_read32; 26988 #endif /* _MULTI_DATAMODEL */ 26989 26990 if (data == NULL) { 26991 return (EINVAL); 26992 } 26993 26994 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 26995 (un->un_state == SD_STATE_OFFLINE)) { 26996 return (ENXIO); 26997 } 26998 26999 #ifdef _MULTI_DATAMODEL 27000 switch (ddi_model_convert_from(flag & FMODELS)) { 27001 case DDI_MODEL_ILP32: 27002 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27003 return (EFAULT); 27004 } 27005 /* Convert the ILP32 uscsi data from the application to LP64 */ 27006 cdrom_read32tocdrom_read(cdrd32, mode2); 27007 break; 27008 case DDI_MODEL_NONE: 27009 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27010 return (EFAULT); 27011 } 27012 break; 27013 } 27014 27015 #else /* ! _MULTI_DATAMODEL */ 27016 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27017 return (EFAULT); 27018 } 27019 #endif /* _MULTI_DATAMODEL */ 27020 27021 bzero(cdb, sizeof (cdb)); 27022 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 27023 /* Read command supported by 1st generation atapi drives */ 27024 cdb[0] = SCMD_READ_CDD4; 27025 } else { 27026 /* Universal CD Access Command */ 27027 cdb[0] = SCMD_READ_CD; 27028 } 27029 27030 /* 27031 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book 27032 */ 27033 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 27034 27035 /* set the start address */ 27036 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 27037 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 27038 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 27039 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 27040 27041 /* set the transfer length */ 27042 nblocks = mode2->cdread_buflen / 2336; 27043 cdb[6] = (uchar_t)(nblocks >> 16); 27044 cdb[7] = (uchar_t)(nblocks >> 8); 27045 cdb[8] = (uchar_t)nblocks; 27046 27047 /* set the filter bits */ 27048 cdb[9] = CDROM_READ_CD_USERDATA; 27049 27050 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27051 com->uscsi_cdb = (caddr_t)cdb; 27052 com->uscsi_cdblen = sizeof (cdb); 27053 com->uscsi_bufaddr = mode2->cdread_bufaddr; 27054 com->uscsi_buflen = mode2->cdread_buflen; 27055 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27056 27057 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27058 SD_PATH_STANDARD); 27059 kmem_free(com, sizeof (*com)); 27060 return (rval); 27061 } 27062 27063 27064 /* 27065 * Function: sr_read_mode2() 27066 * 27067 * Description: This routine is the driver entry point for handling CD-ROM 27068 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27069 * do not support the READ CD (0xBE) command. 27070 * 27071 * Arguments: dev - the device 'dev_t' 27072 * data - pointer to user provided cd read structure specifying 27073 * the lba buffer address and length. 27074 * flag - this argument is a pass through to ddi_copyxxx() 27075 * directly from the mode argument of ioctl().
27076 * 27077 * Return Code: the code returned by sd_send_scsi_cmd() 27078 * EFAULT if ddi_copyxxx() fails 27079 * ENXIO if fail ddi_get_soft_state 27080 * EINVAL if data pointer is NULL 27081 * EIO if fail to reset block size 27082 * EAGAIN if commands are in progress in the driver 27083 */ 27084 27085 static int 27086 sr_read_mode2(dev_t dev, caddr_t data, int flag) 27087 { 27088 struct sd_lun *un; 27089 struct cdrom_read mode2_struct; 27090 struct cdrom_read *mode2 = &mode2_struct; 27091 int rval; 27092 uint32_t restore_blksize; 27093 struct uscsi_cmd *com; 27094 uchar_t cdb[CDB_GROUP0]; 27095 int nblocks; 27096 27097 #ifdef _MULTI_DATAMODEL 27098 /* To support ILP32 applications in an LP64 world */ 27099 struct cdrom_read32 cdrom_read32; 27100 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27101 #endif /* _MULTI_DATAMODEL */ 27102 27103 if (data == NULL) { 27104 return (EINVAL); 27105 } 27106 27107 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27108 (un->un_state == SD_STATE_OFFLINE)) { 27109 return (ENXIO); 27110 } 27111 27112 /* 27113 * Because this routine will update the device and driver block size 27114 * being used, we want to make sure there are no commands in progress. 27115 * If commands are in progress, the user will have to try again. 27116 * 27117 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 27118 * in sdioctl to protect commands from sdioctl through to the top of 27119 * sd_uscsi_strategy. See sdioctl for details. 27120 */ 27121 mutex_enter(SD_MUTEX(un)); 27122 if (un->un_ncmds_in_driver != 1) { 27123 mutex_exit(SD_MUTEX(un)); 27124 return (EAGAIN); 27125 } 27126 mutex_exit(SD_MUTEX(un)); 27127 27128 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27129 "sd_read_mode2: entry: un:0x%p\n", un); 27130 27131 #ifdef _MULTI_DATAMODEL 27132 switch (ddi_model_convert_from(flag & FMODELS)) { 27133 case DDI_MODEL_ILP32: 27134 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27135 return (EFAULT); 27136 } 27137 /* Convert the ILP32 uscsi data from the application to LP64 */ 27138 cdrom_read32tocdrom_read(cdrd32, mode2); 27139 break; 27140 case DDI_MODEL_NONE: 27141 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27142 return (EFAULT); 27143 } 27144 break; 27145 } 27146 #else /* !
_MULTI_DATAMODEL */ 27147 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 27148 return (EFAULT); 27149 } 27150 #endif /* _MULTI_DATAMODEL */ 27151 27152 /* Store the current target block size for restoration later */ 27153 restore_blksize = un->un_tgt_blocksize; 27154 27155 /* Change the device and soft state target block size to 2336 */ 27156 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 27157 rval = EIO; 27158 goto done; 27159 } 27160 27161 27162 bzero(cdb, sizeof (cdb)); 27163 27164 /* set READ operation */ 27165 cdb[0] = SCMD_READ; 27166 27167 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 27168 mode2->cdread_lba >>= 2; 27169 27170 /* set the start address */ 27171 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F); 27172 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 27173 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 27174 27175 /* set the transfer length */ 27176 nblocks = mode2->cdread_buflen / 2336; 27177 cdb[4] = (uchar_t)nblocks & 0xFF; 27178 27179 /* build command */ 27180 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27181 com->uscsi_cdb = (caddr_t)cdb; 27182 com->uscsi_cdblen = sizeof (cdb); 27183 com->uscsi_bufaddr = mode2->cdread_bufaddr; 27184 com->uscsi_buflen = mode2->cdread_buflen; 27185 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27186 27187 /* 27188 * Issue SCSI command with user space address for read buffer. 27189 * 27190 * This sends the command through the main channel in the driver. 27191 * 27192 * Since this is accessed via an IOCTL call, we go through the 27193 * standard path, so that if the device was powered down, then 27194 * it would be 'awakened' to handle the command. 27195 */ 27196 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27197 SD_PATH_STANDARD); 27198 27199 kmem_free(com, sizeof (*com)); 27200 27201 /* Restore the device and soft state target block size */ 27202 if (sr_sector_mode(dev, restore_blksize) != 0) { 27203 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27204 "can't switch back to mode 1\n"); 27205 /* 27206 * If the read itself succeeded, we still need to report 27207 * an error because we failed to reset the block size 27208 */ 27209 if (rval == 0) { 27210 rval = EIO; 27211 } 27212 } 27213 27214 done: 27215 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27216 "sd_read_mode2: exit: un:0x%p\n", un); 27217 27218 return (rval); 27219 } 27220 27221 27222 /* 27223 * Function: sr_sector_mode() 27224 * 27225 * Description: This utility function is used by sr_read_mode2 to set the target 27226 * block size based on the user specified size. This is a legacy 27227 * implementation based upon a vendor specific mode page. 27228 * 27229 * Arguments: dev - the device 'dev_t' 27230 * blksize - the block size being set; either 2336 or 27231 * 512.
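 *
 *		A rough sketch of the 20-byte mode select buffer built
 *		below (offsets taken from the code; the page itself is
 *		vendor defined, so this is descriptive, not normative):
 *
 *			select[3]	block descriptor length (0x08)
 *			select[10..11]	block size, MSB first (2336 or 512)
 *			select[12..13]	fixed bytes 0x01 and 0x06 (page data)
 *			select[14..15]	copied from the mode sense data;
 *					bit 0 of byte 14 is set for the
 *					2336-byte mode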
27232 * 27233 * Return Code: the code returned by sd_send_scsi_cmd() 27234 * EFAULT if ddi_copyxxx() fails 27235 * ENXIO if fail ddi_get_soft_state 27236 * EINVAL if data pointer is NULL 27237 */ 27238 27239 static int 27240 sr_sector_mode(dev_t dev, uint32_t blksize) 27241 { 27242 struct sd_lun *un; 27243 uchar_t *sense; 27244 uchar_t *select; 27245 int rval; 27246 sd_ssc_t *ssc; 27247 27248 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27249 (un->un_state == SD_STATE_OFFLINE)) { 27250 return (ENXIO); 27251 } 27252 27253 sense = kmem_zalloc(20, KM_SLEEP); 27254 27255 /* Note: This is a vendor specific mode page (0x81) */ 27256 ssc = sd_ssc_init(un); 27257 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81, 27258 SD_PATH_STANDARD); 27259 sd_ssc_fini(ssc); 27260 if (rval != 0) { 27261 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27262 "sr_sector_mode: Mode Sense failed\n"); 27263 kmem_free(sense, 20); 27264 return (rval); 27265 } 27266 select = kmem_zalloc(20, KM_SLEEP); 27267 select[3] = 0x08; 27268 select[10] = ((blksize >> 8) & 0xff); 27269 select[11] = (blksize & 0xff); 27270 select[12] = 0x01; 27271 select[13] = 0x06; 27272 select[14] = sense[14]; 27273 select[15] = sense[15]; 27274 if (blksize == SD_MODE2_BLKSIZE) { 27275 select[14] |= 0x01; 27276 } 27277 27278 ssc = sd_ssc_init(un); 27279 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20, 27280 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27281 sd_ssc_fini(ssc); 27282 if (rval != 0) { 27283 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27284 "sr_sector_mode: Mode Select failed\n"); 27285 } else { 27286 /* 27287 * Only update the softstate block size if we successfully 27288 * changed the device block mode. 27289 */ 27290 mutex_enter(SD_MUTEX(un)); 27291 sd_update_block_info(un, blksize, 0); 27292 mutex_exit(SD_MUTEX(un)); 27293 } 27294 kmem_free(sense, 20); 27295 kmem_free(select, 20); 27296 return (rval); 27297 } 27298 27299 27300 /* 27301 * Function: sr_read_cdda() 27302 * 27303 * Description: This routine is the driver entry point for handling CD-ROM 27304 * ioctl requests to return CD-DA or subcode data (CDROMCDDA). If 27305 * the target supports CDDA, these requests are handled via a vendor 27306 * specific command (0xD8). If the target does not support CDDA, 27307 * these requests are handled via the READ CD command (0xBE). 27308 * 27309 * Arguments: dev - the device 'dev_t' 27310 * data - pointer to user provided CD-DA structure specifying 27311 * the track starting address, transfer length, and 27312 * subcode options. 27313 * flag - this argument is a pass through to ddi_copyxxx() 27314 * directly from the mode argument of ioctl().
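 *
 *		A minimal userland sketch (address and length
 *		illustrative; the buffer must be sized for the subcode
 *		format requested and fd is an open descriptor for the
 *		CD device):
 *
 *			char buf[2352 * 8];
 *			struct cdrom_cdda c;
 *			c.cdda_addr = 0;
 *			c.cdda_length = 8;
 *			c.cdda_data = (caddr_t)buf;
 *			c.cdda_subcode = CDROM_DA_NO_SUBCODE;
 *			(void) ioctl(fd, CDROMCDDA, &c);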
27315 * 27316 * Return Code: the code returned by sd_send_scsi_cmd() 27317 * EFAULT if ddi_copyxxx() fails 27318 * ENXIO if fail ddi_get_soft_state 27319 * EINVAL if invalid arguments are provided 27320 * ENOTTY 27321 */ 27322 27323 static int 27324 sr_read_cdda(dev_t dev, caddr_t data, int flag) 27325 { 27326 struct sd_lun *un; 27327 struct uscsi_cmd *com; 27328 struct cdrom_cdda *cdda; 27329 int rval; 27330 size_t buflen; 27331 char cdb[CDB_GROUP5]; 27332 27333 #ifdef _MULTI_DATAMODEL 27334 /* To support ILP32 applications in an LP64 world */ 27335 struct cdrom_cdda32 cdrom_cdda32; 27336 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 27337 #endif /* _MULTI_DATAMODEL */ 27338 27339 if (data == NULL) { 27340 return (EINVAL); 27341 } 27342 27343 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27344 return (ENXIO); 27345 } 27346 27347 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 27348 27349 #ifdef _MULTI_DATAMODEL 27350 switch (ddi_model_convert_from(flag & FMODELS)) { 27351 case DDI_MODEL_ILP32: 27352 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 27353 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27354 "sr_read_cdda: ddi_copyin Failed\n"); 27355 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27356 return (EFAULT); 27357 } 27358 /* Convert the ILP32 uscsi data from the application to LP64 */ 27359 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 27360 break; 27361 case DDI_MODEL_NONE: 27362 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 27363 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27364 "sr_read_cdda: ddi_copyin Failed\n"); 27365 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27366 return (EFAULT); 27367 } 27368 break; 27369 } 27370 #else /* ! _MULTI_DATAMODEL */ 27371 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 27372 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27373 "sr_read_cdda: ddi_copyin Failed\n"); 27374 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27375 return (EFAULT); 27376 } 27377 #endif /* _MULTI_DATAMODEL */ 27378 27379 /* 27380 * Since MMC-2 expects max 3 bytes for length, check if the 27381 * length input is greater than 3 bytes 27382 */ 27383 if ((cdda->cdda_length & 0xFF000000) != 0) { 27384 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 27385 "cdrom transfer length too large: %d (limit %d)\n", 27386 cdda->cdda_length, 0xFFFFFF); 27387 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27388 return (EINVAL); 27389 } 27390 27391 switch (cdda->cdda_subcode) { 27392 case CDROM_DA_NO_SUBCODE: 27393 buflen = CDROM_BLK_2352 * cdda->cdda_length; 27394 break; 27395 case CDROM_DA_SUBQ: 27396 buflen = CDROM_BLK_2368 * cdda->cdda_length; 27397 break; 27398 case CDROM_DA_ALL_SUBCODE: 27399 buflen = CDROM_BLK_2448 * cdda->cdda_length; 27400 break; 27401 case CDROM_DA_SUBCODE_ONLY: 27402 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 27403 break; 27404 default: 27405 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27406 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 27407 cdda->cdda_subcode); 27408 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27409 return (EINVAL); 27410 } 27411 27412 /* Build and send the command */ 27413 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27414 bzero(cdb, CDB_GROUP5); 27415 27416 if (un->un_f_cfg_cdda == TRUE) { 27417 cdb[0] = (char)SCMD_READ_CD; 27418 cdb[1] = 0x04; 27419 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 27420 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 27421 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 27422 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 27423 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 27424 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 27425 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 27426 cdb[9] = 0x10; 27427 switch (cdda->cdda_subcode) { 27428 case CDROM_DA_NO_SUBCODE : 27429 cdb[10] = 0x0; 27430 break; 27431 case CDROM_DA_SUBQ : 27432 cdb[10] = 0x2; 27433 break; 27434 case CDROM_DA_ALL_SUBCODE : 27435 cdb[10] = 0x1; 27436 break; 27437 case CDROM_DA_SUBCODE_ONLY : 27438 /* FALLTHROUGH */ 27439 default : 27440 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27441 kmem_free(com, sizeof (*com)); 27442 return (ENOTTY); 27443 } 27444 } else { 27445 cdb[0] = (char)SCMD_READ_CDDA; 27446 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 27447 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 27448 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 27449 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 27450 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 27451 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 27452 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 27453 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 27454 cdb[10] = cdda->cdda_subcode; 27455 } 27456 27457 com->uscsi_cdb = cdb; 27458 com->uscsi_cdblen = CDB_GROUP5; 27459 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 27460 com->uscsi_buflen = buflen; 27461 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27462 27463 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27464 SD_PATH_STANDARD); 27465 27466 kmem_free(cdda, sizeof (struct cdrom_cdda)); 27467 kmem_free(com, sizeof (*com)); 27468 return (rval); 27469 } 27470 27471 27472 /* 27473 * Function: sr_read_cdxa() 27474 * 27475 * Description: This routine is the driver entry point for handling CD-ROM 27476 * ioctl requests to return CD-XA (Extended Architecture) data. 27477 * (CDROMCDXA). 27478 * 27479 * Arguments: dev - the device 'dev_t' 27480 * data - pointer to user provided CD-XA structure specifying 27481 * the data starting address, transfer length, and format 27482 * flag - this argument is a pass through to ddi_copyxxx() 27483 * directly from the mode argument of ioctl(). 27484 * 27485 * Return Code: the code returned by sd_send_scsi_cmd() 27486 * EFAULT if ddi_copyxxx() fails 27487 * ENXIO if fail ddi_get_soft_state 27488 * EINVAL if data pointer is NULL 27489 */ 27490 27491 static int 27492 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 27493 { 27494 struct sd_lun *un; 27495 struct uscsi_cmd *com; 27496 struct cdrom_cdxa *cdxa; 27497 int rval; 27498 size_t buflen; 27499 char cdb[CDB_GROUP5]; 27500 uchar_t read_flags; 27501 27502 #ifdef _MULTI_DATAMODEL 27503 /* To support ILP32 applications in an LP64 world */ 27504 struct cdrom_cdxa32 cdrom_cdxa32; 27505 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 27506 #endif /* _MULTI_DATAMODEL */ 27507 27508 if (data == NULL) { 27509 return (EINVAL); 27510 } 27511 27512 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27513 return (ENXIO); 27514 } 27515 27516 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 27517 27518 #ifdef _MULTI_DATAMODEL 27519 switch (ddi_model_convert_from(flag & FMODELS)) { 27520 case DDI_MODEL_ILP32: 27521 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 27522 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27523 return (EFAULT); 27524 } 27525 /* 27526 * Convert the ILP32 uscsi data from the 27527 * application to LP64 for internal use. 
27528 		 */
27529 		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
27530 		break;
27531 	case DDI_MODEL_NONE:
27532 		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
27533 			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
27534 			return (EFAULT);
27535 		}
27536 		break;
27537 	}
27538 #else /* ! _MULTI_DATAMODEL */
27539 	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
27540 		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
27541 		return (EFAULT);
27542 	}
27543 #endif /* _MULTI_DATAMODEL */
27544 
27545 	/*
27546 	 * Since MMC-2 expects max 3 bytes for length, check if the
27547 	 * length input is greater than 3 bytes.
27548 	 */
27549 	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
27550 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
27551 		    "cdrom transfer length too large: %d (limit %d)\n",
27552 		    cdxa->cdxa_length, 0xFFFFFF);
27553 		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
27554 		return (EINVAL);
27555 	}
27556 
27557 	switch (cdxa->cdxa_format) {
27558 	case CDROM_XA_DATA:
27559 		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
27560 		read_flags = 0x10;
27561 		break;
27562 	case CDROM_XA_SECTOR_DATA:
27563 		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
27564 		read_flags = 0xf8;
27565 		break;
27566 	case CDROM_XA_DATA_W_ERROR:
27567 		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
27568 		read_flags = 0xfc;
27569 		break;
27570 	default:
27571 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27572 		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
27573 		    cdxa->cdxa_format);
27574 		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
27575 		return (EINVAL);
27576 	}
27577 
27578 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27579 	bzero(cdb, CDB_GROUP5);
27580 	if (un->un_f_mmc_cap == TRUE) {
27581 		cdb[0] = (char)SCMD_READ_CD;
27582 		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
27583 		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
27584 		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
27585 		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
27586 		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
27587 		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
27588 		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
27589 		cdb[9] = (char)read_flags;
27590 	} else {
27591 		/*
27592 		 * Note: A vendor specific command (0xDB) is being used here to
27593 		 * request a read of all subcodes.
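		 *
		 * As a rough sketch of the CDB layout built just below
		 * (byte values taken from this code, not from a published
		 * specification):
		 *
		 *	byte 0:		SCMD_READ_CDXA (0xDB)
		 *	bytes 2-5:	starting address, MSB first
		 *	bytes 6-9:	transfer length, MSB first
		 *	byte 10:	data format (cdxa_format)
27594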
27594 */ 27595 cdb[0] = (char)SCMD_READ_CDXA; 27596 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 27597 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 27598 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 27599 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 27600 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 27601 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 27602 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 27603 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 27604 cdb[10] = cdxa->cdxa_format; 27605 } 27606 com->uscsi_cdb = cdb; 27607 com->uscsi_cdblen = CDB_GROUP5; 27608 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 27609 com->uscsi_buflen = buflen; 27610 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27611 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE, 27612 SD_PATH_STANDARD); 27613 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 27614 kmem_free(com, sizeof (*com)); 27615 return (rval); 27616 } 27617 27618 27619 /* 27620 * Function: sr_eject() 27621 * 27622 * Description: This routine is the driver entry point for handling CD-ROM 27623 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 27624 * 27625 * Arguments: dev - the device 'dev_t' 27626 * 27627 * Return Code: the code returned by sd_send_scsi_cmd() 27628 */ 27629 27630 static int 27631 sr_eject(dev_t dev) 27632 { 27633 struct sd_lun *un; 27634 int rval; 27635 sd_ssc_t *ssc; 27636 27637 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27638 (un->un_state == SD_STATE_OFFLINE)) { 27639 return (ENXIO); 27640 } 27641 27642 /* 27643 * To prevent race conditions with the eject 27644 * command, keep track of an eject command as 27645 * it progresses. If we are already handling 27646 * an eject command in the driver for the given 27647 * unit and another request to eject is received 27648 * immediately return EAGAIN so we don't lose 27649 * the command if the current eject command fails. 27650 */ 27651 mutex_enter(SD_MUTEX(un)); 27652 if (un->un_f_ejecting == TRUE) { 27653 mutex_exit(SD_MUTEX(un)); 27654 return (EAGAIN); 27655 } 27656 un->un_f_ejecting = TRUE; 27657 mutex_exit(SD_MUTEX(un)); 27658 27659 ssc = sd_ssc_init(un); 27660 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, 27661 SD_PATH_STANDARD); 27662 sd_ssc_fini(ssc); 27663 27664 if (rval != 0) { 27665 mutex_enter(SD_MUTEX(un)); 27666 un->un_f_ejecting = FALSE; 27667 mutex_exit(SD_MUTEX(un)); 27668 return (rval); 27669 } 27670 27671 ssc = sd_ssc_init(un); 27672 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_TARGET_EJECT, 27673 SD_PATH_STANDARD); 27674 sd_ssc_fini(ssc); 27675 27676 if (rval == 0) { 27677 mutex_enter(SD_MUTEX(un)); 27678 sr_ejected(un); 27679 un->un_mediastate = DKIO_EJECTED; 27680 un->un_f_ejecting = FALSE; 27681 cv_broadcast(&un->un_state_cv); 27682 mutex_exit(SD_MUTEX(un)); 27683 } else { 27684 mutex_enter(SD_MUTEX(un)); 27685 un->un_f_ejecting = FALSE; 27686 mutex_exit(SD_MUTEX(un)); 27687 } 27688 return (rval); 27689 } 27690 27691 27692 /* 27693 * Function: sr_ejected() 27694 * 27695 * Description: This routine updates the soft state structure to invalidate the 27696 * geometry information after the media has been ejected or a 27697 * media eject has been detected. 
27698  *
27699  * Arguments: un - driver soft state (unit) structure
27700  */
27701 
27702 static void
27703 sr_ejected(struct sd_lun *un)
27704 {
27705 	struct sd_errstats *stp;
27706 
27707 	ASSERT(un != NULL);
27708 	ASSERT(mutex_owned(SD_MUTEX(un)));
27709 
27710 	un->un_f_blockcount_is_valid = FALSE;
27711 	un->un_f_tgt_blocksize_is_valid = FALSE;
27712 	mutex_exit(SD_MUTEX(un));
27713 	cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
27714 	mutex_enter(SD_MUTEX(un));
27715 
27716 	if (un->un_errstats != NULL) {
27717 		stp = (struct sd_errstats *)un->un_errstats->ks_data;
27718 		stp->sd_capacity.value.ui64 = 0;
27719 	}
27720 }
27721 
27722 
27723 /*
27724  * Function: sr_check_wp()
27725  *
27726  * Description: This routine checks the write protection of removable media
27727  *		and hotpluggable devices via the write protect bit of the
27728  *		Mode Page Header device specific field. Some devices choke
27729  *		on an unsupported mode page. To work around this issue, this
27730  *		routine uses mode page 0x3f (request all pages) for all
27731  *		device types.
27732  *
27733  * Arguments: dev - the device 'dev_t'
27734  *
27735  * Return Code: int indicating if the device is write protected (1) or not (0)
27736  *
27737  * Context: Kernel thread.
27738  *
27739  */
27740 
27741 static int
27742 sr_check_wp(dev_t dev)
27743 {
27744 	struct sd_lun	*un;
27745 	uchar_t		device_specific;
27746 	uchar_t		*sense;
27747 	int		hdrlen;
27748 	int		rval = FALSE;
27749 	int		status;
27750 	sd_ssc_t	*ssc;
27751 
27752 	/*
27753 	 * Note: The return codes for this routine should be reworked to
27754 	 * properly handle the case of a NULL softstate.
27755 	 */
27756 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27757 		return (FALSE);
27758 	}
27759 
27760 	if (un->un_f_cfg_is_atapi == TRUE) {
27761 		/*
27762 		 * The mode page contents are not required; set the allocation
27763 		 * length for the mode page header only.
27764 		 */
27765 		hdrlen = MODE_HEADER_LENGTH_GRP2;
27766 		sense = kmem_zalloc(hdrlen, KM_SLEEP);
27767 		ssc = sd_ssc_init(un);
27768 		status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
27769 		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
27770 		sd_ssc_fini(ssc);
27771 		if (status != 0)
27772 			goto err_exit;
27773 		device_specific =
27774 		    ((struct mode_header_grp2 *)sense)->device_specific;
27775 	} else {
27776 		hdrlen = MODE_HEADER_LENGTH;
27777 		sense = kmem_zalloc(hdrlen, KM_SLEEP);
27778 		ssc = sd_ssc_init(un);
27779 		status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
27780 		    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
27781 		sd_ssc_fini(ssc);
27782 		if (status != 0)
27783 			goto err_exit;
27784 		device_specific =
27785 		    ((struct mode_header *)sense)->device_specific;
27786 	}
27787 
27788 
27789 	/*
27790 	 * If the mode sense above failed, we branched to err_exit and will
27791 	 * return FALSE; not all devices understand this query, and such
27792 	 * devices are assumed not writable. Otherwise report the WP bit.
27793 	 */
27794 	if (device_specific & WRITE_PROTECT) {
27795 		rval = TRUE;
27796 	}
27797 
27798 err_exit:
27799 	kmem_free(sense, hdrlen);
27800 	return (rval);
27801 }
27802 
27803 /*
27804  * Function: sr_volume_ctrl()
27805  *
27806  * Description: This routine is the driver entry point for handling CD-ROM
27807  *		audio output volume ioctl requests (CDROMVOLCTRL).
27808  *
27809  * Arguments: dev	- the device 'dev_t'
27810  *		data	- pointer to user audio volume control structure
27811  *		flag	- this argument is a pass through to ddi_copyxxx()
27812  *			  directly from the mode argument of ioctl().
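 *
 *		A minimal usage sketch (illustrative only, not part of the
 *		driver; the values are hypothetical):
 *
 *		    struct cdrom_volctrl v;
 *
 *		    v.channel0 = 128;		(left channel, 0-255)
 *		    v.channel1 = 128;		(right channel, 0-255)
 *		    if (ioctl(fd, CDROMVOLCTRL, &v) < 0)
 *			    perror("CDROMVOLCTRL");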
27813 * 27814 * Return Code: the code returned by sd_send_scsi_cmd() 27815 * EFAULT if ddi_copyxxx() fails 27816 * ENXIO if fail ddi_get_soft_state 27817 * EINVAL if data pointer is NULL 27818 * 27819 */ 27820 27821 static int 27822 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 27823 { 27824 struct sd_lun *un; 27825 struct cdrom_volctrl volume; 27826 struct cdrom_volctrl *vol = &volume; 27827 uchar_t *sense_page; 27828 uchar_t *select_page; 27829 uchar_t *sense; 27830 uchar_t *select; 27831 int sense_buflen; 27832 int select_buflen; 27833 int rval; 27834 sd_ssc_t *ssc; 27835 27836 if (data == NULL) { 27837 return (EINVAL); 27838 } 27839 27840 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27841 (un->un_state == SD_STATE_OFFLINE)) { 27842 return (ENXIO); 27843 } 27844 27845 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 27846 return (EFAULT); 27847 } 27848 27849 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 27850 struct mode_header_grp2 *sense_mhp; 27851 struct mode_header_grp2 *select_mhp; 27852 int bd_len; 27853 27854 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 27855 select_buflen = MODE_HEADER_LENGTH_GRP2 + 27856 MODEPAGE_AUDIO_CTRL_LEN; 27857 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 27858 select = kmem_zalloc(select_buflen, KM_SLEEP); 27859 ssc = sd_ssc_init(un); 27860 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, 27861 sense_buflen, MODEPAGE_AUDIO_CTRL, 27862 SD_PATH_STANDARD); 27863 sd_ssc_fini(ssc); 27864 27865 if (rval != 0) { 27866 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27867 "sr_volume_ctrl: Mode Sense Failed\n"); 27868 kmem_free(sense, sense_buflen); 27869 kmem_free(select, select_buflen); 27870 return (rval); 27871 } 27872 sense_mhp = (struct mode_header_grp2 *)sense; 27873 select_mhp = (struct mode_header_grp2 *)select; 27874 bd_len = (sense_mhp->bdesc_length_hi << 8) | 27875 sense_mhp->bdesc_length_lo; 27876 if (bd_len > MODE_BLK_DESC_LENGTH) { 27877 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27878 "sr_volume_ctrl: Mode Sense returned invalid " 27879 "block descriptor length\n"); 27880 kmem_free(sense, sense_buflen); 27881 kmem_free(select, select_buflen); 27882 return (EIO); 27883 } 27884 sense_page = (uchar_t *) 27885 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 27886 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 27887 select_mhp->length_msb = 0; 27888 select_mhp->length_lsb = 0; 27889 select_mhp->bdesc_length_hi = 0; 27890 select_mhp->bdesc_length_lo = 0; 27891 } else { 27892 struct mode_header *sense_mhp, *select_mhp; 27893 27894 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 27895 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 27896 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 27897 select = kmem_zalloc(select_buflen, KM_SLEEP); 27898 ssc = sd_ssc_init(un); 27899 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 27900 sense_buflen, MODEPAGE_AUDIO_CTRL, 27901 SD_PATH_STANDARD); 27902 sd_ssc_fini(ssc); 27903 27904 if (rval != 0) { 27905 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27906 "sr_volume_ctrl: Mode Sense Failed\n"); 27907 kmem_free(sense, sense_buflen); 27908 kmem_free(select, select_buflen); 27909 return (rval); 27910 } 27911 sense_mhp = (struct mode_header *)sense; 27912 select_mhp = (struct mode_header *)select; 27913 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 27914 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27915 "sr_volume_ctrl: Mode Sense returned invalid " 27916 "block descriptor length\n"); 27917 
kmem_free(sense, sense_buflen); 27918 kmem_free(select, select_buflen); 27919 return (EIO); 27920 } 27921 sense_page = (uchar_t *) 27922 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 27923 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 27924 select_mhp->length = 0; 27925 select_mhp->bdesc_length = 0; 27926 } 27927 /* 27928 * Note: An audio control data structure could be created and overlayed 27929 * on the following in place of the array indexing method implemented. 27930 */ 27931 27932 /* Build the select data for the user volume data */ 27933 select_page[0] = MODEPAGE_AUDIO_CTRL; 27934 select_page[1] = 0xE; 27935 /* Set the immediate bit */ 27936 select_page[2] = 0x04; 27937 /* Zero out reserved fields */ 27938 select_page[3] = 0x00; 27939 select_page[4] = 0x00; 27940 /* Return sense data for fields not to be modified */ 27941 select_page[5] = sense_page[5]; 27942 select_page[6] = sense_page[6]; 27943 select_page[7] = sense_page[7]; 27944 /* Set the user specified volume levels for channel 0 and 1 */ 27945 select_page[8] = 0x01; 27946 select_page[9] = vol->channel0; 27947 select_page[10] = 0x02; 27948 select_page[11] = vol->channel1; 27949 /* Channel 2 and 3 are currently unsupported so return the sense data */ 27950 select_page[12] = sense_page[12]; 27951 select_page[13] = sense_page[13]; 27952 select_page[14] = sense_page[14]; 27953 select_page[15] = sense_page[15]; 27954 27955 ssc = sd_ssc_init(un); 27956 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 27957 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select, 27958 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27959 } else { 27960 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 27961 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 27962 } 27963 sd_ssc_fini(ssc); 27964 27965 kmem_free(sense, sense_buflen); 27966 kmem_free(select, select_buflen); 27967 return (rval); 27968 } 27969 27970 27971 /* 27972 * Function: sr_read_sony_session_offset() 27973 * 27974 * Description: This routine is the driver entry point for handling CD-ROM 27975 * ioctl requests for session offset information. (CDROMREADOFFSET) 27976 * The address of the first track in the last session of a 27977 * multi-session CD-ROM is returned 27978 * 27979 * Note: This routine uses a vendor specific key value in the 27980 * command control field without implementing any vendor check here 27981 * or in the ioctl routine. 27982 * 27983 * Arguments: dev - the device 'dev_t' 27984 * data - pointer to an int to hold the requested address 27985 * flag - this argument is a pass through to ddi_copyxxx() 27986 * directly from the mode argument of ioctl(). 27987 * 27988 * Return Code: the code returned by sd_send_scsi_cmd() 27989 * EFAULT if ddi_copyxxx() fails 27990 * ENXIO if fail ddi_get_soft_state 27991 * EINVAL if data pointer is NULL 27992 */ 27993 27994 static int 27995 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 27996 { 27997 struct sd_lun *un; 27998 struct uscsi_cmd *com; 27999 caddr_t buffer; 28000 char cdb[CDB_GROUP1]; 28001 int session_offset = 0; 28002 int rval; 28003 28004 if (data == NULL) { 28005 return (EINVAL); 28006 } 28007 28008 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28009 (un->un_state == SD_STATE_OFFLINE)) { 28010 return (ENXIO); 28011 } 28012 28013 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 28014 bzero(cdb, CDB_GROUP1); 28015 cdb[0] = SCMD_READ_TOC; 28016 /* 28017 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 
28018  * (4 byte TOC response header + 8 byte response data)
28019  */
28020 	cdb[8] = SONY_SESSION_OFFSET_LEN;
28021 	/* Byte 9 is the control byte. A vendor specific value is used */
28022 	cdb[9] = SONY_SESSION_OFFSET_KEY;
28023 	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28024 	com->uscsi_cdb = cdb;
28025 	com->uscsi_cdblen = CDB_GROUP1;
28026 	com->uscsi_bufaddr = buffer;
28027 	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
28028 	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28029 
28030 	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
28031 	    SD_PATH_STANDARD);
28032 	if (rval != 0) {
28033 		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
28034 		kmem_free(com, sizeof (*com));
28035 		return (rval);
28036 	}
28037 	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
28038 		session_offset =
28039 		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
28040 		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
28041 		/*
28042 		 * The offset is returned in current lbasize blocks. Convert
28043 		 * to 2k blocks before returning it to the user.
28044 		 */
28045 		if (un->un_tgt_blocksize == CDROM_BLK_512) {
28046 			session_offset >>= 2;
28047 		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
28048 			session_offset >>= 1;
28049 		}
28050 	}
28051 
28052 	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
28053 		rval = EFAULT;
28054 	}
28055 
28056 	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
28057 	kmem_free(com, sizeof (*com));
28058 	return (rval);
28059 }
28060 
28061 
28062 /*
28063  * Function: sd_wm_cache_constructor()
28064  *
28065  * Description: Cache Constructor for the wmap cache for the read/modify/write
28066  *		devices.
28067  *
28068  * Arguments: wm - A pointer to the sd_w_map to be initialized.
28069  *		un - sd_lun structure for the device.
28070  *		flag - the km flags passed to constructor
28071  *
28072  * Return Code: 0 on success.
28073  *		-1 on failure.
28074  */
28075 
28076 /*ARGSUSED*/
28077 static int
28078 sd_wm_cache_constructor(void *wm, void *un, int flags)
28079 {
28080 	bzero(wm, sizeof (struct sd_w_map));
28081 	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
28082 	return (0);
28083 }
28084 
28085 
28086 /*
28087  * Function: sd_wm_cache_destructor()
28088  *
28089  * Description: Cache destructor for the wmap cache for the read/modify/write
28090  *		devices.
28091  *
28092  * Arguments: wm - A pointer to the sd_w_map to be destroyed.
28093  *		un - sd_lun structure for the device.
28094  */
28095 /*ARGSUSED*/
28096 static void
28097 sd_wm_cache_destructor(void *wm, void *un)
28098 {
28099 	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
28100 }
28101 
28102 
28103 /*
28104  * Function: sd_range_lock()
28105  *
28106  * Description: Lock the specified range of blocks to ensure that a
28107  *		read-modify-write is atomic and that no other I/O writes
28108  *		to the same location. The range is specified in terms
28109  *		of start and end blocks. Block numbers are the actual
28110  *		media block numbers, not system block numbers.
28111  *
28112  * Arguments: un	- sd_lun structure for the device.
28113  *		startb - The starting block number
28114  *		endb - The end block number
28115  *		typ - type of i/o - simple/read_modify_write
28116  *
28117  * Return Code: wm - pointer to the wmap structure.
28118  *
28119  * Context: This routine can sleep.
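 *
 * As a summary of the state machine implemented below (the states are
 * taken from the switch statement in the code):
 *
 *	SD_WM_CHK_LIST	 - look for an overlapping busy wmap; if one
 *			   is found go to SD_WM_WAIT_MAP, otherwise go
 *			   to SD_WM_LOCK_RANGE
 *	SD_WM_LOCK_RANGE - allocate a wmap (possibly sleeping, which
 *			   drops SD_MUTEX and forces a recheck), link
 *			   it on the list, and go to SD_WM_DONE
 *	SD_WM_WAIT_MAP	 - cv_wait() on the busy wmap's wm_avail, then
 *			   recheck from SD_WM_CHK_LIST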
28120  */
28121 
28122 static struct sd_w_map *
28123 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
28124 {
28125 	struct sd_w_map *wmp = NULL;
28126 	struct sd_w_map *sl_wmp = NULL;
28127 	struct sd_w_map *tmp_wmp;
28128 	wm_state state = SD_WM_CHK_LIST;
28129 
28130 
28131 	ASSERT(un != NULL);
28132 	ASSERT(!mutex_owned(SD_MUTEX(un)));
28133 
28134 	mutex_enter(SD_MUTEX(un));
28135 
28136 	while (state != SD_WM_DONE) {
28137 
28138 		switch (state) {
28139 		case SD_WM_CHK_LIST:
28140 			/*
28141 			 * This is the starting state. Check the wmap list
28142 			 * to see if the range is currently available.
28143 			 */
28144 			if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
28145 				/*
28146 				 * If this is a simple write and no rmw
28147 				 * i/o is pending then try to lock the
28148 				 * range as the range should be available.
28149 				 */
28150 				state = SD_WM_LOCK_RANGE;
28151 			} else {
28152 				tmp_wmp = sd_get_range(un, startb, endb);
28153 				if (tmp_wmp != NULL) {
28154 					if ((wmp != NULL) && ONLIST(un, wmp)) {
28155 						/*
28156 						 * Should not keep onlist wmps
28157 						 * while waiting; this macro
28158 						 * will also do wmp = NULL.
28159 						 */
28160 						FREE_ONLIST_WMAP(un, wmp);
28161 					}
28162 					/*
28163 					 * sl_wmp is the wmap on which we will
28164 					 * wait. Since tmp_wmp points to the
28165 					 * in-use wmap, set sl_wmp to tmp_wmp
28166 					 * and change the state to wait.
28167 					 */
28168 					sl_wmp = tmp_wmp;
28169 					state = SD_WM_WAIT_MAP;
28170 				} else {
28171 					state = SD_WM_LOCK_RANGE;
28172 				}
28173 
28174 			}
28175 			break;
28176 
28177 		case SD_WM_LOCK_RANGE:
28178 			ASSERT(un->un_wm_cache);
28179 			/*
28180 			 * The range needs to be locked; try to get a wmap.
28181 			 * First attempt with KM_NOSLEEP, as we want to avoid
28182 			 * sleeping if possible, since we would have to
28183 			 * release the sd mutex in order to sleep.
28184 			 */
28185 			if (wmp == NULL)
28186 				wmp = kmem_cache_alloc(un->un_wm_cache,
28187 				    KM_NOSLEEP);
28188 			if (wmp == NULL) {
28189 				mutex_exit(SD_MUTEX(un));
28190 				_NOTE(DATA_READABLE_WITHOUT_LOCK
28191 				    (sd_lun::un_wm_cache))
28192 				wmp = kmem_cache_alloc(un->un_wm_cache,
28193 				    KM_SLEEP);
28194 				mutex_enter(SD_MUTEX(un));
28195 				/*
28196 				 * We released the mutex, so recheck by going
28197 				 * back to the check list state.
28198 				 */
28199 				state = SD_WM_CHK_LIST;
28200 			} else {
28201 				/*
28202 				 * We exit the state machine since we have
28203 				 * the wmap. Do the housekeeping first:
28204 				 * place the wmap on the wmap list if it is
28205 				 * not already there, then set the state to done.
28206 				 */
28207 				wmp->wm_start = startb;
28208 				wmp->wm_end = endb;
28209 				wmp->wm_flags = typ | SD_WM_BUSY;
28210 				if (typ & SD_WTYPE_RMW) {
28211 					un->un_rmw_count++;
28212 				}
28213 				/*
28214 				 * If not already on the list then link it.
28215 				 */
28216 				if (!ONLIST(un, wmp)) {
28217 					wmp->wm_next = un->un_wm;
28218 					wmp->wm_prev = NULL;
28219 					if (wmp->wm_next)
28220 						wmp->wm_next->wm_prev = wmp;
28221 					un->un_wm = wmp;
28222 				}
28223 				state = SD_WM_DONE;
28224 			}
28225 			break;
28226 
28227 		case SD_WM_WAIT_MAP:
28228 			ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
28229 			/*
28230 			 * Wait is done on sl_wmp, which is set in the
28231 			 * check_list state.
28232 			 */
28233 			sl_wmp->wm_wanted_count++;
28234 			cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
28235 			sl_wmp->wm_wanted_count--;
28236 			/*
28237 			 * We can reuse the memory from the completed sl_wmp
28238 			 * lock range for our new lock, but only if no one is
28239 			 * waiting for it.
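			 *
			 * For example, if two threads are blocked on the
			 * same busy map, wm_wanted_count is 2 while they
			 * sleep; the unlocking thread then broadcasts
			 * wm_avail rather than freeing the map, and only a
			 * waiter that finds the wanted count back at zero
			 * may recycle the map's memory for its own lock.
28240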
28240 */ 28241 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY)); 28242 if (sl_wmp->wm_wanted_count == 0) { 28243 if (wmp != NULL) 28244 CHK_N_FREEWMP(un, wmp); 28245 wmp = sl_wmp; 28246 } 28247 sl_wmp = NULL; 28248 /* 28249 * After waking up, need to recheck for availability of 28250 * range. 28251 */ 28252 state = SD_WM_CHK_LIST; 28253 break; 28254 28255 default: 28256 panic("sd_range_lock: " 28257 "Unknown state %d in sd_range_lock", state); 28258 /*NOTREACHED*/ 28259 } /* switch(state) */ 28260 28261 } /* while(state != SD_WM_DONE) */ 28262 28263 mutex_exit(SD_MUTEX(un)); 28264 28265 ASSERT(wmp != NULL); 28266 28267 return (wmp); 28268 } 28269 28270 28271 /* 28272 * Function: sd_get_range() 28273 * 28274 * Description: Find if there any overlapping I/O to this one 28275 * Returns the write-map of 1st such I/O, NULL otherwise. 28276 * 28277 * Arguments: un - sd_lun structure for the device. 28278 * startb - The starting block number 28279 * endb - The end block number 28280 * 28281 * Return Code: wm - pointer to the wmap structure. 28282 */ 28283 28284 static struct sd_w_map * 28285 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 28286 { 28287 struct sd_w_map *wmp; 28288 28289 ASSERT(un != NULL); 28290 28291 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 28292 if (!(wmp->wm_flags & SD_WM_BUSY)) { 28293 continue; 28294 } 28295 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 28296 break; 28297 } 28298 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 28299 break; 28300 } 28301 } 28302 28303 return (wmp); 28304 } 28305 28306 28307 /* 28308 * Function: sd_free_inlist_wmap() 28309 * 28310 * Description: Unlink and free a write map struct. 28311 * 28312 * Arguments: un - sd_lun structure for the device. 28313 * wmp - sd_w_map which needs to be unlinked. 28314 */ 28315 28316 static void 28317 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 28318 { 28319 ASSERT(un != NULL); 28320 28321 if (un->un_wm == wmp) { 28322 un->un_wm = wmp->wm_next; 28323 } else { 28324 wmp->wm_prev->wm_next = wmp->wm_next; 28325 } 28326 28327 if (wmp->wm_next) { 28328 wmp->wm_next->wm_prev = wmp->wm_prev; 28329 } 28330 28331 wmp->wm_next = wmp->wm_prev = NULL; 28332 28333 kmem_cache_free(un->un_wm_cache, wmp); 28334 } 28335 28336 28337 /* 28338 * Function: sd_range_unlock() 28339 * 28340 * Description: Unlock the range locked by wm. 28341 * Free write map if nobody else is waiting on it. 28342 * 28343 * Arguments: un - sd_lun structure for the device. 28344 * wmp - sd_w_map which needs to be unlinked. 28345 */ 28346 28347 static void 28348 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 28349 { 28350 ASSERT(un != NULL); 28351 ASSERT(wm != NULL); 28352 ASSERT(!mutex_owned(SD_MUTEX(un))); 28353 28354 mutex_enter(SD_MUTEX(un)); 28355 28356 if (wm->wm_flags & SD_WTYPE_RMW) { 28357 un->un_rmw_count--; 28358 } 28359 28360 if (wm->wm_wanted_count) { 28361 wm->wm_flags = 0; 28362 /* 28363 * Broadcast that the wmap is available now. 28364 */ 28365 cv_broadcast(&wm->wm_avail); 28366 } else { 28367 /* 28368 * If no one is waiting on the map, it should be free'ed. 28369 */ 28370 sd_free_inlist_wmap(un, wm); 28371 } 28372 28373 mutex_exit(SD_MUTEX(un)); 28374 } 28375 28376 28377 /* 28378 * Function: sd_read_modify_write_task 28379 * 28380 * Description: Called from a taskq thread to initiate the write phase of 28381 * a read-modify-write request. This is used for targets where 28382 * un->un_sys_blocksize != un->un_tgt_blocksize. 
28383  *
28384  * Arguments: arg - a pointer to the buf(9S) struct for the write command.
28385  *
28386  * Context: Called under taskq thread context.
28387  */
28388 
28389 static void
28390 sd_read_modify_write_task(void *arg)
28391 {
28392 	struct sd_mapblocksize_info	*bsp;
28393 	struct buf	*bp;
28394 	struct sd_xbuf	*xp;
28395 	struct sd_lun	*un;
28396 
28397 	bp = arg;	/* The bp is given in arg */
28398 	ASSERT(bp != NULL);
28399 
28400 	/* Get the pointer to the layer-private data struct */
28401 	xp = SD_GET_XBUF(bp);
28402 	ASSERT(xp != NULL);
28403 	bsp = xp->xb_private;
28404 	ASSERT(bsp != NULL);
28405 
28406 	un = SD_GET_UN(bp);
28407 	ASSERT(un != NULL);
28408 	ASSERT(!mutex_owned(SD_MUTEX(un)));
28409 
28410 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
28411 	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
28412 
28413 	/*
28414 	 * This is the write phase of a read-modify-write request, called
28415 	 * under the context of a taskq thread in response to the completion
28416 	 * of the read portion of the rmw request completing under interrupt
28417 	 * context. The write request must be sent from here down the iostart
28418 	 * chain as if it were being sent from sd_mapblocksize_iostart(), so
28419 	 * we use the layer index saved in the layer-private data area.
28420 	 */
28421 	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
28422 
28423 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
28424 	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
28425 }
28426 
28427 
28428 /*
28429  * Function: sddump_do_read_of_rmw()
28430  *
28431  * Description: This routine will be called from sddump. If sddump is called
28432  *		with an I/O which is not aligned on a device blocksize
28433  *		boundary, then the write has to be converted to a
28434  *		read-modify-write. Do the read part here in order to keep
28435  *		sddump simple. Note that the sd_mutex is held across the
28436  *		call to this routine.
28437  *
28438  * Arguments: un	- sd_lun
28439  *		blkno	- block number in terms of media block size.
28440  *		nblk	- number of blocks.
28441  *		bpp	- pointer to pointer to the buf structure. On return
28442  *			  from this function, *bpp points to the valid buffer
28443  *			  to which the write has to be done.
28444  *
28445  * Return Code: 0 for success or errno-type return code
28446  */
28447 
28448 static int
28449 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
28450     struct buf **bpp)
28451 {
28452 	int err;
28453 	int i;
28454 	int rval;
28455 	struct buf *bp;
28456 	struct scsi_pkt *pkt = NULL;
28457 	uint32_t target_blocksize;
28458 
28459 	ASSERT(un != NULL);
28460 	ASSERT(mutex_owned(SD_MUTEX(un)));
28461 
28462 	target_blocksize = un->un_tgt_blocksize;
28463 
28464 	mutex_exit(SD_MUTEX(un));
28465 
28466 	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
28467 	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
28468 	if (bp == NULL) {
28469 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28470 		    "no resources for dumping; giving up");
28471 		err = ENOMEM;
28472 		goto done;
28473 	}
28474 
28475 	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
28476 	    blkno, nblk);
28477 	if (rval != 0) {
28478 		scsi_free_consistent_buf(bp);
28479 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28480 		    "no resources for dumping; giving up");
28481 		err = ENOMEM;
28482 		goto done;
28483 	}
28484 
28485 	pkt->pkt_flags |= FLAG_NOINTR;
28486 
28487 	err = EIO;
28488 	for (i = 0; i < SD_NDUMP_RETRIES; i++) {
28489 
28490 		/*
28491 		 * Scsi_poll returns 0 (success) if the command completes and
28492 		 * the status block is STATUS_GOOD.
We should only check 28493 * errors if this condition is not true. Even then we should 28494 * send our own request sense packet only if we have a check 28495 * condition and auto request sense has not been performed by 28496 * the hba. 28497 */ 28498 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 28499 28500 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 28501 err = 0; 28502 break; 28503 } 28504 28505 /* 28506 * Check CMD_DEV_GONE 1st, give up if device is gone, 28507 * no need to read RQS data. 28508 */ 28509 if (pkt->pkt_reason == CMD_DEV_GONE) { 28510 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28511 "Error while dumping state with rmw..." 28512 "Device is gone\n"); 28513 break; 28514 } 28515 28516 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 28517 SD_INFO(SD_LOG_DUMP, un, 28518 "sddump: read failed with CHECK, try # %d\n", i); 28519 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 28520 (void) sd_send_polled_RQS(un); 28521 } 28522 28523 continue; 28524 } 28525 28526 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 28527 int reset_retval = 0; 28528 28529 SD_INFO(SD_LOG_DUMP, un, 28530 "sddump: read failed with BUSY, try # %d\n", i); 28531 28532 if (un->un_f_lun_reset_enabled == TRUE) { 28533 reset_retval = scsi_reset(SD_ADDRESS(un), 28534 RESET_LUN); 28535 } 28536 if (reset_retval == 0) { 28537 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 28538 } 28539 (void) sd_send_polled_RQS(un); 28540 28541 } else { 28542 SD_INFO(SD_LOG_DUMP, un, 28543 "sddump: read failed with 0x%x, try # %d\n", 28544 SD_GET_PKT_STATUS(pkt), i); 28545 mutex_enter(SD_MUTEX(un)); 28546 sd_reset_target(un, pkt); 28547 mutex_exit(SD_MUTEX(un)); 28548 } 28549 28550 /* 28551 * If we are not getting anywhere with lun/target resets, 28552 * let's reset the bus. 28553 */ 28554 if (i > SD_NDUMP_RETRIES/2) { 28555 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 28556 (void) sd_send_polled_RQS(un); 28557 } 28558 28559 } 28560 scsi_destroy_pkt(pkt); 28561 28562 if (err != 0) { 28563 scsi_free_consistent_buf(bp); 28564 *bpp = NULL; 28565 } else { 28566 *bpp = bp; 28567 } 28568 28569 done: 28570 mutex_enter(SD_MUTEX(un)); 28571 return (err); 28572 } 28573 28574 28575 /* 28576 * Function: sd_failfast_flushq 28577 * 28578 * Description: Take all bp's on the wait queue that have B_FAILFAST set 28579 * in b_flags and move them onto the failfast queue, then kick 28580 * off a thread to return all bp's on the failfast queue to 28581 * their owners with an error set. 28582 * 28583 * Arguments: un - pointer to the soft state struct for the instance. 28584 * 28585 * Context: may execute in interrupt context. 28586 */ 28587 28588 static void 28589 sd_failfast_flushq(struct sd_lun *un) 28590 { 28591 struct buf *bp; 28592 struct buf *next_waitq_bp; 28593 struct buf *prev_waitq_bp = NULL; 28594 28595 ASSERT(un != NULL); 28596 ASSERT(mutex_owned(SD_MUTEX(un))); 28597 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 28598 ASSERT(un->un_failfast_bp == NULL); 28599 28600 SD_TRACE(SD_LOG_IO_FAILFAST, un, 28601 "sd_failfast_flushq: entry: un:0x%p\n", un); 28602 28603 /* 28604 * Check if we should flush all bufs when entering failfast state, or 28605 * just those with B_FAILFAST set. 28606 */ 28607 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 28608 /* 28609 * Move *all* bp's on the wait queue to the failfast flush 28610 * queue, including those that do NOT have B_FAILFAST set. 
28611 */ 28612 if (un->un_failfast_headp == NULL) { 28613 ASSERT(un->un_failfast_tailp == NULL); 28614 un->un_failfast_headp = un->un_waitq_headp; 28615 } else { 28616 ASSERT(un->un_failfast_tailp != NULL); 28617 un->un_failfast_tailp->av_forw = un->un_waitq_headp; 28618 } 28619 28620 un->un_failfast_tailp = un->un_waitq_tailp; 28621 28622 /* update kstat for each bp moved out of the waitq */ 28623 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) { 28624 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 28625 } 28626 28627 /* empty the waitq */ 28628 un->un_waitq_headp = un->un_waitq_tailp = NULL; 28629 28630 } else { 28631 /* 28632 * Go thru the wait queue, pick off all entries with 28633 * B_FAILFAST set, and move these onto the failfast queue. 28634 */ 28635 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) { 28636 /* 28637 * Save the pointer to the next bp on the wait queue, 28638 * so we get to it on the next iteration of this loop. 28639 */ 28640 next_waitq_bp = bp->av_forw; 28641 28642 /* 28643 * If this bp from the wait queue does NOT have 28644 * B_FAILFAST set, just move on to the next element 28645 * in the wait queue. Note, this is the only place 28646 * where it is correct to set prev_waitq_bp. 28647 */ 28648 if ((bp->b_flags & B_FAILFAST) == 0) { 28649 prev_waitq_bp = bp; 28650 continue; 28651 } 28652 28653 /* 28654 * Remove the bp from the wait queue. 28655 */ 28656 if (bp == un->un_waitq_headp) { 28657 /* The bp is the first element of the waitq. */ 28658 un->un_waitq_headp = next_waitq_bp; 28659 if (un->un_waitq_headp == NULL) { 28660 /* The wait queue is now empty */ 28661 un->un_waitq_tailp = NULL; 28662 } 28663 } else { 28664 /* 28665 * The bp is either somewhere in the middle 28666 * or at the end of the wait queue. 28667 */ 28668 ASSERT(un->un_waitq_headp != NULL); 28669 ASSERT(prev_waitq_bp != NULL); 28670 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST) 28671 == 0); 28672 if (bp == un->un_waitq_tailp) { 28673 /* bp is the last entry on the waitq. */ 28674 ASSERT(next_waitq_bp == NULL); 28675 un->un_waitq_tailp = prev_waitq_bp; 28676 } 28677 prev_waitq_bp->av_forw = next_waitq_bp; 28678 } 28679 bp->av_forw = NULL; 28680 28681 /* 28682 * update kstat since the bp is moved out of 28683 * the waitq 28684 */ 28685 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 28686 28687 /* 28688 * Now put the bp onto the failfast queue. 28689 */ 28690 if (un->un_failfast_headp == NULL) { 28691 /* failfast queue is currently empty */ 28692 ASSERT(un->un_failfast_tailp == NULL); 28693 un->un_failfast_headp = 28694 un->un_failfast_tailp = bp; 28695 } else { 28696 /* Add the bp to the end of the failfast q */ 28697 ASSERT(un->un_failfast_tailp != NULL); 28698 ASSERT(un->un_failfast_tailp->b_flags & 28699 B_FAILFAST); 28700 un->un_failfast_tailp->av_forw = bp; 28701 un->un_failfast_tailp = bp; 28702 } 28703 } 28704 } 28705 28706 /* 28707 * Now return all bp's on the failfast queue to their owners. 28708 */ 28709 while ((bp = un->un_failfast_headp) != NULL) { 28710 28711 un->un_failfast_headp = bp->av_forw; 28712 if (un->un_failfast_headp == NULL) { 28713 un->un_failfast_tailp = NULL; 28714 } 28715 28716 /* 28717 * We want to return the bp with a failure error code, but 28718 * we do not want a call to sd_start_cmds() to occur here, 28719 * so use sd_return_failed_command_no_restart() instead of 28720 * sd_return_failed_command(). 28721 */ 28722 sd_return_failed_command_no_restart(un, bp, EIO); 28723 } 28724 28725 /* Flush the xbuf queues if required. 
 */
28726 	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
28727 		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
28728 	}
28729 
28730 	SD_TRACE(SD_LOG_IO_FAILFAST, un,
28731 	    "sd_failfast_flushq: exit: un:0x%p\n", un);
28732 }
28733 
28734 
28735 /*
28736  * Function: sd_failfast_flushq_callback
28737  *
28738  * Description: Return TRUE if the given bp meets the criteria for failfast
28739  *		flushing. Used with ddi_xbuf_flushq(9F).
28740  *
28741  * Arguments: bp - ptr to buf struct to be examined.
28742  *
28743  * Context: Any
28744  */
28745 
28746 static int
28747 sd_failfast_flushq_callback(struct buf *bp)
28748 {
28749 	/*
28750 	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
28751 	 * state is entered; OR (2) the given bp has B_FAILFAST set.
28752 	 */
28753 	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
28754 	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
28755 }
28756 
28757 
28758 
28759 /*
28760  * Function: sd_setup_next_xfer
28761  *
28762  * Description: Prepare the next I/O operation using DMA_PARTIAL
28763  *
28764  */
28765 
28766 static int
28767 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
28768     struct scsi_pkt *pkt, struct sd_xbuf *xp)
28769 {
28770 	ssize_t	num_blks_not_xfered;
28771 	daddr_t	strt_blk_num;
28772 	ssize_t	bytes_not_xfered;
28773 	int	rval;
28774 
28775 	ASSERT(pkt->pkt_resid == 0);
28776 
28777 	/*
28778 	 * Calculate next block number and amount to be transferred.
28779 	 *
28780 	 * How much data has NOT been transferred to the HBA yet.
28781 	 */
28782 	bytes_not_xfered = xp->xb_dma_resid;
28783 
28784 	/*
28785 	 * Figure out how many blocks have NOT been transferred to the HBA yet.
28786 	 */
28787 	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);
28788 
28789 	/*
28790 	 * Set the starting block number to the end of what WAS transferred.
28791 	 */
28792 	strt_blk_num = xp->xb_blkno +
28793 	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);
28794 
28795 	/*
28796 	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
28797 	 * will call scsi_init_pkt with NULL_FUNC so we do not have to release
28798 	 * the disk mutex here.
28799 	 */
28800 	rval = sd_setup_next_rw_pkt(un, pkt, bp,
28801 	    strt_blk_num, num_blks_not_xfered);
28802 
28803 	if (rval == 0) {
28804 
28805 		/*
28806 		 * Success.
28807 		 *
28808 		 * Adjust things if there are still more blocks to be
28809 		 * transferred.
28810 		 */
28811 		xp->xb_dma_resid = pkt->pkt_resid;
28812 		pkt->pkt_resid = 0;
28813 
28814 		return (1);
28815 	}
28816 
28817 	/*
28818 	 * There's really only one possible return value from
28819 	 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt
28820 	 * returns NULL.
28821 	 */
28822 	ASSERT(rval == SD_PKT_ALLOC_FAILURE);
28823 
28824 	bp->b_resid = bp->b_bcount;
28825 	bp->b_flags |= B_ERROR;
28826 
28827 	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28828 	    "Error setting up next portion of DMA transfer\n");
28829 
28830 	return (0);
28831 }
28832 
28833 /*
28834  * Function: sd_panic_for_res_conflict
28835  *
28836  * Description: Call panic with a string formatted with "Reservation Conflict"
28837  *		and a human readable identifier indicating the SD instance
28838  *		that experienced the reservation conflict.
28839  *
28840  * Arguments: un - pointer to the soft state struct for the instance.
28841  *
28842  * Context: may execute in interrupt context.
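 *
 *		The resulting panic string looks like the following (the
 *		device path shown is illustrative only):
 *
 *		    Reservation Conflict
 *		    Disk: /pci@1f,4000/scsi@3/sd@2,0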
28843 */ 28844 28845 #define SD_RESV_CONFLICT_FMT_LEN 40 28846 void 28847 sd_panic_for_res_conflict(struct sd_lun *un) 28848 { 28849 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN]; 28850 char path_str[MAXPATHLEN]; 28851 28852 (void) snprintf(panic_str, sizeof (panic_str), 28853 "Reservation Conflict\nDisk: %s", 28854 ddi_pathname(SD_DEVINFO(un), path_str)); 28855 28856 panic(panic_str); 28857 } 28858 28859 /* 28860 * Note: The following sd_faultinjection_ioctl( ) routines implement 28861 * driver support for handling fault injection for error analysis 28862 * causing faults in multiple layers of the driver. 28863 * 28864 */ 28865 28866 #ifdef SD_FAULT_INJECTION 28867 static uint_t sd_fault_injection_on = 0; 28868 28869 /* 28870 * Function: sd_faultinjection_ioctl() 28871 * 28872 * Description: This routine is the driver entry point for handling 28873 * faultinjection ioctls to inject errors into the 28874 * layer model 28875 * 28876 * Arguments: cmd - the ioctl cmd received 28877 * arg - the arguments from user and returns 28878 */ 28879 28880 static void 28881 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 28882 28883 uint_t i = 0; 28884 uint_t rval; 28885 28886 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 28887 28888 mutex_enter(SD_MUTEX(un)); 28889 28890 switch (cmd) { 28891 case SDIOCRUN: 28892 /* Allow pushed faults to be injected */ 28893 SD_INFO(SD_LOG_SDTEST, un, 28894 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 28895 28896 sd_fault_injection_on = 1; 28897 28898 SD_INFO(SD_LOG_IOERR, un, 28899 "sd_faultinjection_ioctl: run finished\n"); 28900 break; 28901 28902 case SDIOCSTART: 28903 /* Start Injection Session */ 28904 SD_INFO(SD_LOG_SDTEST, un, 28905 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 28906 28907 sd_fault_injection_on = 0; 28908 un->sd_injection_mask = 0xFFFFFFFF; 28909 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 28910 un->sd_fi_fifo_pkt[i] = NULL; 28911 un->sd_fi_fifo_xb[i] = NULL; 28912 un->sd_fi_fifo_un[i] = NULL; 28913 un->sd_fi_fifo_arq[i] = NULL; 28914 } 28915 un->sd_fi_fifo_start = 0; 28916 un->sd_fi_fifo_end = 0; 28917 28918 mutex_enter(&(un->un_fi_mutex)); 28919 un->sd_fi_log[0] = '\0'; 28920 un->sd_fi_buf_len = 0; 28921 mutex_exit(&(un->un_fi_mutex)); 28922 28923 SD_INFO(SD_LOG_IOERR, un, 28924 "sd_faultinjection_ioctl: start finished\n"); 28925 break; 28926 28927 case SDIOCSTOP: 28928 /* Stop Injection Session */ 28929 SD_INFO(SD_LOG_SDTEST, un, 28930 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 28931 sd_fault_injection_on = 0; 28932 un->sd_injection_mask = 0x0; 28933 28934 /* Empty stray or unuseds structs from fifo */ 28935 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 28936 if (un->sd_fi_fifo_pkt[i] != NULL) { 28937 kmem_free(un->sd_fi_fifo_pkt[i], 28938 sizeof (struct sd_fi_pkt)); 28939 } 28940 if (un->sd_fi_fifo_xb[i] != NULL) { 28941 kmem_free(un->sd_fi_fifo_xb[i], 28942 sizeof (struct sd_fi_xb)); 28943 } 28944 if (un->sd_fi_fifo_un[i] != NULL) { 28945 kmem_free(un->sd_fi_fifo_un[i], 28946 sizeof (struct sd_fi_un)); 28947 } 28948 if (un->sd_fi_fifo_arq[i] != NULL) { 28949 kmem_free(un->sd_fi_fifo_arq[i], 28950 sizeof (struct sd_fi_arq)); 28951 } 28952 un->sd_fi_fifo_pkt[i] = NULL; 28953 un->sd_fi_fifo_un[i] = NULL; 28954 un->sd_fi_fifo_xb[i] = NULL; 28955 un->sd_fi_fifo_arq[i] = NULL; 28956 } 28957 un->sd_fi_fifo_start = 0; 28958 un->sd_fi_fifo_end = 0; 28959 28960 SD_INFO(SD_LOG_IOERR, un, 28961 "sd_faultinjection_ioctl: stop finished\n"); 28962 break; 28963 28964 case SDIOCINSERTPKT: 28965 /* 
Store a packet struct to be pushed onto fifo */ 28966 SD_INFO(SD_LOG_SDTEST, un, 28967 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 28968 28969 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 28970 28971 sd_fault_injection_on = 0; 28972 28973 /* No more that SD_FI_MAX_ERROR allowed in Queue */ 28974 if (un->sd_fi_fifo_pkt[i] != NULL) { 28975 kmem_free(un->sd_fi_fifo_pkt[i], 28976 sizeof (struct sd_fi_pkt)); 28977 } 28978 if (arg != NULL) { 28979 un->sd_fi_fifo_pkt[i] = 28980 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 28981 if (un->sd_fi_fifo_pkt[i] == NULL) { 28982 /* Alloc failed don't store anything */ 28983 break; 28984 } 28985 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 28986 sizeof (struct sd_fi_pkt), 0); 28987 if (rval == -1) { 28988 kmem_free(un->sd_fi_fifo_pkt[i], 28989 sizeof (struct sd_fi_pkt)); 28990 un->sd_fi_fifo_pkt[i] = NULL; 28991 } 28992 } else { 28993 SD_INFO(SD_LOG_IOERR, un, 28994 "sd_faultinjection_ioctl: pkt null\n"); 28995 } 28996 break; 28997 28998 case SDIOCINSERTXB: 28999 /* Store a xb struct to be pushed onto fifo */ 29000 SD_INFO(SD_LOG_SDTEST, un, 29001 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 29002 29003 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29004 29005 sd_fault_injection_on = 0; 29006 29007 if (un->sd_fi_fifo_xb[i] != NULL) { 29008 kmem_free(un->sd_fi_fifo_xb[i], 29009 sizeof (struct sd_fi_xb)); 29010 un->sd_fi_fifo_xb[i] = NULL; 29011 } 29012 if (arg != NULL) { 29013 un->sd_fi_fifo_xb[i] = 29014 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 29015 if (un->sd_fi_fifo_xb[i] == NULL) { 29016 /* Alloc failed don't store anything */ 29017 break; 29018 } 29019 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 29020 sizeof (struct sd_fi_xb), 0); 29021 29022 if (rval == -1) { 29023 kmem_free(un->sd_fi_fifo_xb[i], 29024 sizeof (struct sd_fi_xb)); 29025 un->sd_fi_fifo_xb[i] = NULL; 29026 } 29027 } else { 29028 SD_INFO(SD_LOG_IOERR, un, 29029 "sd_faultinjection_ioctl: xb null\n"); 29030 } 29031 break; 29032 29033 case SDIOCINSERTUN: 29034 /* Store a un struct to be pushed onto fifo */ 29035 SD_INFO(SD_LOG_SDTEST, un, 29036 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 29037 29038 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29039 29040 sd_fault_injection_on = 0; 29041 29042 if (un->sd_fi_fifo_un[i] != NULL) { 29043 kmem_free(un->sd_fi_fifo_un[i], 29044 sizeof (struct sd_fi_un)); 29045 un->sd_fi_fifo_un[i] = NULL; 29046 } 29047 if (arg != NULL) { 29048 un->sd_fi_fifo_un[i] = 29049 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 29050 if (un->sd_fi_fifo_un[i] == NULL) { 29051 /* Alloc failed don't store anything */ 29052 break; 29053 } 29054 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 29055 sizeof (struct sd_fi_un), 0); 29056 if (rval == -1) { 29057 kmem_free(un->sd_fi_fifo_un[i], 29058 sizeof (struct sd_fi_un)); 29059 un->sd_fi_fifo_un[i] = NULL; 29060 } 29061 29062 } else { 29063 SD_INFO(SD_LOG_IOERR, un, 29064 "sd_faultinjection_ioctl: un null\n"); 29065 } 29066 29067 break; 29068 29069 case SDIOCINSERTARQ: 29070 /* Store a arq struct to be pushed onto fifo */ 29071 SD_INFO(SD_LOG_SDTEST, un, 29072 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 29073 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29074 29075 sd_fault_injection_on = 0; 29076 29077 if (un->sd_fi_fifo_arq[i] != NULL) { 29078 kmem_free(un->sd_fi_fifo_arq[i], 29079 sizeof (struct sd_fi_arq)); 29080 un->sd_fi_fifo_arq[i] = NULL; 29081 } 29082 if (arg != NULL) { 29083 un->sd_fi_fifo_arq[i] = 29084 kmem_alloc(sizeof (struct 
29085 			if (un->sd_fi_fifo_arq[i] == NULL) {
29086 				/* Alloc failed; don't store anything */
29087 				break;
29088 			}
29089 			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
29090 			    sizeof (struct sd_fi_arq), 0);
29091 			if (rval == -1) {
29092 				kmem_free(un->sd_fi_fifo_arq[i],
29093 				    sizeof (struct sd_fi_arq));
29094 				un->sd_fi_fifo_arq[i] = NULL;
29095 			}
29096 
29097 		} else {
29098 			SD_INFO(SD_LOG_IOERR, un,
29099 			    "sd_faultinjection_ioctl: arq null\n");
29100 		}
29101 
29102 		break;
29103 
29104 	case SDIOCPUSH:
29105 		/* Push stored xb, pkt, un, and arq onto fifo */
29106 		sd_fault_injection_on = 0;
29107 
29108 		if (arg != NULL) {
29109 			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
29110 			if (rval != -1 &&
29111 			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
29112 				un->sd_fi_fifo_end += i;
29113 			}
29114 		} else {
29115 			SD_INFO(SD_LOG_IOERR, un,
29116 			    "sd_faultinjection_ioctl: push arg null\n");
29117 			if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
29118 				un->sd_fi_fifo_end++;
29119 			}
29120 		}
29121 		SD_INFO(SD_LOG_IOERR, un,
29122 		    "sd_faultinjection_ioctl: push to end=%d\n",
29123 		    un->sd_fi_fifo_end);
29124 		break;
29125 
29126 	case SDIOCRETRIEVE:
29127 		/* Return buffer of log from Injection session */
29128 		SD_INFO(SD_LOG_SDTEST, un,
29129 		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");
29130 
29131 		sd_fault_injection_on = 0;
29132 
29133 		mutex_enter(&(un->un_fi_mutex));
29134 		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
29135 		    un->sd_fi_buf_len+1, 0);
29136 		mutex_exit(&(un->un_fi_mutex));
29137 
29138 		if (rval == -1) {
29139 			/*
29140 			 * arg is possibly invalid; set it
29141 			 * to NULL for the return.
29142 			 */
29143 			arg = NULL;
29144 		}
29145 		break;
29146 	}
29147 
29148 	mutex_exit(SD_MUTEX(un));
29149 	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
29150 	    " exit\n");
29151 }
29152 
29153 
29154 /*
29155  * Function: sd_injection_log()
29156  *
29157  * Description: This routine adds buf to the already existing injection log
29158  *		for retrieval via sd_faultinjection_ioctl() for use in fault
29159  *		detection and recovery.
29160  *
29161  * Arguments: buf - the string to add to the log
29162  */
29163 
29164 static void
29165 sd_injection_log(char *buf, struct sd_lun *un)
29166 {
29167 	uint_t len;
29168 
29169 	ASSERT(un != NULL);
29170 	ASSERT(buf != NULL);
29171 
29172 	mutex_enter(&(un->un_fi_mutex));
29173 
29174 	len = min(strlen(buf), 255);
29175 	/* Add logged value to Injection log to be returned later */
29176 	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
29177 		uint_t	offset = strlen((char *)un->sd_fi_log);
29178 		char *destp = (char *)un->sd_fi_log + offset;
29179 		int i;
29180 		for (i = 0; i < len; i++) {
29181 			*destp++ = *buf++;
29182 		}
29183 		un->sd_fi_buf_len += len;
29184 		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
29185 	}
29186 
29187 	mutex_exit(&(un->un_fi_mutex));
29188 }
29189 
29190 
29191 /*
29192  * Function: sd_faultinjection()
29193  *
29194  * Description: This routine takes the pkt and changes its
29195  *		content based on error injection scenario.
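 *
 *		A typical injection session, as driven by the ioctls
 *		handled in sd_faultinjection_ioctl() above, is roughly:
 *		SDIOCSTART (reset the fifo and log), one or more
 *		SDIOCINSERT* calls (stage pkt/xb/un/arq error data),
 *		SDIOCPUSH (commit the staged entries to the fifo),
 *		SDIOCRUN (arm injection so that this routine applies the
 *		next fifo entry to a completing packet), and finally
 *		SDIOCRETRIEVE and SDIOCSTOP.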
29196 * 29197 * Arguments: pktp - packet to be changed 29198 */ 29199 29200 static void 29201 sd_faultinjection(struct scsi_pkt *pktp) 29202 { 29203 uint_t i; 29204 struct sd_fi_pkt *fi_pkt; 29205 struct sd_fi_xb *fi_xb; 29206 struct sd_fi_un *fi_un; 29207 struct sd_fi_arq *fi_arq; 29208 struct buf *bp; 29209 struct sd_xbuf *xb; 29210 struct sd_lun *un; 29211 29212 ASSERT(pktp != NULL); 29213 29214 /* pull bp xb and un from pktp */ 29215 bp = (struct buf *)pktp->pkt_private; 29216 xb = SD_GET_XBUF(bp); 29217 un = SD_GET_UN(bp); 29218 29219 ASSERT(un != NULL); 29220 29221 mutex_enter(SD_MUTEX(un)); 29222 29223 SD_TRACE(SD_LOG_SDTEST, un, 29224 "sd_faultinjection: entry Injection from sdintr\n"); 29225 29226 /* if injection is off return */ 29227 if (sd_fault_injection_on == 0 || 29228 un->sd_fi_fifo_start == un->sd_fi_fifo_end) { 29229 mutex_exit(SD_MUTEX(un)); 29230 return; 29231 } 29232 29233 SD_INFO(SD_LOG_SDTEST, un, 29234 "sd_faultinjection: is working for copying\n"); 29235 29236 /* take next set off fifo */ 29237 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR; 29238 29239 fi_pkt = un->sd_fi_fifo_pkt[i]; 29240 fi_xb = un->sd_fi_fifo_xb[i]; 29241 fi_un = un->sd_fi_fifo_un[i]; 29242 fi_arq = un->sd_fi_fifo_arq[i]; 29243 29244 29245 /* set variables accordingly */ 29246 /* set pkt if it was on fifo */ 29247 if (fi_pkt != NULL) { 29248 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags"); 29249 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp"); 29250 if (fi_pkt->pkt_cdbp != 0xff) 29251 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp"); 29252 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state"); 29253 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics"); 29254 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason"); 29255 29256 } 29257 /* set xb if it was on fifo */ 29258 if (fi_xb != NULL) { 29259 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno"); 29260 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 29261 if (fi_xb->xb_retry_count != 0) 29262 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 29263 SD_CONDSET(xb, xb, xb_victim_retry_count, 29264 "xb_victim_retry_count"); 29265 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 29266 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 29267 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 29268 29269 /* copy in block data from sense */ 29270 /* 29271 * if (fi_xb->xb_sense_data[0] != -1) { 29272 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 29273 * SENSE_LENGTH); 29274 * } 29275 */ 29276 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH); 29277 29278 /* copy in extended sense codes */ 29279 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29280 xb, es_code, "es_code"); 29281 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29282 xb, es_key, "es_key"); 29283 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29284 xb, es_add_code, "es_add_code"); 29285 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data), 29286 xb, es_qual_code, "es_qual_code"); 29287 struct scsi_extended_sense *esp; 29288 esp = (struct scsi_extended_sense *)xb->xb_sense_data; 29289 esp->es_class = CLASS_EXTENDED_SENSE; 29290 } 29291 29292 /* set un if it was on fifo */ 29293 if (fi_un != NULL) { 29294 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 29295 SD_CONDSET(un, un, un_ctype, "un_ctype"); 29296 SD_CONDSET(un, un, un_reset_retry_count, 29297 "un_reset_retry_count"); 29298 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 29299 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 29300 SD_CONDSET(un, un, 
un_f_arq_enabled, "un_f_arq_enabled"); 29301 SD_CONDSET(un, un, un_f_allow_bus_device_reset, 29302 "un_f_allow_bus_device_reset"); 29303 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing"); 29304 29305 } 29306 29307 /* copy in auto request sense if it was on fifo */ 29308 if (fi_arq != NULL) { 29309 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq)); 29310 } 29311 29312 /* free structs */ 29313 if (un->sd_fi_fifo_pkt[i] != NULL) { 29314 kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt)); 29315 } 29316 if (un->sd_fi_fifo_xb[i] != NULL) { 29317 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb)); 29318 } 29319 if (un->sd_fi_fifo_un[i] != NULL) { 29320 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un)); 29321 } 29322 if (un->sd_fi_fifo_arq[i] != NULL) { 29323 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq)); 29324 } 29325 29326 /* 29327 * kmem_free() does not guarantee that the pointer is set to 29328 * NULL, and we use these pointers to determine whether values 29329 * were set, so make sure they are always 29330 * NULL after the free 29331 */ 29332 un->sd_fi_fifo_pkt[i] = NULL; 29333 un->sd_fi_fifo_un[i] = NULL; 29334 un->sd_fi_fifo_xb[i] = NULL; 29335 un->sd_fi_fifo_arq[i] = NULL; 29336 29337 un->sd_fi_fifo_start++; 29338 29339 mutex_exit(SD_MUTEX(un)); 29340 29341 SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n"); 29342 } 29343 29344 #endif /* SD_FAULT_INJECTION */ 29345 29346 /* 29347 * This routine is invoked in sd_unit_attach(). Before it is called, the 29348 * properties in the conf file must already have been processed, including 29349 * the "hotpluggable" property. 29350 * 29351 * The sd driver distinguishes three different types of devices: removable 29352 * media, non-removable media, and hotpluggable. The differences are defined below: 29353 * 29354 * 1. Device ID 29355 * 29356 * The device ID is used to identify a device. Refer to 29357 * ddi_devid_register(9F). 29358 * 29359 * For a non-removable media disk device that can provide the 0x80 or 0x83 29360 * VPD page (refer to the INQUIRY command in the SCSI SPC specification), a 29361 * unique device ID is created to identify the device. For other non-removable 29362 * media devices, a default device ID is created only if the device has 29363 * at least 2 alternate cylinders. Otherwise, the device has no devid. 29364 * 29365 * ------------------------------------------------------- 29366 * removable media hotpluggable | Can Have Device ID 29367 * ------------------------------------------------------- 29368 * false false | Yes 29369 * false true | Yes 29370 * true x | No 29371 * ------------------------------------------------------- 29372 * 29373 * 29374 * 2. SCSI group 4 commands 29375 * 29376 * In the SCSI specs, only some commands in the group 4 command set can use 29377 * 8-byte addresses, which are needed to access storage beyond 2TB. 29378 * Other commands have no such capability. Without group 4 support, 29379 * it is impossible to make full use of the storage space of a disk with 29380 * a capacity larger than 2TB. 29381 * 29382 * ----------------------------------------------- 29383 * removable media hotpluggable LP64 | Group 29384 * ----------------------------------------------- 29385 * false false false | 1 29386 * false false true | 4 29387 * false true false | 1 29388 * false true true | 4 29389 * true x x | 5 29390 * ----------------------------------------------- 29391 * 29392 * 29393 * 3.
Check for VTOC Label 29394 * 29395 * If a direct-access disk has no EFI label, sd will check whether it has a 29396 * valid VTOC label. Now, sd also does that check for removable media 29397 * and hotpluggable devices. 29398 * 29399 * -------------------------------------------------------------- 29400 * Direct-Access removable media hotpluggable | Check Label 29401 * -------------------------------------------------------------- 29402 * false false false | No 29403 * false false true | No 29404 * false true false | Yes 29405 * false true true | Yes 29406 * true x x | Yes 29407 * -------------------------------------------------------------- 29408 * 29409 * 29410 * 4. Building default VTOC label 29411 * 29412 * As section 3 says, sd checks whether certain kinds of devices have a VTOC 29413 * label. If those devices have no valid VTOC label, sd(7d) will attempt to 29414 * create a default VTOC for them. Currently sd creates a default VTOC label 29415 * for all devices on the x86 platform (VTOC_16), but only for removable 29416 * media devices on SPARC (VTOC_8). 29417 * 29418 * ----------------------------------------------------------- 29419 * removable media hotpluggable platform | Default Label 29420 * ----------------------------------------------------------- 29421 * false false sparc | No 29422 * false true x86 | Yes 29423 * false true sparc | Yes 29424 * true x x | Yes 29425 * ----------------------------------------------------------- 29426 * 29427 * 29428 * 5. Supported blocksizes of target devices 29429 * 29430 * sd supports a non-512-byte blocksize for removable media devices only. 29431 * For other devices, only the 512-byte blocksize is supported. This may 29432 * change in the near future because some RAID devices require a 29433 * non-512-byte blocksize. 29434 * 29435 * ----------------------------------------------------------- 29436 * removable media hotpluggable | non-512-byte blocksize 29437 * ----------------------------------------------------------- 29438 * false false | No 29439 * false true | No 29440 * true x | Yes 29441 * ----------------------------------------------------------- 29442 * 29443 * 29444 * 6. Automatic mount & unmount 29445 * 29446 * The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used to 29447 * query whether a device is a removable media device. It returns 1 for 29448 * removable media devices, and 0 for others. 29449 * 29450 * The automatic mounting subsystem should distinguish between the types 29451 * of devices and apply automounting policies to each. 29452 * 29453 * 29454 * 7. fdisk partition management 29455 * 29456 * Fdisk is the traditional partitioning method on the x86 platform. The 29457 * sd(7d) driver supports fdisk partitions only on the x86 platform; on the 29458 * SPARC platform, sd doesn't support fdisk partitions at all. Note: 29459 * pcfs(7fs) can recognize fdisk partitions on both the x86 and SPARC platforms. 29460 * 29461 * ----------------------------------------------------------- 29462 * platform removable media USB/1394 | fdisk supported 29463 * ----------------------------------------------------------- 29464 * x86 X X | true 29465 * ------------------------------------------------------------ 29466 * sparc X X | false 29467 * ------------------------------------------------------------ 29468 * 29469 * 29470 * 8. MBOOT/MBR 29471 * 29472 * Although sd(7d) doesn't support fdisk on the SPARC platform, it does 29473 * support reading/writing the mboot for removable media devices on SPARC.
29474 * 29475 * ----------------------------------------------------------- 29476 * platform removable media USB/1394 | mboot supported 29477 * ----------------------------------------------------------- 29478 * x86 X X | true 29479 * ------------------------------------------------------------ 29480 * sparc false false | false 29481 * sparc false true | true 29482 * sparc true false | true 29483 * sparc true true | true 29484 * ------------------------------------------------------------ 29485 * 29486 * 29487 * 9. Error handling during device open 29488 * 29489 * If opening a disk device fails, an errno is returned. For some kinds of 29490 * errors, a different errno is returned depending on whether the device is 29491 * a removable media device. This brings USB/1394 hard disks in line with 29492 * expected hard disk behavior. It is not expected that this breaks any 29493 * application. 29494 * 29495 * ------------------------------------------------------ 29496 * removable media hotpluggable | errno 29497 * ------------------------------------------------------ 29498 * false false | EIO 29499 * false true | EIO 29500 * true x | ENXIO 29501 * ------------------------------------------------------ 29502 * 29503 * 29504 * 10. ioctls: DKIOCEJECT, CDROMEJECT 29505 * 29506 * These ioctls are applicable only to removable media devices. 29507 * 29508 * ----------------------------------------------------------- 29509 * removable media hotpluggable | DKIOCEJECT, CDROMEJECT 29510 * ----------------------------------------------------------- 29511 * false false | No 29512 * false true | No 29513 * true x | Yes 29514 * ----------------------------------------------------------- 29515 * 29516 * 29517 * 11. Kstats for partitions 29518 * 29519 * sd creates partition kstats for non-removable media devices, so USB and 29520 * Firewire hard disks now have partition kstats. 29521 * 29522 * ------------------------------------------------------ 29523 * removable media hotpluggable | kstat 29524 * ------------------------------------------------------ 29525 * false false | Yes 29526 * false true | Yes 29527 * true x | No 29528 * ------------------------------------------------------ 29529 * 29530 * 29531 * 12. Removable media & hotpluggable properties 29532 * 29533 * The sd driver creates a "removable-media" property for removable media 29534 * devices. A parent nexus driver creates a "hotpluggable" property if 29535 * it supports hotplugging. 29536 * 29537 * --------------------------------------------------------------------- 29538 * removable media hotpluggable | "removable-media" "hotpluggable" 29539 * --------------------------------------------------------------------- 29540 * false false | No No 29541 * false true | No Yes 29542 * true false | Yes No 29543 * true true | Yes Yes 29544 * --------------------------------------------------------------------- 29545 * 29546 * 29547 * 13. Power Management 29548 * 29549 * sd only power manages removable media devices or devices that support 29550 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250). 29551 * 29552 * A parent nexus that supports hotplugging can also set "pm-capable" 29553 * if the disk can be power managed.
29554 * 29555 * ------------------------------------------------------------ 29556 * removable media hotpluggable pm-capable | power manage 29557 * ------------------------------------------------------------ 29558 * false false false | No 29559 * false false true | Yes 29560 * false true false | No 29561 * false true true | Yes 29562 * true x x | Yes 29563 * ------------------------------------------------------------ 29564 * 29565 * USB and Firewire hard disks can now be power managed independently 29566 * of the framebuffer. 29567 * 29568 * 29569 * 14. Support for USB disks with capacity larger than 1TB 29570 * 29571 * Currently, sd doesn't permit a fixed disk device with a capacity 29572 * larger than 1TB to be used in a 32-bit operating system environment. 29573 * However, sd doesn't apply this restriction to removable media devices; 29574 * instead, it assumes that removable media devices cannot have a capacity 29575 * larger than 1TB. Therefore, using those devices on a 32-bit system is 29576 * only partially supported, which can cause unexpected results. 29577 * 29578 * --------------------------------------------------------------------- 29579 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env 29580 * --------------------------------------------------------------------- 29581 * false false | true | No 29582 * false true | true | No 29583 * true false | true | Yes 29584 * true true | true | Yes 29585 * --------------------------------------------------------------------- 29586 * 29587 * 29588 * 15. Check write-protection at open time 29589 * 29590 * When a removable media device is opened for writing without the NDELAY 29591 * flag, sd will check whether the device is writable. If a write-protected 29592 * device is opened for writing without the NDELAY flag, the open will fail. 29593 * 29594 * ------------------------------------------------------------ 29595 * removable media USB/1394 | WP Check 29596 * ------------------------------------------------------------ 29597 * false false | No 29598 * false true | No 29599 * true false | Yes 29600 * true true | Yes 29601 * ------------------------------------------------------------ 29602 * 29603 * 29604 * 16. Syslog when a corrupted VTOC is encountered 29605 * 29606 * Currently, if an invalid VTOC is encountered, sd only prints a syslog 29607 * message for fixed SCSI disks. 29608 * ------------------------------------------------------------ 29609 * removable media USB/1394 | print syslog 29610 * ------------------------------------------------------------ 29611 * false false | Yes 29612 * false true | No 29613 * true false | No 29614 * true true | No 29615 * ------------------------------------------------------------ 29616 */ 29617 static void 29618 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi) 29619 { 29620 int pm_capable_prop; 29621 29622 ASSERT(un->un_sd); 29623 ASSERT(un->un_sd->sd_inq); 29624 29625 /* 29626 * Enable SYNC CACHE support for all devices. 29627 */ 29628 un->un_f_sync_cache_supported = TRUE; 29629 29630 /* 29631 * Set the sync cache required flag to false. 29632 * This ensures that no SYNC CACHE command is 29633 * sent when there have been no writes 29634 */ 29635 un->un_f_sync_cache_required = FALSE; 29636 29637 if (un->un_sd->sd_inq->inq_rmb) { 29638 /* 29639 * The media of this device is removable, and for this kind 29640 * of device it is possible to change the medium after the 29641 * device has been opened. Thus we should support this operation.
29642 */ 29643 un->un_f_has_removable_media = TRUE; 29644 29645 /* 29646 * Support non-512-byte blocksizes for removable media devices. 29647 */ 29648 un->un_f_non_devbsize_supported = TRUE; 29649 29650 /* 29651 * Assume that all removable media devices support DOOR_LOCK. 29652 */ 29653 un->un_f_doorlock_supported = TRUE; 29654 29655 /* 29656 * A removable media device may be opened with the NDELAY flag 29657 * when there is no media in the drive; in this case we don't 29658 * care whether the device is writable. But without the NDELAY 29659 * flag, we need to check whether the media is write-protected. 29660 */ 29661 un->un_f_chk_wp_open = TRUE; 29662 29663 /* 29664 * Need to start a SCSI watch thread to monitor the media state; 29665 * when media is being inserted or ejected, notify syseventd. 29666 */ 29667 un->un_f_monitor_media_state = TRUE; 29668 29669 /* 29670 * Some devices don't support the START_STOP_UNIT command, 29671 * so we'd better check whether a device supports it 29672 * before sending it. 29673 */ 29674 un->un_f_check_start_stop = TRUE; 29675 29676 /* 29677 * Support the eject media ioctls: 29678 * FDEJECT, DKIOCEJECT, CDROMEJECT 29679 */ 29680 un->un_f_eject_media_supported = TRUE; 29681 29682 /* 29683 * Because many removable media devices don't support 29684 * LOG_SENSE, we cannot use that command to check whether 29685 * a removable media device supports power management. 29686 * We assume that they support power management via the 29687 * START_STOP_UNIT command and can be spun up and down 29688 * without limitation. 29689 */ 29690 un->un_f_pm_supported = TRUE; 29691 29692 /* 29693 * Need to create a zero-length (Boolean) property 29694 * "removable-media" for removable media devices. 29695 * Note that the return value of the property is not 29696 * checked: if the property cannot be created, we do not 29697 * want the attach to fail altogether. This is consistent 29698 * with other property creation in attach. 29699 */ 29700 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 29701 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 29702 29703 } else { 29704 /* 29705 * Create a device ID for the device. 29706 */ 29707 un->un_f_devid_supported = TRUE; 29708 29709 /* 29710 * Spin up non-removable media devices once they are attached. 29711 */ 29712 un->un_f_attach_spinup = TRUE; 29713 29714 /* 29715 * According to the SCSI specification, sense data has two 29716 * formats: fixed format and descriptor format. At present, we 29717 * don't support descriptor format sense data for removable 29718 * media. 29719 */ 29720 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) { 29721 un->un_f_descr_format_supported = TRUE; 29722 } 29723 29724 /* 29725 * kstats are created only for non-removable media devices. 29726 * 29727 * Set "enable-partition-kstats" to 0 in sd.conf to disable 29728 * kstats. The default is 1, so they are enabled by default. 29729 */ 29730 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 29731 SD_DEVINFO(un), DDI_PROP_DONTPASS, 29732 "enable-partition-kstats", 1)); 29733 29734 /* 29735 * Check whether the HBA has set the "pm-capable" property. 29736 * If "pm-capable" exists and is non-zero, then we can 29737 * power manage the device without checking the start/stop 29738 * cycle count log sense page. 29739 * 29740 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0), 29741 * then we should not power manage the device. 29742 * 29743 * If "pm-capable" doesn't exist, then pm_capable_prop will 29744 * be set to SD_PM_CAPABLE_UNDEFINED (-1).
In this case, 29745 * sd will check the start/stop cycle count log sense page 29746 * and power manage the device if the cycle count limit has 29747 * not been exceeded. 29748 */ 29749 pm_capable_prop = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 29750 DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED); 29751 if (pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) { 29752 un->un_f_log_sense_supported = TRUE; 29753 } else { 29754 /* 29755 * The pm-capable property exists. 29756 * 29757 * Convert "TRUE" values of pm_capable_prop to 29758 * SD_PM_CAPABLE_TRUE (1) to make it easier to check 29759 * later. "TRUE" values are any values except 29760 * SD_PM_CAPABLE_FALSE (0) and 29761 * SD_PM_CAPABLE_UNDEFINED (-1). 29762 */ 29763 if (pm_capable_prop == SD_PM_CAPABLE_FALSE) { 29764 un->un_f_log_sense_supported = FALSE; 29765 } else { 29766 un->un_f_pm_supported = TRUE; 29767 } 29768 29769 SD_INFO(SD_LOG_ATTACH_DETACH, un, 29770 "sd_unit_attach: un:0x%p pm-capable " 29771 "property set to %d.\n", un, un->un_f_pm_supported); 29772 } 29773 } 29774 29775 if (un->un_f_is_hotpluggable) { 29776 29777 /* 29778 * Have to watch hotpluggable devices as well, since 29779 * that's the only way for userland applications to 29780 * detect hot removal while the device is busy/mounted. 29781 */ 29782 un->un_f_monitor_media_state = TRUE; 29783 29784 un->un_f_check_start_stop = TRUE; 29785 29786 } 29787 } 29788 29789 /* 29790 * sd_tg_rdwr: 29791 * Provides read/write access for cmlb via sd_tgops. start_block is 29792 * in system block size units; reqlength is in bytes. 29793 * 29794 */ 29795 static int 29796 sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr, 29797 diskaddr_t start_block, size_t reqlength, void *tg_cookie) 29798 { 29799 struct sd_lun *un; 29800 int path_flag = (int)(uintptr_t)tg_cookie; 29801 char *dkl = NULL; 29802 diskaddr_t real_addr = start_block; 29803 diskaddr_t first_byte, end_block; 29804 29805 size_t buffer_size = reqlength; 29806 int rval = 0; 29807 diskaddr_t cap; 29808 uint32_t lbasize; 29809 sd_ssc_t *ssc; 29810 29811 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 29812 if (un == NULL) 29813 return (ENXIO); 29814 29815 if (cmd != TG_READ && cmd != TG_WRITE) 29816 return (EINVAL); 29817 29818 ssc = sd_ssc_init(un); 29819 mutex_enter(SD_MUTEX(un)); 29820 if (un->un_f_tgt_blocksize_is_valid == FALSE) { 29821 mutex_exit(SD_MUTEX(un)); 29822 rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 29823 &lbasize, path_flag); 29824 if (rval != 0) 29825 goto done1; 29826 mutex_enter(SD_MUTEX(un)); 29827 sd_update_block_info(un, lbasize, cap); 29828 if ((un->un_f_tgt_blocksize_is_valid == FALSE)) { 29829 mutex_exit(SD_MUTEX(un)); 29830 rval = EIO; 29831 goto done; 29832 } 29833 } 29834 29835 if (NOT_DEVBSIZE(un)) { 29836 /* 29837 * sys_blocksize != tgt_blocksize; need to re-adjust 29838 * blkno and save the offset to the beginning of dk_label 29839 */ 29840 first_byte = SD_SYSBLOCKS2BYTES(un, start_block); 29841 real_addr = first_byte / un->un_tgt_blocksize; 29842 29843 end_block = (first_byte + reqlength + 29844 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize; 29845 29846 /* round up the buffer size to a multiple of the target block size */ 29847 buffer_size = (end_block - real_addr) * un->un_tgt_blocksize; 29848 29849 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr", 29850 "label_addr: 0x%x allocation size: 0x%x\n", 29851 real_addr, buffer_size); 29852 29853 if (((first_byte % un->un_tgt_blocksize) != 0) || 29854 (reqlength % un->un_tgt_blocksize) != 0) 29855 /* the request is not aligned */ 29856 dkl =
kmem_zalloc(buffer_size, KM_SLEEP); 29857 } 29858 29859 /* 29860 * The MMC standard allows READ CAPACITY to be 29861 * inaccurate by a bounded amount (in the interest of 29862 * response latency). As a result, failed READs are 29863 * commonplace (due to the reading of metadata and not 29864 * data). Depending on the per-Vendor/drive Sense data, 29865 * the failed READ can cause many (unnecessary) retries. 29866 */ 29867 29868 if (ISCD(un) && (cmd == TG_READ) && 29869 (un->un_f_blockcount_is_valid == TRUE) && 29870 ((start_block == (un->un_blockcount - 1))|| 29871 (start_block == (un->un_blockcount - 2)))) { 29872 path_flag = SD_PATH_DIRECT_PRIORITY; 29873 } 29874 29875 mutex_exit(SD_MUTEX(un)); 29876 if (cmd == TG_READ) { 29877 rval = sd_send_scsi_READ(ssc, (dkl != NULL)? dkl: bufaddr, 29878 buffer_size, real_addr, path_flag); 29879 if (dkl != NULL) 29880 bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block, 29881 real_addr), bufaddr, reqlength); 29882 } else { 29883 if (dkl) { 29884 rval = sd_send_scsi_READ(ssc, dkl, buffer_size, 29885 real_addr, path_flag); 29886 if (rval) { 29887 goto done1; 29888 } 29889 bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block, 29890 real_addr), reqlength); 29891 } 29892 rval = sd_send_scsi_WRITE(ssc, (dkl != NULL)? dkl: bufaddr, 29893 buffer_size, real_addr, path_flag); 29894 } 29895 29896 done1: 29897 if (dkl != NULL) 29898 kmem_free(dkl, buffer_size); 29899 29900 if (rval != 0) { 29901 if (rval == EIO) 29902 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK); 29903 else 29904 sd_ssc_assessment(ssc, SD_FMT_IGNORE); 29905 } 29906 done: 29907 sd_ssc_fini(ssc); 29908 return (rval); 29909 } 29910 29911 29912 static int 29913 sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie) 29914 { 29915 29916 struct sd_lun *un; 29917 diskaddr_t cap; 29918 uint32_t lbasize; 29919 int path_flag = (int)(uintptr_t)tg_cookie; 29920 int ret = 0; 29921 29922 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 29923 if (un == NULL) 29924 return (ENXIO); 29925 29926 switch (cmd) { 29927 case TG_GETPHYGEOM: 29928 case TG_GETVIRTGEOM: 29929 case TG_GETCAPACITY: 29930 case TG_GETBLOCKSIZE: 29931 mutex_enter(SD_MUTEX(un)); 29932 29933 if ((un->un_f_blockcount_is_valid == TRUE) && 29934 (un->un_f_tgt_blocksize_is_valid == TRUE)) { 29935 cap = un->un_blockcount; 29936 lbasize = un->un_tgt_blocksize; 29937 mutex_exit(SD_MUTEX(un)); 29938 } else { 29939 sd_ssc_t *ssc; 29940 mutex_exit(SD_MUTEX(un)); 29941 ssc = sd_ssc_init(un); 29942 ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap, 29943 &lbasize, path_flag); 29944 if (ret != 0) { 29945 if (ret == EIO) 29946 sd_ssc_assessment(ssc, 29947 SD_FMT_STATUS_CHECK); 29948 else 29949 sd_ssc_assessment(ssc, 29950 SD_FMT_IGNORE); 29951 sd_ssc_fini(ssc); 29952 return (ret); 29953 } 29954 sd_ssc_fini(ssc); 29955 mutex_enter(SD_MUTEX(un)); 29956 sd_update_block_info(un, lbasize, cap); 29957 if ((un->un_f_blockcount_is_valid == FALSE) || 29958 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 29959 mutex_exit(SD_MUTEX(un)); 29960 return (EIO); 29961 } 29962 mutex_exit(SD_MUTEX(un)); 29963 } 29964 29965 if (cmd == TG_GETCAPACITY) { 29966 *(diskaddr_t *)arg = cap; 29967 return (0); 29968 } 29969 29970 if (cmd == TG_GETBLOCKSIZE) { 29971 *(uint32_t *)arg = lbasize; 29972 return (0); 29973 } 29974 29975 if (cmd == TG_GETPHYGEOM) 29976 ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg, 29977 cap, lbasize, path_flag); 29978 else 29979 /* TG_GETVIRTGEOM */ 29980 ret = sd_get_virtual_geometry(un, 29981 (cmlb_geom_t *)arg, cap, lbasize); 29982 
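		/*
		 * Illustrative only, not driver code: a cmlb-style caller
		 * reaches this entry point through the sd_tgops vector,
		 * with tg_cookie carrying the SD_PATH_* flag as above.
		 * For example:
		 *
		 *	diskaddr_t cap;
		 *	uint32_t lbasize;
		 *
		 *	(void) sd_tg_getinfo(devi, TG_GETCAPACITY,
		 *	    &cap, tg_cookie);
		 *	(void) sd_tg_getinfo(devi, TG_GETBLOCKSIZE,
		 *	    &lbasize, tg_cookie);
		 */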
29983 return (ret); 29984 29985 case TG_GETATTR: 29986 mutex_enter(SD_MUTEX(un)); 29987 ((tg_attribute_t *)arg)->media_is_writable = 29988 un->un_f_mmc_writable_media; 29989 mutex_exit(SD_MUTEX(un)); 29990 return (0); 29991 default: 29992 return (ENOTTY); 29993 29994 } 29995 } 29996 29997 /* 29998 * Function: sd_ssc_ereport_post 29999 * 30000 * Description: Called when the sd driver needs to post an ereport. 30001 * 30002 * Context: Kernel thread or interrupt context. 30003 */ 30004 static void 30005 sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess) 30006 { 30007 int uscsi_path_instance = 0; 30008 uchar_t uscsi_pkt_reason; 30009 uint32_t uscsi_pkt_state; 30010 uint32_t uscsi_pkt_statistics; 30011 uint64_t uscsi_ena; 30012 uchar_t op_code; 30013 uint8_t *sensep; 30014 union scsi_cdb *cdbp; 30015 uint_t cdblen = 0; 30016 uint_t senlen = 0; 30017 struct sd_lun *un; 30018 dev_info_t *dip; 30019 char *devid; 30020 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON | 30021 SSC_FLAGS_INVALID_STATUS | 30022 SSC_FLAGS_INVALID_SENSE | 30023 SSC_FLAGS_INVALID_DATA; 30024 char assessment[16]; 30025 30026 ASSERT(ssc != NULL); 30027 ASSERT(ssc->ssc_uscsi_cmd != NULL); 30028 ASSERT(ssc->ssc_uscsi_info != NULL); 30029 30030 un = ssc->ssc_un; 30031 ASSERT(un != NULL); 30032 30033 dip = un->un_sd->sd_dev; 30034 30035 /* 30036 * Get the devid: 30037 * the devid will only be passed to non-transport error reports. 30038 */ 30039 devid = DEVI(dip)->devi_devid_str; 30040 30041 /* 30042 * If we are panicking, suspended, or dumping, the command will not 30043 * be executed, so we bypass this situation. 30044 */ 30045 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 30046 (un->un_state == SD_STATE_DUMPING)) 30047 return; 30048 30049 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason; 30050 uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance; 30051 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state; 30052 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics; 30053 uscsi_ena = ssc->ssc_uscsi_info->ui_ena; 30054 30055 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf; 30056 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb; 30057 30058 /* In rare cases, e.g. DOORLOCK, the cdb could be NULL */ 30059 if (cdbp == NULL) { 30060 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 30061 "sd_ssc_ereport_post: empty cdb\n"); 30062 return; 30063 } 30064 30065 op_code = cdbp->scc_cmd; 30066 30067 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen; 30068 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen - 30069 ssc->ssc_uscsi_cmd->uscsi_rqresid); 30070 30071 if (senlen > 0) 30072 ASSERT(sensep != NULL); 30073 30074 /* 30075 * Map drv_assess to the corresponding assessment string. 30076 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending 30077 * on the sense key returned. 30078 */ 30079 switch (drv_assess) { 30080 case SD_FM_DRV_RECOVERY: 30081 (void) sprintf(assessment, "%s", "recovered"); 30082 break; 30083 case SD_FM_DRV_RETRY: 30084 (void) sprintf(assessment, "%s", "retry"); 30085 break; 30086 case SD_FM_DRV_NOTICE: 30087 (void) sprintf(assessment, "%s", "info"); 30088 break; 30089 case SD_FM_DRV_FATAL: 30090 default: 30091 (void) sprintf(assessment, "%s", "unknown"); 30092 } 30093 /* 30094 * If drv_assess == SD_FM_DRV_RECOVERY, this is a recovered 30095 * command and we post ereport.io.scsi.cmd.disk.recovered. 30096 * driver-assessment will always be "recovered" here.
30097 */ 30098 if (drv_assess == SD_FM_DRV_RECOVERY) { 30099 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30100 "cmd.disk.recovered", uscsi_ena, devid, DDI_NOSLEEP, 30101 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30102 "driver-assessment", DATA_TYPE_STRING, assessment, 30103 "op-code", DATA_TYPE_UINT8, op_code, 30104 "cdb", DATA_TYPE_UINT8_ARRAY, 30105 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30106 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30107 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30108 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 30109 NULL); 30110 return; 30111 } 30112 30113 /* 30114 * If there is unexpected or undecodable data, we should post 30115 * ereport.io.scsi.cmd.disk.dev.uderr. 30116 * driver-assessment will be set based on the parameter drv_assess. 30117 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back. 30118 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered. 30119 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered. 30120 * SSC_FLAGS_INVALID_DATA - invalid data sent back. 30121 */ 30122 if (ssc->ssc_flags & ssc_invalid_flags) { 30123 if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) { 30124 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30125 "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP, 30126 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30127 "driver-assessment", DATA_TYPE_STRING, 30128 drv_assess == SD_FM_DRV_FATAL ? 30129 "fail" : assessment, 30130 "op-code", DATA_TYPE_UINT8, op_code, 30131 "cdb", DATA_TYPE_UINT8_ARRAY, 30132 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30133 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30134 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30135 "pkt-stats", DATA_TYPE_UINT32, 30136 uscsi_pkt_statistics, 30137 "stat-code", DATA_TYPE_UINT8, 30138 ssc->ssc_uscsi_cmd->uscsi_status, 30139 "un-decode-info", DATA_TYPE_STRING, 30140 ssc->ssc_info, 30141 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 30142 senlen, sensep, 30143 NULL); 30144 } else { 30145 /* 30146 * For other types of invalid data, the 30147 * un-decode-value field is left empty because the 30148 * undecodable content can be seen in the upper 30149 * level payload or inside un-decode-info. 30150 */ 30151 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30152 "cmd.disk.dev.uderr", uscsi_ena, devid, DDI_NOSLEEP, 30153 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30154 "driver-assessment", DATA_TYPE_STRING, 30155 drv_assess == SD_FM_DRV_FATAL ? 30156 "fail" : assessment, 30157 "op-code", DATA_TYPE_UINT8, op_code, 30158 "cdb", DATA_TYPE_UINT8_ARRAY, 30159 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30160 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30161 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state, 30162 "pkt-stats", DATA_TYPE_UINT32, 30163 uscsi_pkt_statistics, 30164 "stat-code", DATA_TYPE_UINT8, 30165 ssc->ssc_uscsi_cmd->uscsi_status, 30166 "un-decode-info", DATA_TYPE_STRING, 30167 ssc->ssc_info, 30168 "un-decode-value", DATA_TYPE_UINT8_ARRAY, 30169 0, NULL, 30170 NULL); 30171 } 30172 ssc->ssc_flags &= ~ssc_invalid_flags; 30173 return; 30174 } 30175 30176 if (uscsi_pkt_reason != CMD_CMPLT || 30177 (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) { 30178 /* 30179 * Either pkt-reason != CMD_CMPLT, or SSC_FLAGS_TRAN_ABORT was 30180 * set inside sd_start_cmds due to errors (bad packet or 30181 * fatal transport error); we treat it as a 30182 * transport error, so we post ereport.io.scsi.cmd.disk.tran. 30183 * driver-assessment will be set based on drv_assess.
30184 * We will set devid to NULL because it is a transport 30185 * error. 30186 */ 30187 if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT) 30188 ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT; 30189 30190 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, 30191 "cmd.disk.tran", uscsi_ena, NULL, DDI_NOSLEEP, FM_VERSION, 30192 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30193 "driver-assessment", DATA_TYPE_STRING, 30194 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 30195 "op-code", DATA_TYPE_UINT8, op_code, 30196 "cdb", DATA_TYPE_UINT8_ARRAY, 30197 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30198 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason, 30199 "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state, 30200 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics, 30201 NULL); 30202 } else { 30203 /* 30204 * If we got here, we have a completed command, and we need 30205 * to further investigate the sense data to see what kind 30206 * of ereport we should post. 30207 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr 30208 * if sense-key == 0x3. 30209 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise. 30210 * driver-assessment will be set based on the parameter 30211 * drv_assess. 30212 */ 30213 if (senlen > 0) { 30214 /* 30215 * Here we have sense data available. 30216 */ 30217 uint8_t sense_key; 30218 sense_key = scsi_sense_key(sensep); 30219 if (sense_key == 0x3) { 30220 /* 30221 * sense-key == 0x3 (medium error); 30222 * driver-assessment should be "fatal" if 30223 * drv_assess is SD_FM_DRV_FATAL. 30224 */ 30225 scsi_fm_ereport_post(un->un_sd, 30226 uscsi_path_instance, 30227 "cmd.disk.dev.rqs.merr", 30228 uscsi_ena, devid, DDI_NOSLEEP, FM_VERSION, 30229 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30230 "driver-assessment", 30231 DATA_TYPE_STRING, 30232 drv_assess == SD_FM_DRV_FATAL ? 30233 "fatal" : assessment, 30234 "op-code", 30235 DATA_TYPE_UINT8, op_code, 30236 "cdb", 30237 DATA_TYPE_UINT8_ARRAY, cdblen, 30238 ssc->ssc_uscsi_cmd->uscsi_cdb, 30239 "pkt-reason", 30240 DATA_TYPE_UINT8, uscsi_pkt_reason, 30241 "pkt-state", 30242 DATA_TYPE_UINT8, uscsi_pkt_state, 30243 "pkt-stats", 30244 DATA_TYPE_UINT32, 30245 uscsi_pkt_statistics, 30246 "stat-code", 30247 DATA_TYPE_UINT8, 30248 ssc->ssc_uscsi_cmd->uscsi_status, 30249 "key", 30250 DATA_TYPE_UINT8, 30251 scsi_sense_key(sensep), 30252 "asc", 30253 DATA_TYPE_UINT8, 30254 scsi_sense_asc(sensep), 30255 "ascq", 30256 DATA_TYPE_UINT8, 30257 scsi_sense_ascq(sensep), 30258 "sense-data", 30259 DATA_TYPE_UINT8_ARRAY, 30260 senlen, sensep, 30261 "lba", 30262 DATA_TYPE_UINT64, 30263 ssc->ssc_uscsi_info->ui_lba, 30264 NULL); 30265 } else { 30266 /* 30267 * For sense keys other than 0x3, post 30268 * cmd.disk.dev.rqs.derr; driver-assessment 30269 * is "fatal" only if drv_assess is 30270 * SD_FM_DRV_FATAL and sense-key == 0x4 30271 * (hardware error). 30272 */ 30273 scsi_fm_ereport_post(un->un_sd, 30274 uscsi_path_instance, 30275 "cmd.disk.dev.rqs.derr", 30276 uscsi_ena, devid, DDI_NOSLEEP, 30277 FM_VERSION, 30278 DATA_TYPE_UINT8, FM_EREPORT_VERS0, 30279 "driver-assessment", 30280 DATA_TYPE_STRING, 30281 drv_assess == SD_FM_DRV_FATAL ? 30282 (sense_key == 0x4 ?
30282 "fatal" : "fail") : assessment, 30283 "op-code", 30284 DATA_TYPE_UINT8, op_code, 30285 "cdb", 30286 DATA_TYPE_UINT8_ARRAY, cdblen, 30287 ssc->ssc_uscsi_cmd->uscsi_cdb, 30288 "pkt-reason", 30289 DATA_TYPE_UINT8, uscsi_pkt_reason, 30290 "pkt-state", 30291 DATA_TYPE_UINT8, uscsi_pkt_state, 30292 "pkt-stats", 30293 DATA_TYPE_UINT32, 30294 uscsi_pkt_statistics, 30295 "stat-code", 30296 DATA_TYPE_UINT8, 30297 ssc->ssc_uscsi_cmd->uscsi_status, 30298 "key", 30299 DATA_TYPE_UINT8, 30300 scsi_sense_key(sensep), 30301 "asc", 30302 DATA_TYPE_UINT8, 30303 scsi_sense_asc(sensep), 30304 "ascq", 30305 DATA_TYPE_UINT8, 30306 scsi_sense_ascq(sensep), 30307 "sense-data", 30308 DATA_TYPE_UINT8_ARRAY, 30309 senlen, sensep, 30310 NULL); 30311 } 30312 } else { 30313 /* 30314 * For stat_code == STATUS_GOOD, this is not a 30315 * hardware error. 30316 */ 30317 if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) 30318 return; 30319 30320 /* 30321 * Post ereport.io.scsi.cmd.disk.dev.serr if we got the 30322 * stat-code but with sense data unavailable. 30323 * driver-assessment will be set based on parameter 30324 * drv_assess. 30325 */ 30326 scsi_fm_ereport_post(un->un_sd, 30327 uscsi_path_instance, "cmd.disk.dev.serr", uscsi_ena, 30328 devid, DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 30329 FM_EREPORT_VERS0, 30330 "driver-assessment", DATA_TYPE_STRING, 30331 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment, 30332 "op-code", DATA_TYPE_UINT8, op_code, 30333 "cdb", 30334 DATA_TYPE_UINT8_ARRAY, 30335 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb, 30336 "pkt-reason", 30337 DATA_TYPE_UINT8, uscsi_pkt_reason, 30338 "pkt-state", 30339 DATA_TYPE_UINT8, uscsi_pkt_state, 30340 "pkt-stats", 30341 DATA_TYPE_UINT32, uscsi_pkt_statistics, 30342 "stat-code", 30343 DATA_TYPE_UINT8, 30344 ssc->ssc_uscsi_cmd->uscsi_status, 30345 NULL); 30346 } 30347 } 30348 } 30349 30350 /* 30351 * Function: sd_ssc_extract_info 30352 * 30353 * Description: Extract information available to help generate ereport. 30354 * 30355 * Context: Kernel thread or interrupt context. 30356 */ 30357 static void 30358 sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp, 30359 struct buf *bp, struct sd_xbuf *xp) 30360 { 30361 size_t senlen = 0; 30362 union scsi_cdb *cdbp; 30363 int path_instance; 30364 /* 30365 * Need scsi_cdb_size array to determine the cdb length. 30366 */ 30367 extern uchar_t scsi_cdb_size[]; 30368 30369 ASSERT(un != NULL); 30370 ASSERT(pktp != NULL); 30371 ASSERT(bp != NULL); 30372 ASSERT(xp != NULL); 30373 ASSERT(ssc != NULL); 30374 ASSERT(mutex_owned(SD_MUTEX(un))); 30375 30376 /* 30377 * Transfer the cdb buffer pointer here. 30378 */ 30379 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 30380 30381 ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)]; 30382 ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp; 30383 30384 /* 30385 * Transfer the sense data buffer pointer if sense data is available, 30386 * calculate the sense data length first. 30387 */ 30388 if ((xp->xb_sense_state & STATE_XARQ_DONE) || 30389 (xp->xb_sense_state & STATE_ARQ_DONE)) { 30390 /* 30391 * For arq case, we will enter here. 30392 */ 30393 if (xp->xb_sense_state & STATE_XARQ_DONE) { 30394 senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid; 30395 } else { 30396 senlen = SENSE_LENGTH; 30397 } 30398 } else { 30399 /* 30400 * For non-arq case, we will enter this branch. 
30401 */ 30402 if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK && 30403 (xp->xb_sense_state & STATE_XFERRED_DATA)) { 30404 senlen = SENSE_LENGTH - xp->xb_sense_resid; 30405 } 30406 30407 } 30408 30409 ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff); 30410 ssc->ssc_uscsi_cmd->uscsi_rqresid = 0; 30411 ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data; 30412 30413 ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 30414 30415 /* 30416 * Only transfer path_instance when the scsi_pkt was properly allocated. 30417 */ 30418 path_instance = pktp->pkt_path_instance; 30419 if (scsi_pkt_allocated_correctly(pktp) && path_instance) 30420 ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance; 30421 else 30422 ssc->ssc_uscsi_cmd->uscsi_path_instance = 0; 30423 30424 /* 30425 * Copy in the other fields we may need when posting an ereport. 30426 */ 30427 ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason; 30428 ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state; 30429 ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics; 30430 ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp); 30431 30432 /* 30433 * If the command completed successfully with no sense data, do not 30434 * create an ena, so a successful command is not recognized as recovered. 30435 */ 30436 if ((pktp->pkt_reason == CMD_CMPLT) && 30437 (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) && 30438 (senlen == 0)) { 30439 return; 30440 } 30441 30442 /* 30443 * To associate the ereports of a single command execution flow, we 30444 * need a shared ena for that specific command. 30445 */ 30446 if (xp->xb_ena == 0) 30447 xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1); 30448 ssc->ssc_uscsi_info->ui_ena = xp->xb_ena; 30449 } 30450
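
/*
 * Illustrative note, not driver code: the shared-ena pattern above is what
 * lets FMA correlate every ereport produced by one command execution flow.
 * A minimal standalone sketch of the same pattern, assuming only the
 * fm_ena_generate(9F) interface and the xb_ena field used above
 * (cmd_shared_ena is a hypothetical name for illustration):
 *
 *	static uint64_t
 *	cmd_shared_ena(struct sd_xbuf *xp)
 *	{
 *		if (xp->xb_ena == 0)
 *			xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
 *		return (xp->xb_ena);
 *	}
 */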